import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from cvxopt import matrix,solvers
import pylab as pl
if __name__=="__main__":
data=pd.read_csv("nonlinsep.txt",sep=',',header=None)
dataarray=np.array(data)
classifier = np.array(dataarray)[:, 2]
classifier = np.resize(classifier, (100, 1))
X = np.array(dataarray)[:,0:2]
up_poly_array = np.zeros(shape=(100, 6))
for i in range(100):
up_poly_array[i][0] = 1
up_poly_array[i][1] = X[i][0] **2
up_poly_array[i][2] = X[i][1] ** 2
up_poly_array[i][3] = np.sqrt(2) * (X[i][0])
up_poly_array[i][4] = np.sqrt(2) * (X[i][1])
        up_poly_array[i][5] = np.sqrt(2) * (X[i][0] * X[i][1])  # sqrt(2)*x1*x2 cross term of the degree-2 polynomial feature map
P = matrix(np.dot(up_poly_array, up_poly_array.T) * np.dot(classifier, classifier.T))
q = matrix(np.ones(100) * -1)
    G = matrix(np.diag(np.ones(100) * -1))
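    # --- hedged sketch: the excerpt above is truncated mid-setup, so the rest of
    # a standard cvxopt dual-SVM solve is reconstructed here; h, A, b, alphas and
    # the 1e-5 support-vector threshold are assumptions, not the original code.
    h = matrix(np.zeros(100))                             # together with G, enforces alpha_i >= 0
    A = matrix(classifier.reshape(1, 100).astype(float))  # equality constraint: sum_i alpha_i y_i = 0
    b = matrix(0.0)
    solution = solvers.qp(P, q, G, h, A, b)
    alphas = np.ravel(solution['x'])
    support = alphas > 1e-5                               # flags the support vectors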
"""
images3_photometry.py
Includes all the functions that perform photometry processes.
All the functions take as input either an HDUList object or a DataSet object, as defined in the basics.py file, and
return the input object and a dictionary that contains the extracted light-curves. In all cases, the default values for
the input parameters are the values in the respective pipeline.variables object. Note that the parameters for the
supporting functions do not have default values, as their purpose is to be used only in this particular file.
Functions included:
photometry: ...
split_photometry: ...
Supporting functions included:
get_flux_integral: ...
get_flux_gauss: ...
"""
__all__ = ['photometry', 'plot_photometry', 'split_photometry']
import numpy as np
import scipy
import warnings
import pylightcurve as plc
from matplotlib import pyplot as plt
from iraclis.classes import *
def get_flux_integral(fits, lower_wavelength, upper_wavelength,
aperture_lower_extend, aperture_upper_extend, sigma, plot=False):
x_star = variables.x_star.custom_from_fits(fits).value
y_star = variables.y_star.custom_from_fits(fits).value
spectrum_direction = variables.spectrum_direction.custom_from_fits(fits).value
scan_length = variables.scan_length.custom_from_fits(fits).value
wdpt_constant_coefficient_1 = variables.wdpt_constant_coefficient_1.custom_from_fits(fits).value
wdpt_constant_coefficient_2 = variables.wdpt_constant_coefficient_2.custom_from_fits(fits).value
wdpt_constant_coefficient_3 = variables.wdpt_constant_coefficient_3.custom_from_fits(fits).value
wdpt_slope_coefficient_1 = variables.wdpt_slope_coefficient_1.custom_from_fits(fits).value
wdpt_slope_coefficient_2 = variables.wdpt_slope_coefficient_2.custom_from_fits(fits).value
wdpt_slope_coefficient_3 = variables.wdpt_slope_coefficient_3.custom_from_fits(fits).value
trace_at0 = calibrations.trace_at0.match(fits)
trace_at1 = calibrations.trace_at1.match(fits)
trace_at2 = calibrations.trace_at2.match(fits)
trace_at3 = calibrations.trace_at3.match(fits)
trace_at4 = calibrations.trace_at4.match(fits)
trace_at5 = calibrations.trace_at5.match(fits)
trace_bt0 = calibrations.trace_bt0.match(fits)
trace_bt1 = calibrations.trace_bt1.match(fits)
trace_bt2 = calibrations.trace_bt2.match(fits)
def get_trace(dy):
xx0 = x_star
yy0 = y_star + dy
        sub = 507 - len(fits[1].data) / 2  # subarray offset from the WFC3 full-frame centre (full frame is 1014 px)
bt = trace_bt0 + trace_bt1 * xx0 + trace_bt2 * yy0
at = (trace_at0 + trace_at1 * xx0 + trace_at2 * yy0 + trace_at3 * xx0 * xx0 +
trace_at4 * xx0 * yy0 + trace_at5 * yy0 * yy0)
return at, bt + yy0 - at * xx0 - sub + at * sub
if spectrum_direction > 0:
y0 = aperture_lower_extend
y1 = scan_length + aperture_upper_extend
else:
y0 = - scan_length - aperture_upper_extend
y1 = - aperture_lower_extend
va1 = (wdpt_slope_coefficient_1 / (wdpt_slope_coefficient_2 + lower_wavelength) + wdpt_slope_coefficient_3)
vb1 = (wdpt_constant_coefficient_1 / (wdpt_constant_coefficient_2 + lower_wavelength) + wdpt_constant_coefficient_3)
va2 = (wdpt_slope_coefficient_1 / (wdpt_slope_coefficient_2 + upper_wavelength) + wdpt_slope_coefficient_3)
vb2 = (wdpt_constant_coefficient_1 / (wdpt_constant_coefficient_2 + upper_wavelength) + wdpt_constant_coefficient_3)
ha1, hb1 = get_trace(y0)
ha2, hb2 = get_trace(y1)
    ha2 += sigma
    ha2 -= sigma  # the two shifts cancel, so sigma has no net effect on the integral extraction
if plot:
xxx = np.arange((hb1 - vb1) / (va1 - ha1), (hb1 - vb2) / (va2 - ha1), 0.0001)
plt.plot(xxx, ha1 * xxx + hb1, 'w-')
xxx = np.arange((hb2 - vb1) / (va1 - ha2), (hb2 - vb2) / (va2 - ha2), 0.0001)
plt.plot(xxx, ha2 * xxx + hb2, 'w-')
xxx = np.arange((hb2 - vb1) / (va1 - ha2), (hb1 - vb1) / (va1 - ha1), 0.0001)
plt.plot(xxx, va1 * xxx + vb1, 'w-')
xxx = np.arange((hb2 - vb2) / (va2 - ha2), (hb1 - vb2) / (va2 - ha1), 0.0001)
plt.plot(xxx, va2 * xxx + vb2, 'w-')
    fcr = np.array(fits[1].data)     # working copy of the science frame
    fhm = np.roll(fcr, 1, axis=1)    # horizontally shifted copy (x - 1 neighbour values)
    fhp = np.roll(fcr, -1, axis=1)   # horizontally shifted copy (x + 1 neighbour values)
    fvm = np.roll(fcr, -1, axis=0)   # vertically shifted copy (row + 1 neighbour values)
    fvp = np.roll(fcr, 1, axis=0)    # vertically shifted copy (row - 1 neighbour values)
x0, y0 = np.meshgrid(np.arange(len(fcr)), np.arange(len(fcr)))
summ1 = (2.0 * fcr - fhm - fhp)
summ2 = (4.0 * fcr - 4.0 * fhm)
summ3 = (8.0 * fcr + 4.0 * fhm - 2.0 * fhp + 4.0 * fvm - 2.0 * fvp)
summ4 = (4.0 * fcr - 4.0 * fvm)
summ5 = (2.0 * fcr - fvm - fvp)
summ6 = (4.0 * fcr - 4.0 * fhp)
summ7 = (10.0 * fcr - 2.0 * fhm + 4.0 * fhp - fvm + fvp)
summ8 = (20.0 * fcr - 4.0 * fhm + 8.0 * fhp)
summ9 = (8.0 * fcr - 2.0 * fhm + 4.0 * fhp - 2.0 * fvm + 4.0 * fvp)
summ10 = (4.0 * fcr - 4.0 * fvp)
summ11 = (2.0 * fcr - fvm - fvp)
# left edge
a, b = va1, vb1
x1 = (-b + y0) / a
x2 = (1 - b + y0) / a
formula = a * x0 + b - y0
formula_2 = formula * formula
formula_3 = formula_2 * formula
formula_4 = formula_3 * formula
case1 = (
+ fcr - (1.0 / (24.0 * (a ** 3))) * (
+ summ1 * formula_4
+ a * summ2 * formula_3
+ (a ** 2) * (- summ3 * formula_2 - summ4 * formula_3 + summ5 * formula_4)
)
)
formula = a + a * x0 + b - y0
formula_2 = formula * formula
formula_3 = formula_2 * formula
case2 = (
- (1.0 / (24.0 * (a ** 3))) * (
+ 4.0 * summ1 * (- 0.25 + formula - 1.5 * formula_2 + formula_3)
+ (a * 3.0) * summ6 * (-1.0 / 3 + formula - formula_2)
+ (a ** 2) * (summ7 - summ8 * formula)
)
)
formula = - 1.0 + a + a * x0 + b - y0
formula_2 = formula * formula
formula_3 = formula_2 * formula
formula_4 = formula_3 * formula
case3 = (
- (1.0 / (24.0 * (a ** 3))) * (
- summ1 * formula_4
+ a * summ6 * formula_3
+ (a ** 2) * (summ9 * formula_2 - summ10 * formula_3 - summ11 * formula_4)
)
)
    new_data = np.array(fits[1].data)  # start from the science frame; pixels outside the aperture are reweighted or zeroed below
new_data = np.where((x1 > x0) & (x2 < x0), case1, new_data)
new_data = np.where((x1 > x0) & (x0 + 1 > x1) & (x2 > x0) & (x0 + 1 > x2), case2, new_data)
new_data = np.where((x1 > x0 + 1) & (x2 < x0 + 1), case3, new_data)
new_data = np.where((x1 > x0 + 1) & (x2 > x0 + 1), 0, new_data)
# right edge
a, b = va2, vb2
x1 = (-b + y0) / a
x2 = (1 - b + y0) / a
formula = a * x0 + b - y0
formula_2 = formula * formula
formula_3 = formula_2 * formula
formula_4 = formula_3 * formula
case1 = (
+ (1.0 / (24.0 * (a ** 3))) * (
+ summ1 * formula_4
+ a * summ2 * formula_3
+ (a ** 2) * (- summ3 * formula_2 - summ4 * formula_3 + summ5 * formula_4)
)
)
formula = a + a * x0 + b - y0
formula_2 = formula * formula
formula_3 = formula_2 * formula
case2 = (
fcr + (1.0 / (24.0 * (a ** 3))) * (
+ 4.0 * summ1 * (- 0.25 + formula - 1.5 * formula_2 + formula_3)
+ (a * 3.0) * summ6 * (-1.0 / 3 + formula - formula_2)
+ (a ** 2) * (summ7 - summ8 * formula)
)
)
formula = - 1.0 + a + a * x0 + b - y0
formula_2 = formula * formula
formula_3 = formula_2 * formula
formula_4 = formula_3 * formula
case3 = (
fcr + (1.0 / (24.0 * (a ** 3))) * (
- summ1 * formula_4
+ a * summ6 * formula_3
+ (a ** 2) * (summ9 * formula_2 - summ10 * formula_3 - summ11 * formula_4)
)
)
new_data = np.where((x1 < x0) & (x2 < x0), 0, new_data)
new_data = np.where((x1 > x0) & (x2 < x0), case1, new_data)
new_data = np.where((x1 > x0) & (x0 + 1 > x1) & (x2 > x0) & (x0 + 1 > x2), case2, new_data)
new_data = np.where((x1 > x0 + 1) & (x2 < x0 + 1), case3, new_data)
# upper edge
new_data = np.rot90(new_data)
fcr = np.ones_like(new_data) * new_data
fhm = np.roll(fcr, 1, axis=1)
fhp = np.roll(fcr, -1, axis=1)
fvm = np.roll(fcr, -1, axis=0)
fvp = np.roll(fcr, 1, axis=0)
x0, y0 = np.meshgrid(np.arange(len(fcr)), np.arange(len(fcr)))
summ1 = (2.0 * fcr - fhm - fhp)
summ2 = (4.0 * fcr - 4.0 * fhm)
summ3 = (8.0 * fcr + 4.0 * fhm - 2.0 * fhp + 4.0 * fvm - 2.0 * fvp)
summ4 = (4.0 * fcr - 4.0 * fvm)
summ5 = (2.0 * fcr - fvm - fvp)
summ6 = (4.0 * fcr - 4.0 * fhp)
summ7 = (10.0 * fcr - 2.0 * fhm + 4.0 * fhp - fvm + fvp)
summ8 = (20.0 * fcr - 4.0 * fhm + 8.0 * fhp)
summ9 = (8.0 * fcr - 2.0 * fhm + 4.0 * fhp - 2.0 * fvm + 4.0 * fvp)
summ10 = (4.0 * fcr - 4.0 * fvp)
summ11 = (2.0 * fcr - fvm - fvp)
a, b = ha2, hb2
a, b = - 1.0 / a, len(fcr) + b / a
x1 = (-b + y0) / a
x2 = (1 - b + y0) / a
formula = a * x0 + b - y0
formula_2 = formula * formula
formula_3 = formula_2 * formula
formula_4 = formula_3 * formula
case1 = (
+ (1.0 / (24.0 * (a ** 3))) * (
+ summ1 * formula_4
+ a * summ2 * formula_3
+ (a ** 2) * (- summ3 * formula_2 - summ4 * formula_3 + summ5 * formula_4)
)
)
formula = a + a * x0 + b - y0
formula_2 = formula * formula
formula_3 = formula_2 * formula
case2 = (
fcr + (1.0 / (24.0 * (a ** 3))) * (
+ 4.0 * summ1 * (- 0.25 + formula - 1.5 * formula_2 + formula_3)
+ (a * 3.0) * summ6 * (-1.0 / 3 + formula - formula_2)
+ (a ** 2) * (summ7 - summ8 * formula)
)
)
formula = - 1.0 + a + a * x0 + b - y0
formula_2 = formula * formula
formula_3 = formula_2 * formula
formula_4 = formula_3 * formula
case3 = (
fcr + (1.0 / (24.0 * (a ** 3))) * (
- summ1 * formula_4
+ a * summ6 * formula_3
+ (a ** 2) * (summ9 * formula_2 - summ10 * formula_3 - summ11 * formula_4)
)
)
new_data = np.where((x1 < x0) & (x2 < x0), 0, new_data)
new_data = np.where((x1 > x0) & (x2 < x0), case1, new_data)
new_data = np.where((x1 > x0) & (x0 + 1 > x1) & (x2 > x0) & (x0 + 1 > x2), case2, new_data)
new_data = np.where((x1 > x0 + 1) & (x2 < x0 + 1), case3, new_data)
# lower edge
a, b = ha1, hb1
a, b = - 1.0 / a, len(fcr) + b / a
x1 = (-b + y0) / a
x2 = (1 - b + y0) / a
formula = a * x0 + b - y0
formula_2 = formula * formula
formula_3 = formula_2 * formula
formula_4 = formula_3 * formula
case1 = (
+ fcr - (1.0 / (24.0 * (a ** 3))) * (
+ summ1 * formula_4
+ a * summ2 * formula_3
+ (a ** 2) * (- summ3 * formula_2 - summ4 * formula_3 + summ5 * formula_4)
)
)
formula = a + a * x0 + b - y0
formula_2 = formula * formula
formula_3 = formula_2 * formula
case2 = (
- (1.0 / (24.0 * (a ** 3))) * (
+ 4.0 * summ1 * (- 0.25 + formula - 1.5 * formula_2 + formula_3)
+ (a * 3.0) * summ6 * (-1.0 / 3 + formula - formula_2)
+ (a ** 2) * (summ7 - summ8 * formula)
)
)
formula = - 1.0 + a + a * x0 + b - y0
formula_2 = formula * formula
formula_3 = formula_2 * formula
formula_4 = formula_3 * formula
case3 = (
- (1.0 / (24.0 * (a ** 3))) * (
- summ1 * formula_4
+ a * summ6 * formula_3
+ (a ** 2) * (summ9 * formula_2 - summ10 * formula_3 - summ11 * formula_4)
)
)
new_data = np.where((x1 > x0) & (x2 < x0), case1, new_data)
new_data = np.where((x1 > x0) & (x0 + 1 > x1) & (x2 > x0) & (x0 + 1 > x2), case2, new_data)
new_data = np.where((x1 > x0 + 1) & (x2 < x0 + 1), case3, new_data)
new_data = np.where((x1 > x0 + 1) & (x2 > x0 + 1), 0, new_data)
new_data = np.rot90(new_data, 3)
# error array
    xx = np.where(fits[1].data == 0, 1, fits[1].data)  # guard against division by zero
error = np.sqrt(new_data / xx) * fits[2].data
flux = np.sum(new_data)
error = np.sqrt(np.nansum(error * error))
return flux, error
def get_flux_gauss(fits, lower_wavelength, upper_wavelength,
aperture_lower_extend, aperture_upper_extend, sigma, plot=False):
spectrum_direction = variables.spectrum_direction.custom_from_fits(fits).value
scan_length = variables.scan_length.custom_from_fits(fits).value
scan_frame = variables.scan_frame.custom_from_fits(fits).value
wavelength_frame = variables.wavelength_frame.custom_from_fits(fits).value
if spectrum_direction > 0:
y1 = min(aperture_lower_extend, aperture_upper_extend)
y2 = scan_length + max(aperture_lower_extend, aperture_upper_extend)
else:
y1 = - scan_length - max(aperture_lower_extend, aperture_upper_extend)
y2 = - min(aperture_lower_extend, aperture_upper_extend)
science_frame = np.array(fits[plc.fits_sci(fits)[0]].data)
error_frame = np.array(fits[plc.fits_err(fits)[0]].data)
ph_error_frame = np.sqrt(np.abs(science_frame))
scan_weight = (scipy.special.erf((scan_frame - y1) / ((sigma / 45.) * np.sqrt(2.0))) -
scipy.special.erf((scan_frame - y2) / ((sigma / 45.) * np.sqrt(2.0)))) / 2
wavelength_weight = (scipy.special.erf((wavelength_frame - lower_wavelength) / (sigma * np.sqrt(2.0))) -
scipy.special.erf((wavelength_frame - upper_wavelength) / (sigma * np.sqrt(2.0)))) / 2
weighted_science_frame = science_frame * scan_weight * wavelength_weight
weighted_error_frame = error_frame * scan_weight * wavelength_weight
weighted_ph_error_frame = ph_error_frame * scan_weight * wavelength_weight
flux = np.sum(weighted_science_frame)
error = np.sqrt(np.nansum(weighted_error_frame * weighted_error_frame))
ph_error = np.sqrt(np.nansum(weighted_ph_error_frame * weighted_ph_error_frame))
if plot:
get_flux_integral(fits, lower_wavelength, upper_wavelength,
aperture_lower_extend, aperture_upper_extend, sigma, plot=True)
return flux, error, ph_error
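# A note on the weights used above: each weight is a top-hat window convolved
# with a Gaussian of width sigma, expressed as a difference of two error
# functions. The helper below is an illustrative sketch, not part of the
# original pipeline.
def _erf_window_sketch(y, y1, y2, sigma):
    # ~1 for y1 < y < y2, smoothly falling to 0 over a scale of ~sigma outside
    return (scipy.special.erf((y - y1) / (sigma * np.sqrt(2.0))) -
            scipy.special.erf((y - y2) / (sigma * np.sqrt(2.0)))) / 2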
def photometry(input_data, white_lower_wavelength=None, white_upper_wavelength=None, bins_file=None,
aperture_lower_extend=None, aperture_upper_extend=None, extraction_method=None,
extraction_gauss_sigma=None, plot=False):
# load pipeline and calibration variables to be used
white_lower_wavelength = variables.white_lower_wavelength.custom(white_lower_wavelength)
white_upper_wavelength = variables.white_upper_wavelength.custom(white_upper_wavelength)
bins_file = variables.bins_file.custom(bins_file)
aperture_lower_extend = variables.aperture_lower_extend.custom(aperture_lower_extend)
aperture_upper_extend = variables.aperture_upper_extend.custom(aperture_upper_extend)
extraction_method = variables.extraction_method.custom(extraction_method)
extraction_gauss_sigma = variables.extraction_gauss_sigma.custom(extraction_gauss_sigma)
ra_target = variables.ra_target.custom()
dec_target = variables.dec_target.custom()
subarray_size = variables.sub_array_size.custom()
grism = variables.grism.custom()
exposure_time = variables.exposure_time.custom()
bins_number = variables.bins_number.custom()
bjd_tdb = variables.bjd_tdb.custom()
spectrum_direction = variables.spectrum_direction.custom()
sky_background_level = variables.sky_background_level.custom()
y_star = variables.y_star.custom()
y_shift_error = variables.y_shift_error.custom()
x_star = variables.x_star.custom()
x_shift_error = variables.x_shift_error.custom()
scan_length = variables.scan_length.custom()
scan_length_error = variables.scan_length_error.custom()
bjd_tdb_array = variables.bjd_tdb_array.custom()
spectrum_direction_array = variables.spectrum_direction_array.custom()
sky_background_level_array = variables.sky_background_level_array.custom()
x_star_array = variables.x_star_array.custom()
x_shift_error_array = variables.x_shift_error_array.custom()
y_star_array = variables.y_star_array.custom()
y_shift_error_array = variables.y_shift_error_array.custom()
scan_length_array = variables.scan_length_array.custom()
scan_length_error_array = variables.scan_length_error_array.custom()
white_ldc1 = variables.white_ldc1.custom()
white_ldc2 = variables.white_ldc2.custom()
white_ldc3 = variables.white_ldc3.custom()
white_ldc4 = variables.white_ldc4.custom()
lower_wavelength = variables.lower_wavelength.custom()
upper_wavelength = variables.upper_wavelength.custom()
flux_array = variables.flux_array.custom()
error_array = variables.error_array.custom()
ph_error_array = variables.ph_error_array.custom()
# set bins
white_dictionary, bins_dictionaries = \
variables.set_binning(input_data, white_lower_wavelength.value, white_upper_wavelength.value,
white_ldc1.value, white_ldc2.value, white_ldc3.value, white_ldc4.value,
bins_file.value)
# select extraction method
used_extraction_method = {'integral': get_flux_integral, 'gauss': get_flux_gauss}[extraction_method.value]
# initiate counter
counter = PipelineCounter('Photometry', len(input_data.spectroscopic_images))
# iterate over the list of HDUList objects included in the input data
light_curve = {}
for fits in input_data.spectroscopic_images:
try:
ra_target.from_dictionary(light_curve)
except KeyError:
ra_target.from_fits(fits)
ra_target.to_dictionary(light_curve)
dec_target.from_fits(fits)
dec_target.to_dictionary(light_curve)
subarray_size.set(len(fits[1].data))
subarray_size.to_dictionary(light_curve)
grism.from_fits(fits)
grism.to_dictionary(light_curve)
exposure_time.from_fits(fits)
exposure_time.to_dictionary(light_curve)
aperture_lower_extend.to_dictionary(light_curve)
aperture_upper_extend.to_dictionary(light_curve)
extraction_method.to_dictionary(light_curve)
extraction_gauss_sigma.to_dictionary(light_curve)
bjd_tdb.from_fits(fits)
bjd_tdb_array.set(
np.append(bjd_tdb_array.value, bjd_tdb.value))
spectrum_direction.from_fits(fits)
spectrum_direction_array.set(np.append(spectrum_direction_array.value, spectrum_direction.value))
sky_background_level.from_fits(fits, position=plc.fits_sci(fits)[0])
sky_background_level_array.set(np.append(sky_background_level_array.value, sky_background_level.value))
y_star.from_fits(fits)
y_star_array.set(np.append(y_star_array.value, y_star.value))
y_shift_error.from_fits(fits)
y_shift_error_array.set(np.append(y_shift_error_array.value, y_shift_error.value))
x_star.from_fits(fits)
x_star_array.set(np.append(x_star_array.value, x_star.value))
x_shift_error.from_fits(fits)
x_shift_error_array.set(np.append(x_shift_error_array.value, x_shift_error.value))
scan_length.from_fits(fits)
scan_length_array.set(np.append(scan_length_array.value, scan_length.value))
scan_length_error.from_fits(fits)
scan_length_error_array.set(np.append(scan_length_error_array.value, scan_length_error.value))
bins_number.set(len(bins_dictionaries))
bins_number.to_dictionary(light_curve)
for i in [white_dictionary] + bins_dictionaries:
lower_wavelength.from_dictionary(i)
upper_wavelength.from_dictionary(i)
flux, error, ph_error = used_extraction_method(fits, lower_wavelength.value, upper_wavelength.value,
aperture_lower_extend.value, aperture_upper_extend.value,
extraction_gauss_sigma.value)
flux_array.from_dictionary(i)
flux_array.to_dictionary(i, value=np.append(flux_array.value, flux))
error_array.from_dictionary(i)
error_array.to_dictionary(i, value=np.append(error_array.value, error))
ph_error_array.from_dictionary(i)
            ph_error_array.to_dictionary(i, value=np.append(ph_error_array.value, ph_error))
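# Hedged usage sketch (assumed, based on the signature and the module docstring
# above; `data_set` would be an iraclis DataSet of spectroscopic images):
def _photometry_usage_sketch(data_set):
    data_set, light_curve = photometry(data_set, extraction_method='gauss')
    return light_curve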
import os.path as osp
import glob
import copy
import multiprocessing as mp
from tqdm import tqdm
import random
import torch
import pandas
import numpy as np
import matplotlib.pyplot as plt
from torch_geometric.utils import is_undirected
from torch_geometric.data import Data, Dataset
class TrackMLParticleTrackingDataset(Dataset):
r"""The `TrackML Particle Tracking Challenge
<https://www.kaggle.com/c/trackml-particle-identification>`_ dataset to
reconstruct particle tracks from 3D points left in the silicon detectors.
Args:
root (string): Root directory where the dataset should be saved.
transform (callable, optional): A function/transform that takes in an
:obj:`torch_geometric.data.Data` object and returns a transformed
version. The data object will be transformed before every access.
(default: :obj:`None`)
n_events (int): Number of events in the raw folder to process
GRAPH CONSTRUCTION PARAMETERS
###########################################################################
volume_layer_ids (List): List of the volume and layer ids to be included
in the graph. Layers get indexed by increasing volume and layer id.
Refer to the following map for the layer indices, and compare them
to the chart at https://www.kaggle.com/c/trackml-particle-identification/data
41
34 --- 39 | 42 --- 47
40
27
18 --- 23 | 28 --- 33
24
10
0 --- 6 | 11 --- 17
7
layer_pairs (List): List of which pairs of layers can have edges between them.
Uses the layer indices described above to reference layers.
Example for Barrel Only:
[[7,8],[8,9],[9,10],[10,24],[24,25],[25,26],[26,27],[27,40],[40,41]]
pt_range ([min, max]): A truth cut applied to reduce the number of nodes in the graph.
Only nodes associated with particles in this momentum range are included.
        eta_range ([min, max]): A cut applied to nodes to select a specific eta range.
phi_slope_max (float32): A cut applied to edges to limit the change in phi between
the two nodes.
        z0_max (float32): A cut applied to edges that limits how far from the center of
            the detector an edge can originate.
n_phi_sections (int): Break the graph into multiple segments in the phi direction.
n_eta_sections (int): Break the graph into multiple segments in the eta direction.
augments (bool): Toggle for turning data augmentation on and off
        intersect (bool): Toggle for the intersecting-lines cut. When connecting barrel
            edges to the innermost endcap layer, the edge sometimes passes through
            the layer above; this cut removes those edges.
hough (bool): Toggle for using a hough transform to construct an edge weight.
Each node in the graph casts votes into an accumulator for a linear
parameter space. The edge is then used to address this accumulator and
extract the vote count.
        noise (bool): Toggle for including noise hits in the graph
tracking (bool): Toggle for building truth tracks. Track data is a tensor with
dimensions (Nx5) with the following columns:
[r coord, phi coord, z coord, layer index, track number]
        directed (bool): If False, the graph is undirected: each edge is
            duplicated in the reverse direction.
layer_pairs_plus (bool): Allows for edge connections within the same layer
MULTIPROCESSING PARAMETERS
###########################################################################
n_workers (int): Number of worker nodes for multiprocessing
n_tasks (int): Break the processing into a number of tasks
"""
url = 'https://www.kaggle.com/c/trackml-particle-identification'
def __init__(self, root, transform=None, n_events=0,
directed=False, layer_pairs_plus=False,
volume_layer_ids=[[8, 2], [8, 4], [8, 6], [8, 8]], #Layers Selected
layer_pairs=[[7, 8], [8, 9], [9, 10]], #Connected Layers
pt_range=[1.5, 2], eta_range=[-5, 5], #Node Cuts
phi_slope_max=0.0006, z0_max=150, #Edge Cuts
n_phi_sections=1, n_eta_sections=1, #N Sections
augments=False, intersect=False, #Toggle Switches
hough=False, tracking=False, #Toggle Switches
noise=False, duplicates=False, #Truth Toggle Switches
n_workers=mp.cpu_count(), n_tasks=1, #multiprocessing
mmap=False, #module map
data_type="TrackML" #Other Detectors
):
events = glob.glob(osp.join(osp.join(root, 'raw'), 'event*-truth.csv'))
events = [e.split(osp.sep)[-1].split('-')[0][5:] for e in events]
self.events = sorted(events)
if (n_events > 0):
self.events = self.events[:n_events]
self.data_type = data_type
self.mmap = mmap
self.directed = directed
self.layer_pairs_plus = layer_pairs_plus
self.volume_layer_ids = torch.tensor(volume_layer_ids)
self.layer_pairs = torch.tensor(layer_pairs)
self.pt_range = pt_range
self.eta_range = eta_range
self.phi_slope_max = phi_slope_max
self.z0_max = z0_max
self.n_phi_sections = n_phi_sections
self.n_eta_sections = n_eta_sections
self.augments = augments
self.intersect = intersect
self.hough = hough
self.noise = noise
self.duplicates = duplicates
self.tracking = tracking
self.n_workers = n_workers
self.n_tasks = n_tasks
self.accum0_m = [-30, 30, 2000] # cot(theta) [eta]
self.accum0_b = [-20, 20, 2000] # z0
self.accum1_m = [-.0003, .0003, 2000] # phi-slope [qA/pT]
self.accum1_b = [-3.3, 3.3, 2000] # phi0
# bin = 2000
# m = torch.cot(2*torch.atan(torch.e^(-eta_range)))
# self.accum0_m = [m[0], m[1], bin] # cot(theta) [eta]
# # self.accum0_b = [-z0_max, z0_max, bin] # z0
# self.accum0_b = [-20, 20, bin] # z0
# self.accum1_m = [-phi_slope_max, phi_slope_max, bin] # phi-slope [qA/pT]
# self.accum1_b = [-np.pi, np.pi, bin] # phi0
super(TrackMLParticleTrackingDataset, self).__init__(root, transform)
@property
def raw_file_names(self):
if not hasattr(self,'input_files'):
self.input_files = sorted(glob.glob(self.raw_dir+'/*.csv'))
return [f.split('/')[-1] for f in self.input_files]
@property
def processed_file_names(self):
N_sections = self.n_phi_sections*self.n_eta_sections
if not hasattr(self,'processed_files'):
proc_names = ['event{}_section{}.pt'.format(idx, i) for idx in self.events for i in range(N_sections)]
if(self.augments):
proc_names_aug = ['event{}_section{}_aug.pt'.format(idx, i) for idx in self.events for i in range(N_sections)]
proc_names = [x for y in zip(proc_names, proc_names_aug) for x in y]
self.processed_files = [osp.join(self.processed_dir,name) for name in proc_names]
return self.processed_files
@property
def average_node_count(self):
if not hasattr(self,'node_avg'):
N_nodes = np.asarray([self[idx].x.shape[0] for idx in range(len(self.events))])
self.node_avg = N_nodes.mean()
fig0, (ax0) = plt.subplots(1, 1, dpi=500, figsize=(6, 6))
ax0.hist(N_nodes)
ax0.set_xlabel('Nodes')
ax0.set_ylabel('Count')
# ax0.set_xlim(-1.1*np.abs(z_co).max(), 1.1*np.abs(z_co).max())
# ax0.set_ylim(-1.1*r_co.max(), 1.1*r_co.max())
fig0.savefig('Nodes_distribution.pdf', dpi=500)
return self.node_avg
@property
def maximum_node_count(self):
if not hasattr(self,'node_max'):
N_nodes = np.asarray([self[idx].x.shape[0] for idx in range(len(self.events))])
self.node_max = N_nodes.max()
return self.node_max
@property
def average_total_node_count(self):
if not hasattr(self,'total_node_avg'):
N_total_nodes = np.asarray([self[idx].tracks.shape[0] for idx in range(len(self.events))])
self.total_node_avg = N_total_nodes.mean()
return self.total_node_avg
@property
def average_total_pixel_node_count(self):
if not hasattr(self,'total_pixel_node_avg'):
N_total_nodes = np.asarray([self[idx].tracks[self[idx].tracks[:,3] < 18].shape[0] for idx in range(len(self.events))])
self.total_pixel_node_avg = N_total_nodes.mean()
return self.total_pixel_node_avg
@property
def average_edge_count(self):
if not hasattr(self,'edge_avg'):
N_edges = np.asarray([self[idx].y.shape[0] for idx in range(len(self.events))])
self.edge_avg = N_edges.mean()
fig0, (ax0) = plt.subplots(1, 1, dpi=500, figsize=(6, 6))
ax0.hist(N_edges)
ax0.set_xlabel('Edges')
ax0.set_ylabel('Count')
# ax0.set_xlim(-1.1*np.abs(z_co).max(), 1.1*np.abs(z_co).max())
# ax0.set_ylim(-1.1*r_co.max(), 1.1*r_co.max())
fig0.savefig('Edges_distribution.pdf', dpi=500)
return self.edge_avg
@property
def maximum_edge_count(self):
if not hasattr(self,'edge_max'):
N_edges = np.asarray([self[idx].y.shape[0] for idx in range(len(self.events))])
self.edge_max = N_edges.max()
return self.edge_max
@property
def average_true_edge_count(self):
if not hasattr(self,'true_edge_avg'):
N_true_edges = np.asarray([torch.sum(self[idx].y) for idx in range(len(self.events))])
self.true_edge_avg = N_true_edges.mean()
fig0, (ax0) = plt.subplots(1, 1, dpi=500, figsize=(6, 6))
ax0.hist(N_true_edges)
ax0.set_xlabel('True Edges')
ax0.set_ylabel('Count')
# ax0.set_xlim(-1.1*np.abs(z_co).max(), 1.1*np.abs(z_co).max())
# ax0.set_ylim(-1.1*r_co.max(), 1.1*r_co.max())
fig0.savefig('True_edges_distribution.pdf', dpi=500)
return self.true_edge_avg
@property
def maximum_true_edge_count(self):
if not hasattr(self,'true_edge_max'):
N_true_edges = np.asarray([torch.sum(self[idx].y) for idx in range(len(self.events))])
self.true_edge_max = N_true_edges.max()
return self.true_edge_max
@property
def average_total_true_edge_count(self):
if not hasattr(self,'total_true_edge_avg'):
true_edges = np.asarray([torch.sum(self[idx].track_attr[:,3])-self[idx].track_attr.shape[0] for idx in range(len(self.events))])
if not self.directed:
self.total_true_edge_avg = 2*true_edges.mean()
else:
self.total_true_edge_avg = true_edges.mean()
return self.total_true_edge_avg
@property
def average_total_pixel_true_edge_count(self):
if not hasattr(self,'total_pixel_true_edge_avg'):
true_edges = np.asarray([torch.sum(self[idx].track_attr_pix[:,3])-self[idx].track_attr_pix.shape[0] for idx in range(len(self.events))])
if not self.directed:
self.total_pixel_true_edge_avg = 2*true_edges.mean()
else:
self.total_pixel_true_edge_avg = true_edges.mean()
return self.total_pixel_true_edge_avg
@property
def average_pruned_pixel_true_edge_count(self):
if not hasattr(self,'pruned_pixel_true_edge_avg'):
true_edges = np.asarray([torch.sum(self[idx].track_attr_pruned[:,3])-self[idx].track_attr_pruned.shape[0] for idx in range(len(self.events))])
if not self.directed:
self.pruned_pixel_true_edge_avg = 2*true_edges.mean()
else:
self.pruned_pixel_true_edge_avg = true_edges.mean()
return self.pruned_pixel_true_edge_avg
@property
def average_total_track_count(self):
if not hasattr(self,'total_track_avg'):
N_tracks = np.asarray([self[idx].track_attr.shape[0] for idx in range(len(self.events))])
self.total_track_avg = N_tracks.mean()
return self.total_track_avg
@property
def average_pixel_track_count(self):
if not hasattr(self,'pixel_track_avg'):
N_tracks = np.asarray([self[idx].track_attr_pix.shape[0] for idx in range(len(self.events))])
self.pixel_track_avg = N_tracks.mean()
return self.pixel_track_avg
@property
def average_pixel_track_threshold_count(self):
if not hasattr(self,'pixel_track_threshold_avg'):
N_tracks = np.asarray([self[idx].track_attr_pruned[self[idx].track_attr_pruned[:,3] > 2].shape[0] for idx in range(len(self.events))])
self.pixel_track_threshold_avg = N_tracks.mean()
return self.pixel_track_threshold_avg
def download(self):
raise RuntimeError(
'Dataset not found. Please download it from {} and move all '
'*.csv files to {}'.format(self.url, self.raw_dir))
def len(self):
N_events = len(self.events)
N_augments = 2 if self.augments else 1
return N_events*self.n_phi_sections*self.n_eta_sections*N_augments
def __len__(self):
N_events = len(self.events)
N_augments = 2 if self.augments else 1
return N_events*self.n_phi_sections*self.n_eta_sections*N_augments
def read_hits(self, idx):
hits_filename = osp.join(self.raw_dir, f'event{idx}-hits.csv')
hits = pandas.read_csv(
hits_filename, usecols=['hit_id', 'x', 'y', 'z', 'volume_id', 'layer_id', 'module_id'],
dtype={
'hit_id': np.int64,
'x': np.float32,
'y': np.float32,
'z': np.float32,
'volume_id': np.int64,
'layer_id': np.int64,
'module_id': np.int64
})
return hits
def read_cells(self, idx):
cells_filename = osp.join(self.raw_dir, f'event{idx}-cells.csv')
cells = pandas.read_csv(
cells_filename, usecols=['hit_id', 'ch0', 'ch1', 'value'],
dtype={
'hit_id': np.int64,
'ch0': np.int64,
'ch1': np.int64,
'value': np.float32
})
return cells
def read_particles(self, idx):
particles_filename = osp.join(self.raw_dir, f'event{idx}-particles.csv')
if self.data_type == "TrackML":
particles = pandas.read_csv(
particles_filename, usecols=['particle_id', 'vx', 'vy', 'vz', 'px', 'py', 'pz', 'q', 'nhits'],
dtype={
'particle_id': np.int64,
'vx': np.float32,
'vy': np.float32,
'vz': np.float32,
'px': np.float32,
'py': np.float32,
'pz': np.float32,
'q': np.int64,
'nhits': np.int64
})
elif self.data_type == "ATLAS":
particles = pandas.read_csv(
particles_filename, usecols=['particle_id', 'px', 'py', 'pz', 'pt', 'eta', 'vx', 'vy', 'vz', 'radius', 'status', 'charge', 'pdgId', 'pass'],
dtype={
'particle_id': np.int64,
'px': np.float32,
'py': np.float32,
'pz': np.float32,
'pt': np.float32,
'eta': np.float32,
'vx': np.float32,
'vy': np.float32,
'vz': np.float32,
'radius': np.float32,
'status': np.int64,
'charge': np.float32,
'pdgId': np.int64,
'pass': str
})
return particles
def read_truth(self, idx):
truth_filename = osp.join(self.raw_dir, f'event{idx}-truth.csv')
if self.data_type == "TrackML":
truth = pandas.read_csv(
truth_filename, usecols=['hit_id', 'particle_id', 'tx', 'ty', 'tz', 'tpx', 'tpy', 'tpz', 'weight'],
dtype={
'hit_id': np.int64,
'particle_id': np.int64,
'tx': np.float32,
'ty': np.float32,
'tz': np.float32,
'tpx': np.float32,
'tpy': np.float32,
'tpz': np.float32,
'weight': np.float32
})
elif self.data_type == "ATLAS":
truth = pandas.read_csv(
# truth_filename, usecols=['hit_id', 'x', 'y', 'z', 'cluster_index_1', 'cluster_index_2', 'particle_id', 'hardware', 'cluster_x', 'cluster_y', 'cluster_z', 'barrel_endcap', 'layer_disk', 'eta_module', 'phi_module', 'eta_angle', 'phi_angle', 'norm_x', 'norm_y', 'norm_z'],
truth_filename, usecols=['hit_id', 'x', 'y', 'z', 'cluster_index_1', 'cluster_index_2', 'particle_id', 'hardware', 'barrel_endcap', 'layer_disk', 'eta_module', 'phi_module'],
dtype={
'hit_id': np.int64,
'x': np.float32,
'y': np.float32,
'z': np.float32,
'cluster_index_1': np.int64,
'cluster_index_2': np.int64,
'particle_id': np.int64,
'hardware': str,
# 'cluster_x': np.float32,
# 'cluster_y': np.float32,
# 'cluster_z': np.float32,
'barrel_endcap': np.int64,
'layer_disk': np.int64,
'eta_module': np.int64,
'phi_module': np.int64
# 'eta_angle': np.float32,
# 'phi_angle': np.float32,
# 'norm_x': np.float32,
# 'norm_y': np.float32,
# 'norm_z': np.float32
})
return truth
def build_module_map(self, hits, particles, truth):
return 1
def select_hits(self, hits, particles, truth):
# print('Selecting Hits')
valid_layer = 20 * self.volume_layer_ids[:,0] + self.volume_layer_ids[:,1]
n_det_layers = len(valid_layer)
layer = torch.from_numpy(20 * hits['volume_id'].values + hits['layer_id'].values)
index = layer.unique(return_inverse=True)[1]
hits = hits[['hit_id', 'x', 'y', 'z', 'module_id']].assign(layer=layer, index=index)
valid_groups = hits.groupby(['layer'])
hits = pandas.concat([valid_groups.get_group(valid_layer.numpy()[i]) for i in range(n_det_layers)])
pt = np.sqrt(particles['px'].values**2 + particles['py'].values**2)
particles = particles[np.bitwise_and(pt > self.pt_range[0], pt < self.pt_range[1])]
# Manually creates the noise particle
if self.noise:
particles.loc[len(particles)] = [0,0,0,0,0,0,0,0,0]
hits = (hits[['hit_id', 'x', 'y', 'z', 'module_id', 'index']].merge(truth[['hit_id', 'particle_id']], on='hit_id'))
hits = (hits.merge(particles[['particle_id']], on='particle_id'))
r = np.sqrt(hits['x'].values**2 + hits['y'].values**2)
phi = np.arctan2(hits['y'].values, hits['x'].values)
theta = np.arctan2(r,hits['z'].values)
eta = -1*np.log(np.tan(theta/2))
hits = hits[['z', 'index', 'particle_id', 'module_id']].assign(r=r, phi=phi, eta=eta)
# Splits out the noise hits from the true hits
if self.noise:
noise = hits.groupby(['particle_id']).get_group(0)
hits = hits.drop(hits.groupby(['particle_id']).get_group(0).index)
# Remove duplicate true hits within same layer
if not self.duplicates:
# hits = hits.loc[hits.groupby(['particle_id', 'index'], as_index=False).r.idxmin()]
hits = hits.loc[hits.groupby(['particle_id', 'index'], as_index=False).r.idxmin().r.values.tolist()]
# Append the noise hits back to the list
if self.noise:
hits = pandas.concat([noise, hits])
r = torch.from_numpy(hits['r'].values)
phi = torch.from_numpy(hits['phi'].values)
z = torch.from_numpy(hits['z'].values)
eta = torch.from_numpy(hits['eta'].values)
layer = torch.from_numpy(hits['index'].values)
particle = torch.from_numpy(hits['particle_id'].values)
module = torch.from_numpy(hits['module_id'].values)
pos = torch.stack([r, phi, z], 1)
return pos, layer, particle, eta
def select_hits_atlas(self, particles, truth):
# print('Selecting Hits')
valid_layer = 20 * self.volume_layer_ids[:,0] + self.volume_layer_ids[:,1]
n_det_layers = len(valid_layer)
truth.loc[truth['hardware'] == 'STRIP','barrel_endcap'] = truth.loc[truth['hardware'] == 'STRIP','barrel_endcap'] + 100
layer = torch.from_numpy(20 * truth['barrel_endcap'].values + truth['layer_disk'].values)
index = layer.unique(return_inverse=True)[1]
truth = truth[['hit_id', 'x', 'y', 'z', 'particle_id']].assign(layer=layer, index=index)
valid_groups = truth.groupby(['layer'])
truth = pandas.concat([valid_groups.get_group(valid_layer.numpy()[i]) for i in range(n_det_layers)])
pt = particles['pt'].values/1000
particles = particles[np.bitwise_and(pt > self.pt_range[0], pt < self.pt_range[1])]
# Manually creates the noise particle
if self.noise:
            particles.loc[len(particles)] = [0] * len(particles.columns)  # the ATLAS particle table has more columns than TrackML's nine
hits = (truth.merge(particles[['particle_id']], on='particle_id'))
r = np.sqrt(hits['x'].values**2 + hits['y'].values**2)
phi = np.arctan2(hits['y'].values, hits['x'].values)
theta = np.arctan2(r,hits['z'].values)
eta = -1*np.log(np.tan(theta/2))
hits = hits[['z', 'index', 'particle_id']].assign(r=r, phi=phi, eta=eta)
# Splits out the noise hits from the true hits
if self.noise:
noise = hits.groupby(['particle_id']).get_group(0)
hits = hits.drop(hits.groupby(['particle_id']).get_group(0).index)
# Remove duplicate true hits within same layer
if not self.duplicates:
hits = hits.loc[hits.groupby(['particle_id', 'index'], as_index=False).r.idxmin()]
# Append the noise hits back to the list
if self.noise:
hits = pandas.concat([noise, hits])
r = torch.from_numpy(hits['r'].values)
phi = torch.from_numpy(hits['phi'].values)
z = torch.from_numpy(hits['z'].values)
eta = torch.from_numpy(hits['eta'].values)
layer = torch.from_numpy(hits['index'].values)
particle = torch.from_numpy(hits['particle_id'].values)
pos = torch.stack([r, phi, z], 1)
return pos, layer, particle, eta
def compute_edge_index(self, pos, layer):
# print("Constructing Edge Index")
edge_indices = torch.empty(2,0, dtype=torch.long)
layer_pairs = self.layer_pairs
if self.layer_pairs_plus:
layers = layer.unique()
layer_pairs_plus = torch.tensor([[layers[i],layers[i]] for i in range(layers.shape[0])])
layer_pairs = torch.cat((layer_pairs, layer_pairs_plus), 0)
for (layer1, layer2) in layer_pairs:
mask1 = layer == layer1
mask2 = layer == layer2
nnz1 = mask1.nonzero().flatten()
nnz2 = mask2.nonzero().flatten()
dr = pos[:, 0][mask2].view(1, -1) - pos[:, 0][mask1].view(-1, 1)
dphi = pos[:, 1][mask2].view(1, -1) - pos[:, 1][mask1].view(-1, 1)
dz = pos[:, 2][mask2].view(1, -1) - pos[:, 2][mask1].view(-1, 1)
dphi[dphi > np.pi] -= 2 * np.pi
dphi[dphi < -np.pi] += 2 * np.pi
# Calculate phi_slope and z0 which will be cut on
phi_slope = dphi / dr
z0 = pos[:, 2][mask1].view(-1, 1) - pos[:, 0][mask1].view(-1, 1) * dz / dr
# Check for intersecting edges between barrel and endcap connections
            intersected_layer = dr.abs() < -1  # always False: start with no edges flagged as intersecting
if (self.intersect and self.data_type == "TrackML"):
if((layer1 == 7 and (layer2 == 6 or layer2 == 11)) or
(layer2 == 7 and (layer1 == 6 or layer1 == 11))):
z_int = 71.56298065185547 * dz / dr + z0
intersected_layer = z_int.abs() < 490.975
elif((layer1 == 8 and (layer2 == 6 or layer2 == 11)) or
(layer2 == 8 and (layer1 == 6 or layer1 == 11))):
z_int = 115.37811279296875 * dz / dr + z0
intersected_layer = z_int.abs() < 490.975
elif (self.intersect and self.data_type == "ATLAS"):
if((layer1 == 21 and (layer2 == 15 or layer2 == 25)) or
(layer2 == 21 and (layer1 == 15 or layer1 == 25))):
z_int = 562 * dz / dr + z0
intersected_layer = z_int.abs() < 1400
elif((layer1 == 22 and (layer2 == 15 or layer2 == 25)) or
(layer2 == 22 and (layer1 == 15 or layer1 == 25))):
z_int = 762 * dz / dr + z0
intersected_layer = z_int.abs() < 1400
elif((layer1 == 4 and layer2 == 15) or (layer1 == 14 and layer2 == 25) or
(layer2 == 4 and layer1 == 15) or (layer2 == 14 and layer1 == 25)):
z_int = 405 * dz / dr + z0
intersected_layer = z_int.abs() < 1400
elif((layer1 == 4 and layer2 == 20) or (layer1 == 14 and layer2 == 30) or
(layer2 == 4 and layer1 == 20) or (layer2 == 14 and layer1 == 30)):
r0 = pos[:, 0][mask1].view(-1, 1) - pos[:, 2][mask1].view(-1, 1) * dr / dz
r_int = 2602 * dr / dz + r0
intersected_layer = r_int.abs() > 384.5
# intersected_layer = r_int > 384.5 & r_int < 967.8
elif((layer1 == 4 and layer2 == 19) or (layer1 == 14 and layer2 == 29) or
(layer2 == 4 and layer1 == 19) or (layer2 == 14 and layer1 == 29)):
r0 = pos[:, 0][mask1].view(-1, 1) - pos[:, 2][mask1].view(-1, 1) * dr / dz
r_int = 2252 * dr / dz + r0
intersected_layer = r_int.abs() > 384.5
# intersected_layer = r_int > 384.5 & r_int < 967.8
elif((layer1 == 4 and layer2 == 18) or (layer1 == 14 and layer2 == 28) or
(layer2 == 4 and layer1 == 18) or (layer2 == 14 and layer1 == 28)):
r0 = pos[:, 0][mask1].view(-1, 1) - pos[:, 2][mask1].view(-1, 1) * dr / dz
r_int = 1952 * dr / dz + r0
intersected_layer = r_int.abs() > 384.5
# intersected_layer = r_int > 384.5 & r_int < 967.8
elif((layer1 == 4 and layer2 == 17) or (layer1 == 14 and layer2 == 27) or
(layer2 == 4 and layer1 == 17) or (layer2 == 14 and layer1 == 27)):
r0 = pos[:, 0][mask1].view(-1, 1) - pos[:, 2][mask1].view(-1, 1) * dr / dz
r_int = 1702 * dr / dz + r0
intersected_layer = r_int.abs() > 384.5
# intersected_layer = r_int > 384.5 & r_int < 967.8
elif((layer1 == 4 and layer2 == 16) or (layer1 == 14 and layer2 == 26) or
(layer2 == 4 and layer1 == 16) or (layer2 == 14 and layer1 == 26)):
r0 = pos[:, 0][mask1].view(-1, 1) - pos[:, 2][mask1].view(-1, 1) * dr / dz
r_int = 1512 * dr / dz + r0
intersected_layer = r_int.abs() > 384.5
# intersected_layer = r_int > 384.5 & r_int < 967.8
adj = (phi_slope.abs() < self.phi_slope_max) & (z0.abs() < self.z0_max) & (intersected_layer == False)
row, col = adj.nonzero().t()
row = nnz1[row]
col = nnz2[col]
edge_index = torch.stack([row, col], dim=0)
edge_indices = torch.cat((edge_indices, edge_index), 1)
return edge_indices
def compute_y_index(self, edge_indices, particle):
# print("Constructing y Index")
pid1 = [ particle[i].item() for i in edge_indices[0] ]
pid2 = [ particle[i].item() for i in edge_indices[1] ]
# print(pid1)
# print(pid2)
y = np.zeros(edge_indices.shape[1], dtype=np.int64)
for i in range(edge_indices.shape[1]):
if pid1[i] == pid2[i] and pid1[i] != 0:
y[i] = 1
return torch.from_numpy(y)
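    # Hedged alternative (illustrative, not part of the original class): the same
    # truth labels can be built without the per-edge Python loop by indexing the
    # particle tensor with the edge index directly.
    def compute_y_index_vectorized(self, edge_indices, particle):
        pid1 = particle[edge_indices[0]]
        pid2 = particle[edge_indices[1]]
        # an edge is true when both hits belong to the same non-noise particle
        return ((pid1 == pid2) & (pid1 != 0)).long()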
def split_detector_sections(self, pos, layer, particle, eta, phi_edges, eta_edges):
pos_sect, layer_sect, particle_sect = [], [], []
for i in range(len(phi_edges) - 1):
phi_mask1 = pos[:,1] > phi_edges[i]
phi_mask2 = pos[:,1] < phi_edges[i+1]
phi_mask = phi_mask1 & phi_mask2
phi_pos = pos[phi_mask]
phi_layer = layer[phi_mask]
phi_particle = particle[phi_mask]
phi_eta = eta[phi_mask]
for j in range(len(eta_edges) - 1):
eta_mask1 = phi_eta > eta_edges[j]
eta_mask2 = phi_eta < eta_edges[j+1]
eta_mask = eta_mask1 & eta_mask2
phi_eta_pos = phi_pos[eta_mask]
phi_eta_layer = phi_layer[eta_mask]
phi_eta_particle = phi_particle[eta_mask]
pos_sect.append(phi_eta_pos)
layer_sect.append(phi_eta_layer)
particle_sect.append(phi_eta_particle)
return pos_sect, layer_sect, particle_sect
def read_event(self, idx):
if self.data_type == "TrackML":
hits = self.read_hits(idx)
particles = self.read_particles(idx)
truth = self.read_truth(idx)
elif self.data_type == "ATLAS":
hits = 0
particles = self.read_particles(idx)
truth = self.read_truth(idx)
return hits, particles, truth
def process(self, reprocess=False):
print('Constructing Graphs using n_workers = ' + str(self.n_workers))
task_paths = np.array_split(self.processed_paths, self.n_tasks)
for i in range(self.n_tasks):
if reprocess or not self.files_exist(task_paths[i]):
self.process_task(i)
def process_task(self, idx):
print('Running task ' + str(idx))
task_events = np.array_split(self.events, self.n_tasks)
with mp.Pool(processes = self.n_workers) as pool:
pool.map(self.process_event, tqdm(task_events[idx]))
def process_event(self, idx):
hits, particles, truth = self.read_event(idx)
if (self.mmap):
module_map = self.build_module_map(hits, particles, truth)
if self.data_type == "TrackML":
pos, layer, particle, eta = self.select_hits(hits, particles, truth)
elif self.data_type == "ATLAS":
pos, layer, particle, eta = self.select_hits_atlas(particles, truth)
tracks = torch.empty(0, dtype=torch.long)
track_attr = torch.empty(0, dtype=torch.long)
track_attr_pix = torch.empty(0, dtype=torch.long)
track_attr_pruned = torch.empty(0, dtype=torch.long)
if(self.tracking):
if self.data_type == "TrackML":
tracks, track_attr, track_attr_pix, track_attr_pruned = self.build_tracks(hits, particles, truth)
elif self.data_type == "ATLAS":
tracks, track_attr, track_attr_pix, track_attr_pruned = self.build_tracks_atlas(particles, truth)
phi_edges = np.linspace(*(-np.pi, np.pi), num=self.n_phi_sections+1)
eta_edges = np.linspace(*self.eta_range, num=self.n_eta_sections+1)
pos_sect, layer_sect, particle_sect = self.split_detector_sections(pos, layer, particle, eta, phi_edges, eta_edges)
for i in range(len(pos_sect)):
edge_index = self.compute_edge_index(pos_sect[i], layer_sect[i])
y = self.compute_y_index(edge_index, particle_sect[i])
edge_votes = torch.zeros(edge_index.shape[1], 0, dtype=torch.long)
# edge_votes = torch.zeros(edge_index.shape[1], 2, dtype=torch.long)
if(self.hough):
# accumulator0, accumulator1 = self.build_accumulator(pos_sect[i])
# edge_votes = self.extract_votes(accumulator0, accumulator1, pos_sect[i], edge_index)
edge_votes = self.extract_votes(pos_sect[i], edge_index)
data = Data(x=pos_sect[i], edge_index=edge_index, edge_attr=edge_votes, y=y, tracks=tracks, track_attr=track_attr, track_attr_pix=track_attr_pix, track_attr_pruned=track_attr_pruned, particles=particle_sect[i])
if not self.directed and not data.is_undirected():
rows,cols = data.edge_index
temp = torch.stack((cols,rows))
data.edge_index = torch.cat([data.edge_index,temp],dim=-1)
data.y = torch.cat([data.y,data.y])
data.edge_attr = torch.cat([data.edge_attr,data.edge_attr])
if (self.augments):
data_a = copy.deepcopy(data)
data_a.x[:,1]= -data_a.x[:,1]
torch.save(data_a, osp.join(self.processed_dir, 'event{}_section{}_aug.pt'.format(idx, i)))
torch.save(data, osp.join(self.processed_dir, 'event{}_section{}.pt'.format(idx, i)))
# if self.pre_filter is not None and not self.pre_filter(data):
# continue
#
# if self.pre_transform is not None:
# data = self.pre_transform(data)
def get(self, idx):
data = torch.load(self.processed_files[idx])
return data
def draw(self, idx, dpi=500):
# print("Making plots for " + str(self.processed_files[idx]))
width1 = .1 #blue edge (false)
width2 = .2 #black edge (true)
points = .25 #hit points
dpi = 500
X = self[idx].x.cpu().numpy()
index = self[idx].edge_index.cpu().numpy()
y = self[idx].y.cpu().numpy()
true_index = index[:,y > 0]
r_co = X[:,0]
phi_co = X[:,1]
z_co = X[:,2]
        x_co = X[:,0]*np.cos(X[:,1])
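# Hedged usage sketch (assumed root path; the cuts mirror the defaults shown in
# the constructor above, everything else here is illustrative):
def _trackml_usage_sketch():
    dataset = TrackMLParticleTrackingDataset(
        root='data/trackml',                 # expects raw/event*-truth.csv etc.
        n_events=8,
        volume_layer_ids=[[8, 2], [8, 4], [8, 6], [8, 8]],
        layer_pairs=[[7, 8], [8, 9], [9, 10]],
        pt_range=[1.5, 2])
    return dataset[0]                        # a torch_geometric.data.Data graph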
import unittest
from setup.settings import *
from numpy.testing import *
from pandas.util.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionAddTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_math_binary_add_scalar(self):
self.assertEqual(dnp.add(1.2 + 1j, 1.2 - 1j), np.add(1.2 + 1j, 1.2 - 1j))
self.assertEqual(dnp.add(0.5, 9), np.add(0.5, 9))
self.assertEqual(dnp.add(-1, 8.5), np.add(-1, 8.5))
self.assertEqual(dnp.add(1, 4), 5)
self.assertEqual(np.add(1, 4), 5)
self.assertEqual(dnp.add(1, 4), np.add(1, 4))
self.assertEqual(dnp.add(1, -5), -4)
self.assertEqual(np.add(1, -5), -4)
self.assertEqual(dnp.add(1, -5), np.add(1, -5))
self.assertEqual(dnp.add(0, 9), 9)
self.assertEqual(np.add(0, 9), 9)
self.assertEqual(dnp.add(0, 9), np.add(0, 9))
self.assertEqual(dnp.isnan(dnp.add(dnp.nan, -5)), True)
self.assertEqual(np.isnan(np.add(dnp.nan, -5)), True)
def test_function_math_binary_add_list(self):
lst1 = [1, 2, 3]
lst2 = [4, 6, 9]
assert_array_equal(dnp.add(lst1, lst2), np.add(lst1, lst2))
def test_function_math_binary_add_array_with_scalar(self):
npa = np.array([1, 2, 3])
dnpa = dnp.array([1, 2, 3])
assert_array_equal(dnp.add(dnpa, 1), np.add(npa, 1))
assert_array_equal(dnp.add(dnpa, dnp.nan), np.add(npa, np.nan))
assert_array_equal(dnp.add(1, dnpa), np.add(1, npa))
def test_function_math_binary_add_array_with_array(self):
npa1 = np.array([1, 2, 3])
npa2 = np.array([4, 6, 9])
dnpa1 = dnp.array([1, 2, 3])
dnpa2 = dnp.array([4, 6, 9])
assert_array_equal(dnp.add(dnpa1, dnpa2), np.add(npa1, npa2))
def test_function_math_binary_add_array_with_array_param_out(self):
npa1 = np.array([1, 2, 3])
npa2 = np.array([4, 6, 9])
npa = np.zeros(shape=(1, 3))
dnpa1 = dnp.array([1, 2, 3])
dnpa2 = dnp.array([4, 6, 9])
dnpa = dnp.zeros(shape=(1, 3))
np.add(npa1, npa2, out=npa)
dnp.add(dnpa1, dnpa2, out=dnpa)
# TODO: dolphindb numpy add bug
# assert_array_equal(dnpa.to_numpy(), npa)
def test_function_math_binary_add_array_with_series(self):
npa = np.array([1, 2, 3])
dnpa = dnp.array([1, 2, 3])
ps = pd.Series([4, 6, 9])
os = orca.Series([4, 6, 9])
assert_series_equal(dnp.add(dnpa, os).to_pandas(), np.add(npa, ps))
assert_series_equal(dnp.add(os, dnpa).to_pandas(), np.add(ps, npa))
pser = pd.Series([1, 2, 4])
oser = orca.Series([1, 2, 4])
assert_series_equal(dnp.add(os, oser).to_pandas(), np.add(ps, pser))
def test_function_math_binary_add_array_with_dataframe(self):
        npa = np.array([1, 2, 3])
"""
Divide a given video into multiple shots using the kernel temporal segmentation
library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from __future__ import unicode_literals
import os
from scipy.misc import imresize  # note: removed in SciPy >= 1.3, so this file requires an older SciPy
from PIL import Image
from skimage import color
# from skimage.feature import hog
import numpy as np
import _init_paths # noqa
import utils
from kts.cpd_auto import cpd_auto
def color_hist(im, colBins):
"""
Get color histogram descriptors for RGB and LAB space.
Input: im: (h,w,c): 0-255: np.uint8
Output: descriptor: (colBins*6,)
"""
assert im.ndim == 3 and im.shape[2] == 3, "image should be rgb"
arr = np.concatenate((im, color.rgb2lab(im)), axis=2).reshape((-1, 6))
    desc = np.zeros((colBins * 6,), dtype=float)  # np.float is removed in modern numpy; plain float is equivalent
for i in range(3):
desc[i * colBins:(i + 1) * colBins], _ = np.histogram(
arr[:, i], bins=colBins, range=(0, 255))
desc[i * colBins:(i + 1) * colBins] /= np.sum(
desc[i * colBins:(i + 1) * colBins]) + (
np.sum(desc[i * colBins:(i + 1) * colBins]) < 1e-4)
    i += 1  # continue to the L channel (index 3), which spans 0-100
desc[i * colBins:(i + 1) * colBins], _ = np.histogram(
arr[:, i], bins=colBins, range=(0, 100))
desc[i * colBins:(i + 1) * colBins] /= np.sum(
desc[i * colBins:(i + 1) * colBins]) + (
np.sum(desc[i * colBins:(i + 1) * colBins]) < 1e-4)
for i in range(4, 6):
desc[i * colBins:(i + 1) * colBins], _ = np.histogram(
arr[:, i], bins=colBins, range=(-128, 127))
desc[i * colBins:(i + 1) * colBins] /= np.sum(
desc[i * colBins:(i + 1) * colBins]) + (
np.sum(desc[i * colBins:(i + 1) * colBins]) < 1e-4)
return desc
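# Illustrative sketch (not in the original file): the descriptor stacks one
# histogram per channel of the concatenated RGB+LAB image, so its length is
# 6 * colBins.
def _color_hist_sketch():
    im = np.random.randint(0, 255, size=(32, 32, 3)).astype(np.uint8)
    desc = color_hist(im, colBins=40)
    return desc.shape   # (240,)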
def compute_features(im, colBins):
"""
    Compute features of images: RGB+LAB color histogram (the HOG features below are disabled)
im: (h,w,c): 0-255: np.uint8
feat: (d,)
"""
colHist = color_hist(im, colBins=colBins)
# hogF = hog(
# color.rgb2gray(im), orientations=hogBins,
# pixels_per_cell=(hogCellSize, hogCellSize),
# cells_per_block=(int(np.sqrt(hogCells)),
# int(np.sqrt(hogCells))),
# visualise=False)
# return np.hstack((hogF, colHist))
return colHist
def vid2shots(imSeq, maxShots=5, vmax=0.6, colBins=40):
"""
Convert a given video into number of shots
imSeq: (n,h,w,c): 0-255: np.uint8: RGB
shotIdx: (k,): start Index of shot: 0-indexed
shotScore: (k,): First change ../lib/kts/cpd_auto.py return value to
scores2 instead of costs (a bug)
"""
X = np.zeros((imSeq.shape[0], compute_features(imSeq[0], colBins).size))
print('Feature Matrix shape:', X.shape)
for i in range(imSeq.shape[0]):
X[i] = compute_features(imSeq[i], colBins)
K = np.dot(X, X.T)
shotIdx, _ = cpd_auto(K, maxShots - 1, vmax)
    shotIdx = np.concatenate(([0], shotIdx))
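# Hedged usage sketch with synthetic input (the file is truncated above, so the
# return value is taken from the docstring: shotIdx holds 0-indexed shot starts):
def _vid2shots_sketch():
    im_seq = np.random.randint(0, 255, size=(40, 64, 64, 3)).astype(np.uint8)
    return vid2shots(im_seq, maxShots=3, vmax=0.6, colBins=40)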
import numpy as np
import scipy.optimize as optimization
import matplotlib.pyplot as plt
try:
from submm_python_routines.KIDs import calibrate
except:
from KIDs import calibrate
from numba import jit # to get working on python 2 I had to downgrade llvmlite pip install llvmlite==0.31.0
# module for fitting resonance curves for kinetic inductance detectors.
# written by <NAME> 12/21/16
# for example see test_fit.py in this directory
# To Do
# I think the error analysis on the fit_nonlinear_iq_with_err probably needs some work
# add in step by step fitting i.e. first amplitude normalization, then cable delay, then i0,q0 subtraction, then phase rotation, then the rest of the fit.
# need to have fit option that just specifies tau because that never really changes for your cryostat
#Change log
#JDW 2017-08-17 added in a keyword/function to allow for gain variation "amp_var" to be taken out before fitting
#JDW 2017-08-30 added in magnitude fitting of resonators i.e. not in iq space
#JDW 2018-03-05 added more clever function for guessing x0 for fits
#JDW 2018-08-23 added more clever guessing for resonators with large phi into separate guess functions
J=np.exp(2j*np.pi/3)
Jc=1/J
@jit(nopython=True)
def cardan(a,b,c,d):
'''
    analytical root finding, fast: using numba looks like a x10 speed up.
    Returns only the largest real root.
'''
u=np.empty(2,np.complex128)
z0=b/3/a
a2,b2 = a*a,b*b
p=-b2/3/a2 +c/a
q=(b/27*(2*b2/a2-9*c/a)+d)/a
D=-4*p*p*p-27*q*q
r=np.sqrt(-D/27+0j)
u=((-q-r)/2)**(1/3.)#0.33333333333333333333333
v=((-q+r)/2)**(1/3.)#0.33333333333333333333333
w=u*v
w0=np.abs(w+p/3)
w1=np.abs(w*J+p/3)
w2=np.abs(w*Jc+p/3)
if w0<w1:
if w2<w0 : v*=Jc
elif w2<w1 : v*=Jc
else: v*=J
roots = np.asarray((u+v-z0, u*J+v*Jc-z0,u*Jc+v*J-z0))
#print(roots)
where_real = np.where(np.abs(np.imag(roots)) < 1e-15)
#if len(where_real)>1: print(len(where_real))
#print(D)
if D>0: return np.max(np.real(roots)) # three real roots
else: return np.real(roots[np.argsort(np.abs(np.imag(roots)))][0]) #one real root get the value that has smallest imaginary component
#return np.max(np.real(roots[where_real]))
#return np.asarray((u+v-z0, u*J+v*Jc-z0,u*Jc+v*J-z0))
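# Illustrative check (not part of the original module): cardan should agree with
# numpy's general polynomial root finder on the resonator cubic
# 4y^3 - 4*yg*y^2 + y - (yg + a) = 0.
def _cardan_check(yg=1.5, a=0.5):
    fast = cardan(4.0, -4.0 * yg, 1.0, -(yg + a))
    roots = np.roots((4.0, -4.0 * yg, 1.0, -(yg + a)))
    real_roots = np.real(roots[np.abs(np.imag(roots)) < 1e-10])
    return fast, np.max(real_roots)   # the two values should match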
# function to describe the magnitude S21 of a nonlinear resonator
@jit(nopython=True)
def nonlinear_mag(x,fr,Qr,amp,phi,a,b0,b1,flin):
'''
    # x is the frequencies your iq sweep covers
# fr is the center frequency of the resonator
# Qr is the quality factor of the resonator
# amp is Qr/Qc
    # phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
    # a is the non-linearity parameter; bifurcation occurs at a = 0.77
    # b0 DC level of s21 away from resonator
    # b1 Frequency dependent gain variation
# flin is probably the frequency of the resonator when a = 0
#
    # This is based on fitting code from MUSIC
    # The idea is we are producing a model that is described by the equation below
    # the first two terms in the large parentheses and all other terms are familiar to me
    # but I am not sure where the last term comes from, though it does seem to be important for fitting
#
# / (j phi) (j phi) \ 2
#|S21|^2 = (b0+b1 x_lin)* |1 -amp*e^ +amp*(e^ -1) |^
# | ------------ ---- |
# \ (1+ 2jy) 2 /
#
    # where the nonlinearity of y is described by the following equation, taken from Response of superconducting microresonators
# with nonlinear kinetic inductance
# yg = y+ a/(1+y^2) where yg = Qr*xg and xg = (f-fr)/fr
#
'''
xlin = (x - flin)/flin
xg = (x-fr)/fr
yg = Qr*xg
y = np.zeros(x.shape[0])
#find the roots of the y equation above
for i in range(0,x.shape[0]):
# 4y^3+ -4yg*y^2+ y -(yg+a)
#roots = np.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#roots = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
#print(roots)
#roots = np.roots((16.,-16.*yg[i],8.,-8.*yg[i]+4*a*yg[i]/Qr-4*a,1.,-yg[i]+a*yg[i]/Qr-a+a**2/Qr)) #more accurate version that doesn't seem to change the fit at al
# only care about real roots
#where_real = np.where(np.imag(roots) == 0)
#where_real = np.where(np.abs(np.imag(roots)) < 1e-10) #analytic version has some floating point error accumulation
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))#np.max(np.real(roots[where_real]))
z = (b0 +b1*xlin)*np.abs(1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))**2
return z
@jit(nopython=True)
def linear_mag(x,fr,Qr,amp,phi,b0):
'''
    # simpler version for quicker fitting when applicable
    # x is the frequencies your iq sweep covers
# fr is the center frequency of the resonator
# Qr is the quality factor of the resonator
# amp is Qr/Qc
    # phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
# b0 DC level of s21 away from resonator
#
    # This is based on fitting code from MUSIC
    # The idea is we are producing a model that is described by the equation below
    # the first two terms in the large parentheses and all other terms are familiar to me
    # but I am not sure where the last term comes from, though it does seem to be important for fitting
#
# / (j phi) (j phi) \ 2
#|S21|^2 = (b0)* |1 -amp*e^ +amp*(e^ -1) |^
# | ------------ ---- |
# \ (1+ 2jxg) 2 /
#
# no y just xg
# with no nonlinear kinetic inductance
'''
if not np.isscalar(fr): #vectorize
x = np.reshape(x,(x.shape[0],1,1,1,1,1))
xg = (x-fr)/fr
z = (b0)*np.abs(1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*xg*Qr) + amp/2.*(np.exp(1.0j*phi) -1.0))**2
return z
# function to describe the i q loop of a nonlinear resonator
@jit(nopython=True)
def nonlinear_iq(x,fr,Qr,amp,phi,a,i0,q0,tau,f0):
'''
    # x is the frequencies your iq sweep covers
# fr is the center frequency of the resonator
# Qr is the quality factor of the resonator
# amp is Qr/Qc
    # phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
    # a is the non-linearity parameter; bifurcation occurs at a = 0.77
# i0
# q0 these are constants that describes an overall phase rotation of the iq loop + a DC gain offset
# tau cabel delay
# f0 is all the center frequency, not sure why we include this as a secondary paramter should be the same as fr
#
    # This is based off of fitting code from MUSIC
    #
    # The idea is we are producing a model that is described by the equation below
    # the first two terms in the large parentheses and all other terms are familiar to me
    # but I am not sure where the last term comes from, though it does seem to be important for fitting
#
# (-j 2 pi deltaf tau) / (j phi) (j phi) \
# (i0+j*q0)*e^ *|1 -amp*e^ +amp*(e^ -1) |
# | ------------ ---- |
# \ (1+ 2jy) 2 /
#
    # where the nonlinearity of y is described by the following equation taken from Response of superconducting microresonators
# with nonlinear kinetic inductance
# yg = y+ a/(1+y^2) where yg = Qr*xg and xg = (f-fr)/fr
#
'''
deltaf = (x - f0)
xg = (x-fr)/fr
yg = Qr*xg
y = np.zeros(x.shape[0])
#find the roots of the y equation above
for i in range(0,x.shape[0]):
# 4y^3+ -4yg*y^2+ y -(yg+a)
#roots = np.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#roots = np.roots((16.,-16.*yg[i],8.,-8.*yg[i]+4*a*yg[i]/Qr-4*a,1.,-yg[i]+a*yg[i]/Qr-a+a**2/Qr)) #more accurate version that doesn't seem to change the fit at al
# only care about real roots
#where_real = np.where(np.imag(roots) == 0)
#y[i] = np.max(np.real(roots[where_real]))
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
z = (i0 +1.j*q0)* np.exp(-1.0j* 2* np.pi *deltaf*tau) * (1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))
return z
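# Minimal usage sketch for nonlinear_iq (a hypothetical helper; illustrative
# values): trace the complex IQ loop, including a 100 ns cable-delay winding.
def _example_nonlinear_iq():
    x = np.linspace(99.9e6, 100.1e6, 1001)
    # (fr, Qr, amp, phi, a, i0, q0, tau, f0)
    z = nonlinear_iq(x, 100.0e6, 15000., 0.5, 0.2, 0.3, 1.0, 0.5, 100e-9, 100.0e6)
    return np.real(z), np.imag(z)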
def nonlinear_iq_for_fitter(x,fr,Qr,amp,phi,a,i0,q0,tau,f0,**keywords):
'''
    when using a fitter that can't handle complex numbers
    one needs to return both the real and imaginary components separately
'''
deltaf = (x - f0)
xg = (x-fr)/fr
yg = Qr*xg
y = np.zeros(x.shape[0])
for i in range(0,x.shape[0]):
#roots = np.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#where_real = np.where(np.imag(roots) == 0)
#y[i] = np.max(np.real(roots[where_real]))
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
z = (i0 +1.j*q0)* np.exp(-1.0j* 2* np.pi *deltaf*tau) * (1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))
real_z = np.real(z)
imag_z = np.imag(z)
return np.hstack((real_z,imag_z))
def brute_force_linear_mag_fit(x,z,ranges,n_grid_points,error = None, plot = False,**keywords):
'''
x frequencies Hz
z complex or abs of s21
ranges is the ranges for each parameter i.e. np.asarray(([f_low,Qr_low,amp_low,phi_low,b0_low],[f_high,Qr_high,amp_high,phi_high,b0_high]))
n_grid_points how finely to sample each parameter space.
this can be very slow for n>10
an increase by a factor of 2 will take 2**5 times longer
    to marginalize you must minimize over the unwanted axes of sum_dev
    i.e. for fr: np.min(np.min(np.min(np.min(fit['sum_dev'],axis = 4),axis = 3),axis = 2),axis = 1)
'''
if error is None:
error = np.ones(len(x))
fs = np.linspace(ranges[0][0],ranges[1][0],n_grid_points)
Qrs = np.linspace(ranges[0][1],ranges[1][1],n_grid_points)
amps = np.linspace(ranges[0][2],ranges[1][2],n_grid_points)
phis = np.linspace(ranges[0][3],ranges[1][3],n_grid_points)
b0s = np.linspace(ranges[0][4],ranges[1][4],n_grid_points)
evaluated_ranges = np.vstack((fs,Qrs,amps,phis,b0s))
a,b,c,d,e = np.meshgrid(fs,Qrs,amps,phis,b0s,indexing = "ij") #always index ij
evaluated = linear_mag(x,a,b,c,d,e)
data_values = np.reshape(np.abs(z)**2,(abs(z).shape[0],1,1,1,1,1))
error = np.reshape(error,(abs(z).shape[0],1,1,1,1,1))
sum_dev = np.sum(((np.sqrt(evaluated)-np.sqrt(data_values))**2/error**2),axis = 0) # comparing in magnitude space rather than magnitude squared
min_index = np.where(sum_dev == np.min(sum_dev))
index1 = min_index[0][0]
index2 = min_index[1][0]
index3 = min_index[2][0]
index4 = min_index[3][0]
index5 = min_index[4][0]
fit_values = np.asarray((fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5]))
fit_values_names = ('f0','Qr','amp','phi','b0')
fit_result = linear_mag(x,fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5])
marginalized_1d = np.zeros((5,n_grid_points))
marginalized_1d[0,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2),axis = 1)
marginalized_1d[1,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2),axis = 0)
marginalized_1d[2,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 1),axis = 0)
marginalized_1d[3,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 1),axis = 0)
marginalized_1d[4,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 1),axis = 0)
marginalized_2d = np.zeros((5,5,n_grid_points,n_grid_points))
#0 _
#1 x _
#2 x x _
#3 x x x _
#4 x x x x _
# 0 1 2 3 4
marginalized_2d[0,1,:] = marginalized_2d[1,0,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2)
marginalized_2d[2,0,:] = marginalized_2d[0,2,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 1)
marginalized_2d[2,1,:] = marginalized_2d[1,2,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 0)
marginalized_2d[3,0,:] = marginalized_2d[0,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 1)
marginalized_2d[3,1,:] = marginalized_2d[1,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 0)
marginalized_2d[3,2,:] = marginalized_2d[2,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 1),axis = 0)
marginalized_2d[4,0,:] = marginalized_2d[0,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 1)
marginalized_2d[4,1,:] = marginalized_2d[1,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 0)
marginalized_2d[4,2,:] = marginalized_2d[2,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 1),axis = 0)
marginalized_2d[4,3,:] = marginalized_2d[3,4,:] = np.min(np.min(np.min(sum_dev,axis = 2),axis = 1),axis = 0)
if plot:
levels = [2.3,4.61] #delta chi squared two parameters 68 90 % confidence
fig_fit = plt.figure(-1)
axs = fig_fit.subplots(5, 5)
for i in range(0,5): # y starting from top
for j in range(0,5): #x starting from left
if i > j:
#plt.subplot(5,5,i+1+5*j)
#axs[i, j].set_aspect('equal', 'box')
extent = [evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1],evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1]]
axs[i,j].imshow(marginalized_2d[i,j,:]-np.min(sum_dev),extent =extent,origin = 'lower', cmap = 'jet')
axs[i,j].contour(evaluated_ranges[j],evaluated_ranges[i],marginalized_2d[i,j,:]-np.min(sum_dev),levels = levels,colors = 'white')
axs[i,j].set_ylim(evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1])
axs[i,j].set_xlim(evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1])
axs[i,j].set_aspect((evaluated_ranges[j,0]-evaluated_ranges[j,n_grid_points-1])/(evaluated_ranges[i,0]-evaluated_ranges[i,n_grid_points-1]))
if j == 0:
axs[i, j].set_ylabel(fit_values_names[i])
if i == 4:
axs[i, j].set_xlabel("\n"+fit_values_names[j])
if i<4:
axs[i,j].get_xaxis().set_ticks([])
if j>0:
axs[i,j].get_yaxis().set_ticks([])
elif i < j:
fig_fit.delaxes(axs[i,j])
for i in range(0,5):
#axes.subplot(5,5,i+1+5*i)
axs[i,i].plot(evaluated_ranges[i,:],marginalized_1d[i,:]-np.min(sum_dev))
axs[i,i].plot(evaluated_ranges[i,:],np.ones(len(evaluated_ranges[i,:]))*1.,color = 'k')
axs[i,i].plot(evaluated_ranges[i,:],np.ones(len(evaluated_ranges[i,:]))*2.7,color = 'k')
axs[i,i].yaxis.set_label_position("right")
axs[i,i].yaxis.tick_right()
axs[i,i].xaxis.set_label_position("top")
axs[i,i].xaxis.tick_top()
axs[i,i].set_xlabel(fit_values_names[i])
#axs[0,0].set_ylabel(fit_values_names[0])
#axs[4,4].set_xlabel(fit_values_names[4])
axs[4,4].xaxis.set_label_position("bottom")
axs[4,4].xaxis.tick_bottom()
#make a dictionary to return
fit_dict = {'fit_values': fit_values,'fit_values_names':fit_values_names, 'sum_dev': sum_dev, 'fit_result': fit_result,'marginalized_2d':marginalized_2d,'marginalized_1d':marginalized_1d,'evaluated_ranges':evaluated_ranges}#, 'x0':x0, 'z':z}
return fit_dict
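# Minimal sketch of calling the brute-force grid fit above (a hypothetical
# helper; the fake "data" is generated from linear_mag itself and all numbers
# are illustrative assumptions). Note z is passed as |S21|, matching the
# magnitude-space comparison done inside the fitter.
def _example_brute_force_linear_mag_fit():
    x = np.linspace(99.95e6, 100.05e6, 201)
    z = np.sqrt(linear_mag(x, 100.0e6, 20000., 0.5, 0.2, 1.0))  # fake |S21|
    ranges = np.asarray(([99.95e6, 1.0e4, 0.1, -np.pi / 2., 0.5],
                         [100.05e6, 4.0e4, 0.9, np.pi / 2., 1.5]))
    fit = brute_force_linear_mag_fit(x, z, ranges, n_grid_points=6)
    return fit['fit_values']  # (f0, Qr, amp, phi, b0) at the grid minimum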
# function for fitting an iq sweep with the above equation
def fit_nonlinear_iq(x,z,**keywords):
'''
    # keywords are
    # bounds ---- which is a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
    # tau --- forces tau to a specific value
    # tau_guess --- fixes the guess for tau without having to specify all of x0
'''
if ('tau' in keywords):
use_given_tau = True
tau = keywords['tau']
else:
use_given_tau = False
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(x),50,.01,-np.pi,0,-np.inf,-np.inf,0,np.min(x)],[np.max(x),200000,1,np.pi,5,np.inf,np.inf,1*10**-6,np.max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
#fr_guess = x[np.argmin(np.abs(z))]
#x0 = [fr_guess,10000.,0.5,0,0,np.mean(np.real(z)),np.mean(np.imag(z)),3*10**-7,fr_guess]
x0 = guess_x0_iq_nonlinear(x,z,verbose = True)
print(x0)
if ('fr_guess' in keywords):
x0[0] = keywords['fr_guess']
if ('tau_guess' in keywords):
x0[7] = keywords['tau_guess']
#Amplitude normalization?
do_amp_norm = 0
if ('amp_norm' in keywords):
amp_norm = keywords['amp_norm']
if amp_norm == True:
do_amp_norm = 1
elif amp_norm == False:
do_amp_norm = 0
else:
print("please specify amp_norm as True or False")
if do_amp_norm == 1:
z = amplitude_normalization(x,z)
z_stacked = np.hstack((np.real(z),np.imag(z)))
if use_given_tau == True:
del bounds[0][7]
del bounds[1][7]
del x0[7]
fit = optimization.curve_fit(lambda x_lamb,a,b,c,d,e,f,g,h: nonlinear_iq_for_fitter(x_lamb,a,b,c,d,e,f,g,tau,h), x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],tau,fit[0][7])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],tau,x0[7])
else:
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
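# Minimal sketch of fit_nonlinear_iq (a hypothetical helper): generate a fake
# sweep from the model and fit it back using the default bounds and initial
# guess. All parameter values are illustrative assumptions.
def _example_fit_nonlinear_iq():
    x = np.linspace(99.9e6, 100.1e6, 1001)
    z = nonlinear_iq(x, 100.0e6, 15000., 0.5, 0.2, 0.3, 1.0, 0.5, 1e-7, 100.0e6)
    fit_dict = fit_nonlinear_iq(x, z)
    return fit_dict['fit'][0]  # best-fit (fr, Qr, amp, phi, a, i0, q0, tau, f0)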
def fit_nonlinear_iq_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
'''
    # same as the above function but takes fine and gain scans separately
    # keywords are
    # bounds ---- which is a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(fine_x),500.,.01,-np.pi,0,-np.inf,-np.inf,1*10**-9,np.min(fine_x)],[np.max(fine_x),1000000,1,np.pi,5,np.inf,np.inf,1*10**-6,np.max(fine_x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
#fr_guess = x[np.argmin(np.abs(z))]
#x0 = [fr_guess,10000.,0.5,0,0,np.mean(np.real(z)),np.mean(np.imag(z)),3*10**-7,fr_guess]
x0 = guess_x0_iq_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
#print(x0)
#Amplitude normalization?
do_amp_norm = 0
if ('amp_norm' in keywords):
amp_norm = keywords['amp_norm']
if amp_norm == True:
do_amp_norm = 1
elif amp_norm == False:
do_amp_norm = 0
else:
print("please specify amp_norm as True or False")
if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
use_err = True
fine_z_err = keywords['fine_z_err']
gain_z_err = keywords['gain_z_err']
else:
use_err = False
x = np.hstack((fine_x,gain_x))
z = np.hstack((fine_z,gain_z))
if use_err:
z_err = np.hstack((fine_z_err,gain_z_err))
if do_amp_norm == 1:
z = amplitude_normalization(x,z)
z_stacked = np.hstack((np.real(z),np.imag(z)))
if use_err:
z_err_stacked = np.hstack((np.real(z_err),np.imag(z_err)))
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,sigma = z_err_stacked,bounds = bounds)
else:
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
if use_err:
#only do it for fine data
#red_chi_sqr = np.sum(z_stacked-np.hstack((np.real(fit_result),np.imag(fit_result))))**2/z_err_stacked**2)/(len(z_stacked)-8.)
red_chi_sqr = np.sum((np.hstack((np.real(fine_z),np.imag(fine_z)))-np.hstack((np.real(fit_result[0:len(fine_z)]),np.imag(fit_result[0:len(fine_z)]))))**2/np.hstack((np.real(fine_z_err),np.imag(fine_z_err)))**2)/(len(fine_z)*2.-8.)
#make a dictionary to return
if use_err:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
else:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
return fit_dict
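# Minimal sketch of fit_nonlinear_iq_sep with separate fine and gain scans
# (a hypothetical helper; fake data comes from the model and all values are
# illustrative assumptions). This relies on the module's default
# guess_x0_iq_nonlinear_sep initial guess.
def _example_fit_nonlinear_iq_sep():
    p = (100.0e6, 15000., 0.5, 0.2, 0.3, 1.0, 0.5, 1e-7, 100.0e6)
    fine_x = np.linspace(99.99e6, 100.01e6, 401)
    gain_x = np.linspace(99.5e6, 100.5e6, 101)
    fine_z = nonlinear_iq(fine_x, *p)
    gain_z = nonlinear_iq(gain_x, *p)
    fit_dict = fit_nonlinear_iq_sep(fine_x, fine_z, gain_x, gain_z)
    return fit_dict['fit'][0]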
# same function but fits twice so that it can get errors and a proper covariance matrix out
def fit_nonlinear_iq_with_err(x,z,**keywords):
'''
    # keywords are
    # bounds ---- which is a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(x),2000,.01,-np.pi,0,-5,-5,1*10**-9,np.min(x)],[np.max(x),200000,1,np.pi,5,5,5,1*10**-6,np.max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
fr_guess = x[np.argmin(np.abs(z))]
x0 = guess_x0_iq_nonlinear(x,z)
#Amplitude normalization?
do_amp_norm = 0
if ('amp_norm' in keywords):
amp_norm = keywords['amp_norm']
if amp_norm == True:
do_amp_norm = 1
elif amp_norm == False:
do_amp_norm = 0
else:
print("please specify amp_norm as True or False")
if do_amp_norm == 1:
z = amplitude_normalization(x,z)
z_stacked = np.hstack((np.real(z),np.imag(z)))
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
fit_result_stacked = nonlinear_iq_for_fitter(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
# get error
var = np.sum((z_stacked-fit_result_stacked)**2)/(z_stacked.shape[0] - 1)
err = np.ones(z_stacked.shape[0])*np.sqrt(var)
# refit
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,err,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
# function for fitting an iq sweep with the above equation
def fit_nonlinear_mag(x,z,**keywords):
'''
    # keywords are
    # bounds ---- which is a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(x),100,.01,-np.pi,0,-np.inf,-np.inf,np.min(x)],[np.max(x),200000,1,np.pi,5,np.inf,np.inf,np.max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
fr_guess = x[np.argmin(np.abs(z))]
#x0 = [fr_guess,10000.,0.5,0,0,np.abs(z[0])**2,np.abs(z[0])**2,fr_guess]
x0 = guess_x0_mag_nonlinear(x,z,verbose = True)
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,bounds = bounds)
fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
def fit_nonlinear_mag_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
'''
    # same as above but fine and gain scans are provided separately
    # keywords are
    # bounds ---- which is a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(fine_x),100,.01,-np.pi,0,-np.inf,-np.inf,np.min(fine_x)],[np.max(fine_x),1000000,100,np.pi,5,np.inf,np.inf,np.max(fine_x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
x0 = guess_x0_mag_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
use_err = True
fine_z_err = keywords['fine_z_err']
gain_z_err = keywords['gain_z_err']
else:
use_err = False
#stack the scans for curvefit
x = np.hstack((fine_x,gain_x))
z = np.hstack((fine_z,gain_z))
if use_err:
z_err = np.hstack((fine_z_err,gain_z_err))
        z_err = np.sqrt(4*np.real(z_err)**2*np.real(z)**2+4*np.imag(z_err)**2*np.imag(z)**2) #propagation of errors; cross term left out
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,sigma = z_err,bounds = bounds)
else:
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,bounds = bounds)
fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
#compute reduced chi squared
if use_err:
#red_chi_sqr = np.sum((np.abs(z)**2-fit_result)**2/z_err**2)/(len(z)-7.)
# only use fine scan for reduced chi squared.
red_chi_sqr = np.sum((np.abs(fine_z)**2-fit_result[0:len(fine_z)])**2/z_err[0:len(fine_z)]**2)/(len(fine_z)-7.)
#make a dictionary to return
if use_err:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
else:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
return fit_dict
def amplitude_normalization(x,z):
'''
    # normalize the amplitude variation; requires a gain scan
    # flag frequencies to use in amplitude normalization
'''
index_use = np.where(np.abs(x-np.median(x))>100000) #100kHz away from resonator
poly = np.polyfit(x[index_use],np.abs(z[index_use]),2)
poly_func = np.poly1d(poly)
normalized_data = z/poly_func(x)*np.median(np.abs(z[index_use]))
return normalized_data
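# Minimal sketch of amplitude_normalization (a hypothetical helper): divide
# out a quadratic gain ripple fitted to points more than 100 kHz from the
# resonator. All values are illustrative; the sweep must be wide enough to
# contain such off-resonance points.
def _example_amplitude_normalization():
    x = np.linspace(99.0e6, 101.0e6, 2001)
    ripple = 1.0 + 1e-14 * (x - 100.0e6) ** 2  # fake cryostat transfer function
    z = ripple * nonlinear_iq(x, 100.0e6, 15000., 0.5, 0.2, 0.3, 1.0, 0.5, 1e-7, 100.0e6)
    return amplitude_normalization(x, z)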
def amplitude_normalization_sep(gain_x,gain_z,fine_x,fine_z,stream_x,stream_z):
'''
    # normalize the amplitude variation; requires a gain scan
    # uses the gain scan to normalize; does not use the fine scan
    # flag frequencies to use in amplitude normalization
'''
index_use = np.where(np.abs(gain_x-np.median(gain_x))>100000) #100kHz away from resonator
poly = np.polyfit(gain_x[index_use],np.abs(gain_z[index_use]),2)
poly_func = np.poly1d(poly)
poly_data = poly_func(gain_x)
normalized_gain = gain_z/poly_data*np.median(np.abs(gain_z[index_use]))
normalized_fine = fine_z/poly_func(fine_x)*np.median(np.abs(gain_z[index_use]))
normalized_stream = stream_z/poly_func(stream_x)*np.median(np.abs(gain_z[index_use]))
amp_norm_dict = {'normalized_gain':normalized_gain,
'normalized_fine':normalized_fine,
'normalized_stream':normalized_stream,
'poly_data':poly_data}
return amp_norm_dict
def guess_x0_iq_nonlinear(x,z,verbose = False):
'''
    # this is less robust than guess_x0_iq_nonlinear_sep
# below. it is recommended to use that instead
#make sure data is sorted from low to high frequency
'''
sort_index = np.argsort(x)
x = x[sort_index]
z = z[sort_index]
#extract just fine data
df = np.abs(x-np.roll(x,1))
fine_df = np.min(df[np.where(df != 0)])
fine_z_index = np.where(df<fine_df*1.1)
fine_z = z[fine_z_index]
fine_x = x[fine_z_index]
#extract the gain scan
gain_z_index = np.where(df>fine_df*1.1)
gain_z = z[gain_z_index]
gain_x = x[gain_z_index]
gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
#guess f0
fr_guess_index = np.argmin(np.abs(z))
#fr_guess = x[fr_guess_index]
fr_guess_index_fine = np.argmin(np.abs(fine_z))
# below breaks if there is not a right and left side in the fine scan
if fr_guess_index_fine == 0:
fr_guess_index_fine = len(fine_x)//2
elif fr_guess_index_fine == (len(fine_x)-1):
fr_guess_index_fine = len(fine_x)//2
fr_guess = fine_x[fr_guess_index_fine]
#guess Q
mag_max = np.max(np.abs(fine_z)**2)
mag_min = np.min(np.abs(fine_z)**2)
mag_3dB = (mag_max+mag_min)/2.
half_distance = np.abs(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index_fine:-1]
left = half_distance[0:fr_guess_index_fine]
right_index = np.argmin(np.abs(right))+fr_guess_index_fine
left_index = np.argmin(np.abs(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = np.max(20*np.log10(np.abs(z)))-np.min(20*np.log10(np.abs(z)))
    amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3-1.3994861426891861e-06*d**4 #polynomial fit to amp versus depth
#guess impedance rotation phi
phi_guess = 0
#guess non-linearity parameter
    #might be able to guess this from the ratio of the min and max distances between iq points in the fine sweep
a_guess = 0
    #i0 and q0 guess
    if np.max(np.abs(fine_z))==np.max(np.abs(z)): #if the resonator has an impedance mismatch rotation that makes the fine scan greater than the cable delay
i0_guess = np.real(fine_z[np.argmax(np.abs(fine_z))])
q0_guess = np.imag(fine_z[np.argmax(np.abs(fine_z))])
else:
i0_guess = (np.real(fine_z[0])+np.real(fine_z[-1]))/2.
q0_guess = (np.imag(fine_z[0])+np.imag(fine_z[-1]))/2.
    #cable delay guess tau
#y = mx +b
#m = (y2 - y1)/(x2-x1)
#b = y-mx
if len(gain_z)>1: #is there a gain scan?
m = (gain_phase - np.roll(gain_phase,1))/(gain_x-np.roll(gain_x,1))
b = gain_phase -m*gain_x
m_best = np.median(m[~np.isnan(m)])
tau_guess = m_best/(2*np.pi)
else:
tau_guess = 3*10**-9
if verbose == True:
print("fr guess = %.2f MHz" %(fr_guess/10**6))
print("Q guess = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
print("amp guess = %.2f" %amp_guess)
print("i0 guess = %.2f" %i0_guess)
print("q0 guess = %.2f" %q0_guess)
print("tau guess = %.2f x 10^-7" %(tau_guess/10**-7))
x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,i0_guess,q0_guess,tau_guess,fr_guess]
return x0
def guess_x0_mag_nonlinear(x,z,verbose = False):
'''
    # this is less robust than guess_x0_mag_nonlinear_sep
    # below. it is recommended to use that instead
#make sure data is sorted from low to high frequency
'''
sort_index = np.argsort(x)
x = x[sort_index]
z = z[sort_index]
#extract just fine data
#this will probably break if there is no fine scan
df = np.abs(x-np.roll(x,1))
fine_df = np.min(df[np.where(df != 0)])
fine_z_index = np.where(df<fine_df*1.1)
fine_z = z[fine_z_index]
fine_x = x[fine_z_index]
#extract the gain scan
gain_z_index = np.where(df>fine_df*1.1)
gain_z = z[gain_z_index]
gain_x = x[gain_z_index]
    gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
import cv2
import numpy as np
from skimage.feature import hog
class FeatureDetector(object):
"""
This class takes care of the feature extraction
"""
def __init__(self):
self.color_space = cv2.COLOR_RGB2YCrCb
self.orientations = 16
self.pixels_per_cell = (12,12)
self.cells_per_block = (2,2)
self.image_size = (32,32)
self.color_feat_size = (64,64)
self.no_of_bins = 32
self.old_heatmap = None
self.color_features = False
self.spatial_features = False
self.HOG_features = True
def get_features(self,image):
"""
        All features of the image are computed here and
are concatenated to form a single feature vector
"""
_image = np.copy(image)
_image = cv2.resize(_image, self.image_size)
_image = self.convert_color_space(_image)
Features = []
if self.color_features:
color_hist = self.get_color_features(_image)
Features.append(color_hist)
if self.spatial_features:
spatial_hist = self.get_spatial_features(_image)
Features.append(spatial_hist)
if self.HOG_features:
hog_hist = self.get_HOG(_image)
Features.append(hog_hist)
# features = np.concatenate((color_hist, spatial_hist, hog_hist))
features = np.concatenate((Features))
return features
def convert_color_space(self,image):
return cv2.cvtColor(image,self.color_space)
def get_spatial_features(self,image):
"""
returns the histogram of individual channels of
image in given color space.
returns stacked feature vector of all 3 channels
"""
ch1_hist = np.histogram(image[:, :, 0], bins=self.no_of_bins)
ch2_hist = np.histogram(image[:, :, 1], bins=self.no_of_bins)
ch3_hist = np.histogram(image[:, :, 2], bins=self.no_of_bins)
return np.concatenate((ch1_hist[0], ch2_hist[0], ch3_hist[0]))
def get_color_features(self,image):
"""
flattens the given channel of the image
returns stacked feature vector of all 3 channels
"""
ch1_featr = image[:,:,0].ravel()
ch2_featr = image[:,:,1].ravel()
ch3_featr = image[:,:,2].ravel()
return np.hstack((ch1_featr, ch2_featr, ch3_featr))
def get_HOG(self,image):
"""
        HOG of every channel of the given image is computed and
        concatenated to form one single feature vector
"""
feat_ch1 = hog(image[:,:,0],
orientations= self.orientations ,
pixels_per_cell= self.pixels_per_cell ,
cells_per_block= self.cells_per_block,
visualise=False)
feat_ch2 = hog(image[:,:,1],
orientations= self.orientations ,
pixels_per_cell= self.pixels_per_cell ,
cells_per_block= self.cells_per_block,
visualise=False)
feat_ch3 = hog(image[:,:,2],
orientations= self.orientations ,
pixels_per_cell= self.pixels_per_cell ,
cells_per_block= self.cells_per_block,
visualise=False)
return np.concatenate((feat_ch1, feat_ch2, feat_ch3))
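    # Minimal usage sketch (kept as a comment so nothing runs at import time;
    # 'example_car.png' is a hypothetical path, not part of this repo). Note
    # cv2.imread returns BGR while get_features assumes an RGB input, given
    # the cv2.COLOR_RGB2YCrCb conversion above:
    #
    #   detector = FeatureDetector()
    #   bgr = cv2.imread('example_car.png')
    #   rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    #   features = detector.get_features(rgb)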
def get_heatmap(self,image,bboxes,threshold=2):
"""
A heatmap of image is created and heat is added in the region
covered by individual bounding box. Threshold is then applied
and outliers are removed
"""
        heat_map = np.zeros((image.shape[0],image.shape[1]))
"""
Unit tests for optimizers.
"""
import numpy as np
import pytest
from numpy.linalg import norm
from sklearn.base import BaseEstimator
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import Lasso
from sklearn.utils.validation import check_is_fitted
from pysindy.optimizers import ConstrainedSR3
from pysindy.optimizers import SINDyOptimizer
from pysindy.optimizers import SR3
from pysindy.optimizers import STLSQ
from pysindy.utils import supports_multiple_targets
class DummyLinearModel(BaseEstimator):
# Does not natively support multiple targets
def fit(self, x, y):
        self.coef_ = np.ones(x.shape[1])
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
astrometry_output_comparison.py
This script is intended to load a star tracker
output file and an astrometry.net output file
and then compare their outputs in order to enable
the use of astrometry.net as an independent way to
assess the accuracy of the results.
'''
################################
#IMPORT MODULES
################################
import os
import csv
import time
import numpy as np
import pandas as pd
import output_converter
import matplotlib.pyplot as plt
from math import *
from datetime import datetime
from star_tracker.support_functions import *
from star_tracker.array_transformations import *
################################
#USER INPUT
################################
enable_plots = True #enable/disable plotting
plot_filename_prefix = '' #the prefix for your plot filenames
plot_title_prefix = '' #the prefix for your plot titles
star_tracker_filename = '' #the name of the first file to compare, output from the star tracker
star_tracker_image_name_fieldname = 'image name'
star_tracker_quat_scalar_fieldname = 'qs'
star_tracker_quat_vec1_fieldname = 'qv0'
star_tracker_quat_vec2_fieldname = 'qv1'
star_tracker_quat_vec3_fieldname = 'qv2'
star_tracker_solvetime_fieldname = 'image solve time (s)'
astrometry_filename = '' #the name of the second file to compare, output from astrometry
astrometry_image_name_fieldname = 'image_name'
astrometry_right_asc_fieldname = 'ra'
astrometry_dec_fieldname = 'dec'
################################
#MAIN CODE
################################
rad2deg = 180/pi
deg2rad = pi/180
# load data
print("loading data")
st_df = pd.read_csv(star_tracker_filename)
astro_df = pd.read_csv(astrometry_filename)
# get total number of file 1 solns
file1_solns = 0
file1_soln_names = []
file1_solvetime_good = []
file1_solvetime_bad = []
for n in range(0,len(st_df[star_tracker_image_name_fieldname])):
if st_df[star_tracker_quat_scalar_fieldname][n] < 999:
file1_solns+=1
file1_soln_names+=[st_df[star_tracker_image_name_fieldname][n]]
file1_solvetime_good+=[st_df[star_tracker_solvetime_fieldname][n]]
else:
file1_solvetime_bad+=[st_df[star_tracker_solvetime_fieldname][n]]
# get total number of file 2 solns
file2_solns = 0
file2_soln_names = []
file2_solvetime_good = []
file2_solvetime_bad = []
for n in range(0,len(astro_df[astrometry_image_name_fieldname])):
if astro_df[astrometry_right_asc_fieldname][n] < 999:
file2_solns+=1
file2_soln_names+=[astro_df[astrometry_image_name_fieldname][n]]
print("\nFile 1 ("+star_tracker_filename+") has "+str(file1_solns)+" solutions of "+str(len(st_df[star_tracker_quat_scalar_fieldname]))+" total ("+str((file1_solns/len(st_df[star_tracker_quat_scalar_fieldname]))*100)[:5]+"%)")
print("\nFile 2 ("+astrometry_filename+") has "+str(file2_solns)+" solutions of "+str(len(astro_df[astrometry_right_asc_fieldname]))+" total ("+str((file2_solns/len(astro_df[astrometry_right_asc_fieldname]))*100)[:5]+"%)")
#compare and identify number of images with no matching solution
print("\nprocessing data...")
common_names = []
theta_err = []
st_ra = []
st_dec = []
astro_ra = []
astro_dec = []
q1_s = []
q1_v0 = []
q1_v1 = []
q1_v2 = []
nonzero = 0
r=1
for st_name in file1_soln_names:
the_name = st_name.split('/')
st_comp_name = the_name[-1].split('\\')[-1]
for astro_name in file2_soln_names:
the_name = astro_name.split('/')
astro_comp_name = the_name[-1].split('\\')[-1]
if st_comp_name == astro_comp_name:
common_names+=[st_comp_name]
# extract ST quat
q1_s += [st_df.loc[st_df[star_tracker_image_name_fieldname] == st_name, star_tracker_quat_scalar_fieldname].values[0]]
q1_v0 += [st_df.loc[st_df[star_tracker_image_name_fieldname] == st_name, star_tracker_quat_vec1_fieldname].values[0]]
q1_v1 += [st_df.loc[st_df[star_tracker_image_name_fieldname] == st_name, star_tracker_quat_vec2_fieldname].values[0]]
q1_v2 += [st_df.loc[st_df[star_tracker_image_name_fieldname] == st_name, star_tracker_quat_vec3_fieldname].values[0]]
# convert ST quat to RA/DEC
euler_matrix = np.zeros([1,3])
euler_matrix[0] = output_converter.conversion.convert_quaternion('ZXZ', q1_v0[-1], q1_v1[-1], q1_v2[-1], q1_s[-1], degrees=True)[2]
euler_matrix[0,0] = euler_matrix[0,0] - 90
euler_matrix[0,1] = 90 - euler_matrix[0,1]
euler_matrix[0,2] = euler_matrix[0,2] + 180
#print("------------------------")
#print(euler_matrix[:,0])
#print(euler_matrix[:,1])
#print(euler_matrix[:,2])
st_ra+= list(euler_matrix[:,0])
st_dec+= list(euler_matrix[:,1])
#print(st_ra)
#print(st_dec)
# convert ST RA/Dec to unit vector
x_st = r*cos(st_dec[-1]*deg2rad)*cos(st_ra[-1]*deg2rad)
y_st = r*cos(st_dec[-1]*deg2rad)*sin(st_ra[-1]*deg2rad)
z_st = r*sin(st_dec[-1]*deg2rad)
#extract astrometry RA/Dec
astro_ra += [astro_df.loc[astro_df[astrometry_image_name_fieldname] == astro_name, astrometry_right_asc_fieldname].values[0]]
astro_dec += [astro_df.loc[astro_df[astrometry_image_name_fieldname] == astro_name, astrometry_dec_fieldname].values[0]]
#wrap vals to more closely align with the star tracker's output
if astro_ra[-1] > 180: astro_ra[-1]=(astro_ra[-1]-360)
            if astro_dec[-1] > 180: astro_dec[-1]=(astro_dec[-1]-360)
#print(astro_ra)
#print(astro_dec)
# convert astrometry RA/Dec to unit vector
x_astro = r*cos(astro_dec[-1]*deg2rad)*cos(astro_ra[-1]*deg2rad)
y_astro = r*cos(astro_dec[-1]*deg2rad)*sin(astro_ra[-1]*deg2rad)
z_astro = r*sin(astro_dec[-1]*deg2rad)
# calculate dot product
a_dot_b = x_st*x_astro+y_st*y_astro+z_st*z_astro
a_mag = sqrt(x_st**2+y_st**2+z_st**2)
b_mag = sqrt(x_astro**2+y_astro**2+z_astro**2)
#calculate delta angle
theta_err += [acos(a_dot_b/(a_mag*b_mag))*rad2deg]
print("\n "+str(len(common_names))+" common solutions identified between the files\n")
print("\n...processing complete!")
# save output
the_data = {'image_name':common_names,"ST_qs":q1_s,"ST_qv0":q1_v0,"ST_qv1":q1_v1,"ST_qv2":q1_v2,"ST_RA_deg":st_ra,"ST_Dev_deg":st_dec,"Astro_RA_deg":astro_ra,"Astro_Dec_deg":astro_dec,"delta_angle_deg":theta_err}
now = str(datetime.now())
now = now.split('.')
now = now[0]
now = now.replace(' ','_')
now = now.replace(':','-')
#write data
keys=sorted(the_data.keys())
with open(os.path.join(os.getcwd(), now+'_astrometry_compare.csv'),'w', newline='') as csv_file:
writer=csv.writer(csv_file)
writer.writerow(keys)
writer.writerows(zip(*[the_data[key] for key in keys]))
# plot
if enable_plots:
print("Plotting...")
n=0
plt.figure(n)
plt.hist(st_df[star_tracker_solvetime_fieldname], bins='auto')
plt.ylabel('#')
plt.xlabel('solve time(s)')
plt.title(plot_title_prefix+' star tracker solve time (s)')
plt.savefig(now+'_'+plot_filename_prefix+'time1_overall.jpg')
n+=1
plt.figure(n)
plt.hist(file1_solvetime_good, bins='auto')
plt.ylabel('#')
plt.xlabel('solve time(s)')
plt.title(plot_title_prefix+' star tracker successful solve time (s)')
plt.savefig(now+'_'+plot_filename_prefix+'time1_success.jpg')
n+=1
plt.figure(n)
plt.hist(file1_solvetime_bad, bins='auto')
plt.ylabel('#')
plt.xlabel('solve time(s)')
plt.title(plot_title_prefix+' star tracker unsuccessful solve time (s)')
plt.savefig(now+'_'+plot_filename_prefix+'time1_fail.jpg')
n+=1
plt.figure(n)
plt.hist(np.array(theta_err), bins='auto')
plt.ylabel('#')
plt.xlabel('error (deg)')
plt.title(plot_title_prefix+' total delta (deg)')
plt.savefig(now+'_'+plot_filename_prefix+'thetaerr.jpg')
n+=1
plt.figure(n)
plt.plot(np.array(st_ra),'o', label = "RA")
plt.plot(np.array(st_dec),'o', label = "Dec")
plt.ylabel('angle (deg)')
plt.xlabel('image')
plt.title(plot_title_prefix+' star tracker right ascension and declination (deg)')
plt.legend(loc="best")
plt.savefig(now+'_'+plot_filename_prefix+'st_ra_dec.jpg')
n+=1
plt.figure(n)
plt.plot(np.array(q1_s),'o')
    plt.plot(np.array(q1_v0),'o')
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
np.set_printoptions(precision=8, suppress=True)
import pandas as pd
import gym
import typing
from lib.visualization import plot_profit, plot_actions, plot_train_rewards, visualize_heatmap_cf
from lib.metric import roc_auc
import math
import os
import random
from sklearn.cluster import KMeans
class DirectReinforcement(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, *args, **kwargs):
super(DirectReinforcement, self).__init__()
def init_cfg(cfg):
for _field in cfg._fields:
setattr(self, _field, getattr(cfg, _field))
for arg in args:
init_cfg(arg)
np.random.seed(self.SEED)
random.seed(self.SEED)
data = np.load(self.train_data, allow_pickle=True)
self.price_data = data[:,0]
self.data_size = len(self.price_data)
input_shape = self.window
input_shape *= self.no_of_cluster
if self.WITH_EXTENDED_FEATURE:
self.news_sentiment = data[:,1]
self.news_embeddding = data[:,2]
self.eps_data = data[:,3]
self.scaling_factor = {
'return_std': np.std(self.price_data[1:]-self.price_data[:-1]),
'return_mean': np.mean(self.price_data[1:]-self.price_data[:-1]),
'eps_std': np.std([elem for elem in self.eps_data if elem != 0]),
'eps_mean': np.mean([elem for elem in self.eps_data if elem != 0]),
}
input_shape += self.window * self.sentimental_feature
input_shape += self.embedding_feature_len
input_shape += self.window * self.eps_feature
else:
self.scaling_factor = {
'return_std': np.std(self.price_data[1:]-self.price_data[:-1]),
'return_mean': np.mean(self.price_data[1:]-self.price_data[:-1]),
}
self.label = data[:,5]
if self.action_one_hot:
input_shape += 3
print("Input shape:", input_shape)
self.observation_space = gym.spaces.Box(
low=-1,
high=2,
shape=(input_shape,)
)
self.action_space = gym.spaces.Discrete(3)
self.memory = []
self.testing = False
self.validation = False
self.rewards = []
self.epoch_reward = 0
self.epoch_profit = []
self.test_starts_index = 0
self.val_starts_index = 0
self.test_steps = len(np.load(self.test_data, allow_pickle=True)[:,0])
def step(self, action):
"""
Take action, move agent to next position and make a trade action
        Store the action and the new value
Get reward
Return: new state, reward and whether the data is done
"""
c_val = self.price_data[self.position]
self.y.append(self.label[self.position])
if action == 2: # sell / short:
self.action = self.SELL
self.short_actions.append([self.position, c_val])
self.y_hat.append("sell")
if self.testing:
share2sell_amount = int(self.shares_held * self.AMOUNT)
transaction_value = share2sell_amount*c_val
transaction_cost = self.TRANSACTION_FEE*transaction_value
transaction_tax = self.SELL_TRANSACTION_TAX*transaction_value
self.balance = self.balance + transaction_value - transaction_cost - transaction_tax
self.shares_held -= share2sell_amount
self.total_shares_sold += share2sell_amount
self.total_sales_value += share2sell_amount * c_val
# if share2sell_amount > 0:
self.trades_backtest = {
'shares': share2sell_amount,
'transaction_value': transaction_value,
'type': "sell"
}
elif action == 1 : # buy / long
self.action = self.BUY
self.long_actions.append([self.position, c_val])
self.y_hat.append("buy")
if self.testing:
share2buy_amount = int(int(self.balance / c_val) * self.AMOUNT)
transaction_value = share2buy_amount*c_val
transaction_cost = self.TRANSACTION_FEE*transaction_value
self.balance = self.balance - transaction_value - transaction_cost
self.shares_held += share2buy_amount
self.prime_cost += transaction_value/self.shares_held
# if share2buy_amount > 0:
self.trades_backtest = {
'shares': share2buy_amount,
'transaction_value': transaction_value,
'type': "buy"
}
else:
self.action = self.HOLD
self.y_hat.append("hold")
if self.testing:
self.trades_backtest = {
'shares': 0,
'transaction_value': 0,
'type': "hold"
}
if self.testing:
self.net_worth = self.balance + self.shares_held * c_val
self.LT_ACCOUNT_BALANCE = self.price_data[self.position]*self.INIT_NO_OF_SHARES
if self.net_worth > self.max_net_worth:
self.max_net_worth = self.net_worth
if self.net_worth < self.min_net_worth:
self.min_net_worth = self.net_worth
self._render_backtest()
if (self.position+1) < self.data_size:
state = [self.position, c_val, self.action]
self.memory.append(state)
self.position += 1
self.reward = self._get_reward()
self.epoch_reward += self.reward
self.epoch_profit.append(self.reward)
self.observation = self._next_observation_input()
else:
self.done = True
return self.observation, self.reward, self.done, {}
def reset(self):
if self.testing:
data = np.load(self.test_data, allow_pickle=True)
self.price_data = data[:,0]
self.data_size = len(self.price_data)
if self.WITH_EXTENDED_FEATURE:
self.news_sentiment = data[:,1]
self.news_embeddding = data[:,2]
self.eps_data = data[:,3]
self.date = data[:,4]
self.label = data[:,5]
# self.test_position = np.random.randint(self.window + 1, self.data_size - self.test_steps - 1, self.test_epochs)
# self.position = self.test_position[self.test_starts_index]
# self.test_end_position = self.test_position + self.test_steps
self.position = 0
self.test_end_position = self.position + self.test_steps
self.test_starts_index += 1
self.test_folder = self.folder + '/Test_' + str(self.test_starts_index)
if not os.path.exists(self.test_folder):
os.makedirs(self.test_folder)
self.INITIAL_ACCOUNT_BALANCE = self.price_data[self.position]*self.INIT_NO_OF_SHARES
print("Initial account balance:", self.INITIAL_ACCOUNT_BALANCE)
self.LT_ACCOUNT_BALANCE = self.price_data[self.position]*self.INIT_NO_OF_SHARES
self.balance = self.INITIAL_ACCOUNT_BALANCE
self.net_worth = self.INITIAL_ACCOUNT_BALANCE
self.shares_held = self.INIT_NO_OF_SHARES
self.prime_cost = 0
self.total_shares_sold = 0
self.total_sales_value = 0
self.trades_backtest = {}
self.max_net_worth = self.INITIAL_ACCOUNT_BALANCE
self.min_net_worth = self.INITIAL_ACCOUNT_BALANCE
self.render_storage = []
self.render_df_filepath = None
elif self.validation:
data = np.load(self.val_data, allow_pickle=True)
self.price_data = data[:,0]
self.data_size = len(self.price_data)
if self.WITH_EXTENDED_FEATURE:
self.news_sentiment = data[:,1]
self.news_embeddding = data[:,2]
self.eps_data = data[:,3]
self.label = data[:,5]
self.val_position = np.random.randint(self.window + 1, self.data_size - self.val_steps - 1, size=self.val_epochs)
self.position = self.val_position[self.val_starts_index]
self.val_starts_index += 1
else:
begin_idx = self.window + 1
end_idx = self.data_size - self.steps - 1
self.position = random.randint(begin_idx, end_idx)
self.memory = []
self.long_actions = []
self.short_actions = []
self.trades = []
self.long_prec = 0
self.short_prec = 0
self.reward = 0
self.rewards.append(self.epoch_reward)
self.action = 0
self.prev_action = 0
self.buy_flag = False
self.sell_flag = False
self.done = False
self.y = []
self.y_hat = []
self.observation = self._next_observation_input()
return self.observation
def render(self, mode='human', close=False):
"""
        Gym function to render the environment to the screen
"""
self._calculate_pnl(env_name=self.env_name, save=False)
self._calculate_roc()
self.reset()
return None
def _calculate_roc(self):
"""
Calculate the ROC/AUC score based on the action of the agent
"""
if self.testing:
visualize_heatmap_cf(self.y, self.y_hat, save_location=self.test_folder)
else:
visualize_heatmap_cf(self.y, self.y_hat, save_location=self.folder)
print('Area under the curve: {:0.5f}'.format(roc_auc(self.y, self.y_hat)))
def _calculate_pnl(self, env_name, save=True):
"""
Calculate the final PnL based on the actions of the agent with three different fee values (slippage)
"""
        actions = np.array([x[2] for x in self.memory])
# Set up the drake boilerplate system and vis (maybe just multibody plant without "manipulation station"?)
import time
import numpy as np
from pydrake.common import FindResourceOrThrow
from pydrake.common.eigen_geometry import AngleAxis, Quaternion
from pydrake.common.value import Value
from pydrake.geometry import DrakeVisualizer
from pydrake.geometry.render import (MakeRenderEngineVtk, RenderEngineVtkParams)
from pydrake.manipulation.planner import DifferentialInverseKinematicsIntegrator, DifferentialInverseKinematicsParameters
from pydrake.math import RigidTransform, RotationMatrix
from pydrake.multibody.parsing import Parser
from pydrake.multibody.plant import AddMultibodyPlantSceneGraph, MultibodyPlant
from pydrake.multibody.tree import JacobianWrtVariable
from pydrake.systems.analysis import Simulator
from pydrake.systems.controllers import InverseDynamicsController
from pydrake.systems.framework import DiagramBuilder, LeafSystem, BasicVector, LeafSystem_, BasicVector_, EventStatus
from pydrake.systems.primitives import Integrator, Demultiplexer, Multiplexer, ConstantVectorSource
from pydrake.systems.primitives import StateInterpolatorWithDiscreteDerivative
from pydrake.systems.scalar_conversion import TemplateSystem
from pydrake.trajectories import PiecewisePolynomial, PiecewiseQuaternionSlerp
def make_gripper_position_trajectory(X_G, times):
""" Constructs a gripper position trajectory from the plan "sketch" """
tl_ord = sorted(times.keys(), key=lambda k: times[k])
traj = PiecewisePolynomial.FirstOrderHold(
[times[tl_ord[0]], times[tl_ord[1]]],
np.vstack([X_G[tl_ord[0]].translation(), X_G[tl_ord[1]].translation()]).T
)
for l in tl_ord[2:]:
traj.AppendFirstOrderSegment(times[l], X_G[l].translation())
return traj
def make_gripper_orientation_trajectory(X_G, times):
""" Constructs a gripper orientation trajectory from the plant "sketch" """
traj = PiecewiseQuaternionSlerp()
for label, t in sorted(times.items(), key=lambda kv: kv[1]):
traj.Append(t, X_G[label].rotation())
return traj
def make_finger_trajectory(finger_vals, times):
relevant_times = [k for k, v in times.items() if k in finger_vals]
tl_ord = sorted(relevant_times, key=lambda k: times[k])
traj = PiecewisePolynomial.FirstOrderHold(
[times[tl_ord[0]], times[tl_ord[1]]],
np.hstack([[finger_vals[tl_ord[0]]],
[finger_vals[tl_ord[1]]]])
)
for l in tl_ord[2:]:
traj.AppendFirstOrderSegment(times[l], finger_vals[l])
return traj
def manual_pick_sketch(X_G_initial, X_O_initial, X_O_goal):
# Gripper Pose relative to object when in grasp
p_GgraspO = [0, 0, 0.15]
R_GgraspO = RotationMatrix.MakeXRotation(np.pi)
X_GgraspO = RigidTransform(R_GgraspO, p_GgraspO)
X_OGgrasp = X_GgraspO.inverse()
# Pregrasp is negative z in the gripper frame
X_GgraspGpregrasp = RigidTransform([0, 0.0, -0.08])
# TODO: Scoop this part out and feed in (Still need to ensure X_G_initial makes it into key though...)
X_G = {"initial": X_G_initial}
X_G["pick_start"] = X_O_initial.multiply(X_OGgrasp)
X_G["pick_end"] = X_G["pick_start"]
X_G["prepick"] = X_G["pick_start"].multiply(X_GgraspGpregrasp)
X_G["postpick"] = X_G["prepick"]
X_G["place_start"] = X_O_goal.multiply(X_OGgrasp)
X_G["place_end"] = X_G["place_start"]
X_G["preplace"] = X_G["place_start"].multiply(X_GgraspGpregrasp)
X_G["postplace"] = X_G["preplace"]
# Interpolate a halfway orientation by converting to axis angle and halving angle
X_GprepickGpreplace = X_G["prepick"].inverse().multiply(X_G["preplace"])
angle_axis = X_GprepickGpreplace.rotation().ToAngleAxis()
X_GprepickGclearance = RigidTransform(AngleAxis(angle=angle_axis.angle() / 2.0, axis=angle_axis.axis()),
X_GprepickGpreplace.translation() / 2.0 + np.array([0, 0.0, -0.5]))
X_G["clearance"] = X_G["prepick"].multiply(X_GprepickGclearance)
# Precise timings of trajectory
times = {"initial": 0}
X_GinitialGprepick = X_G["initial"].inverse().multiply(X_G["prepick"])
times["prepick"] = times["initial"] + 10.0 * np.linalg.norm(X_GinitialGprepick.translation())
# Allow some time for gripper to close
times["pick_start"] = times["prepick"] + 2.0
times["pick_end"] = times["pick_start"] + 2.0
times["postpick"] = times["pick_end"] + 2.0
time_to_from_clearance = 10.0 * np.linalg.norm(X_GprepickGclearance.translation())
times["clearance"] = times["postpick"] + time_to_from_clearance
times["preplace"] = times["clearance"] + time_to_from_clearance
times["place_start"] = times["preplace"] + 2.0
times["place_end"] = times["place_start"] + 2.0
times["postplace"] = times["place_end"] + 2.0
opened = np.array([0.08])
closed = np.array([0.00])
finger_vals = {"initial": opened,
"pick_start": opened,
"pick_end": closed,
"place_start": closed,
"place_end": opened,
"postplace": opened}
pos_traj = make_gripper_position_trajectory(X_G, times)
rot_traj = make_gripper_orientation_trajectory(X_G, times)
finger_traj = make_finger_trajectory(finger_vals, times)
return pos_traj, rot_traj, finger_traj
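# Minimal sketch of driving the pick-and-place planner above (a hypothetical
# helper; the poses below are illustrative assumptions, not taken from a real
# scene).
def _example_manual_pick_sketch():
    X_G_initial = RigidTransform(RotationMatrix.MakeXRotation(np.pi), [0.5, 0.0, 0.5])
    X_O_initial = RigidTransform([0.5, -0.2, 0.0])
    X_O_goal = RigidTransform([0.5, 0.2, 0.0])
    pos_traj, rot_traj, finger_traj = manual_pick_sketch(X_G_initial, X_O_initial, X_O_goal)
    return pos_traj.end_time()  # total duration of the sketched plan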
@TemplateSystem.define("TrajToRB_")
def TrajToRB_(T):
class Impl(LeafSystem_[T]):
def _construct(self, traj_pos, traj_rot, converter=None):
LeafSystem_[T].__init__(self, converter=converter)
self.traj_pos = traj_pos
self.traj_rot = traj_rot
self.DeclareAbstractOutputPort("RigidBod", Value[RigidTransform], self.CalcOutput)
def _construct_copy(self, other, converter=None):
Impl._construct(self, other.traj_pos, other.traj_rot, converter=converter)
def CalcOutput(self, context, output):
t = context.get_time()
pos_vec = self.traj_pos.value(t)
rot_mat_vec = self.traj_rot.value(t)
rb = RigidTransform(Quaternion(rot_mat_vec), pos_vec)
output.SetFrom(Value[RigidTransform](rb))
return Impl
@TemplateSystem.define("GripperTrajectoriesToPosition_")
def GripperTrajectoriesToPosition_(T):
class Impl(LeafSystem_[T]):
def _construct(self, plant, traj_hand, converter=None):
LeafSystem_[T].__init__(self, converter=converter)
self.plant = plant
self.gripper_body = plant.GetBodyByName("panda_hand")
self.left_finger_joint = plant.GetJointByName("panda_finger_joint1")
self.right_finger_joint = plant.GetJointByName("panda_finger_joint2")
self.traj_hand = traj_hand
self.plant_context = plant.CreateDefaultContext()
self.DeclareVectorOutputPort("finger_position", BasicVector_[T](2), self.CalcPositionOutput)
def _construct_copy(self, other, converter=None):
Impl._construct(self, other.plant, other.traj_hand, converter=converter)
def CalcPositionOutput(self, context, output):
t = context.get_time()
hand_command = self.traj_hand.value(t)
self.left_finger_joint.set_translation(self.plant_context, hand_command / 2.0)
self.right_finger_joint.set_translation(self.plant_context, hand_command / 2.0)
output.SetFromVector(self.plant.GetPositions(self.plant_context)[-2:])
return Impl
def add_named_system(builder, name, system):
""" Although the Drake docs *say* that DiagramBuilder.AddNamedSystem is supported in the python bindings,
that does not appear to be true. So i've implemented it here"""
s = builder.AddSystem(system)
s.set_name(name)
return s
def inverse_dynamics_standard(controller_plant: MultibodyPlant):
kp = np.full(9, 100)
ki = np.full(9, 1)
kd = 2 * np.sqrt(kp)
return InverseDynamicsController(controller_plant, kp, ki, kd, False)
class DifferentialIKSystem(LeafSystem):
def __init__(self, plant, diff_ik_func):
LeafSystem.__init__(self)
self._plant = plant
self._plant_context = plant.CreateDefaultContext()
self._panda = plant.GetModelInstanceByName("panda")
self.panda_start = plant.GetJointByName("panda_joint1").velocity_start()
self.panda_end = self.panda_start + 8 # TODO: Make this more robust/flexible
self._G = plant.GetBodyByName("panda_hand").body_frame()
self._W = plant.world_frame()
self._diff_ik_func = diff_ik_func
self.DeclareVectorInputPort("desired_spatial_vel", BasicVector(6))
self.DeclareVectorInputPort("current_pos", BasicVector(9))
self.DeclareVectorInputPort("estimated_vel", BasicVector(9))
self.DeclareVectorOutputPort("commanded_vel", BasicVector(9), self.CalcOutput)
def CalcOutput(self, context, output):
V_G_desired = self.GetInputPort("desired_spatial_vel").Eval(context)
q_now = self.GetInputPort("current_pos").Eval(context)
v_now = self.GetInputPort("estimated_vel").Eval(context)
self._plant.SetPositions(self._plant_context, self._panda)
J_G = self._plant.CalcJacobianSpatialVelocity(self._plant_context, JacobianWrtVariable.kQDot, self._G, [0, 0, 0], self._W, self._W)
J_G = J_G[:, self.panda_start:self.panda_end + 1] # Question: Am i now keeping the gripper terms around?
X_now = self._plant.CalcRelativeTransform(self._plant_context, self._W, self._G)
p_now = X_now.translation()
v = self._diff_ik_func(J_G, V_G_desired, q_now, v_now, p_now)
output.SetFromVector(v)
class FrameTracker(LeafSystem):
def __init__(self, plant, frame_name):
LeafSystem.__init__(self)
self.tracked_frame = plant.GetFrameByName(frame_name)
self._plant = plant
self.DeclareVectorOutputPort("frame_world_pos", BasicVector(3), self.CalcOutput)
def CalcOutput(self, context, output):
frame_world_trans = self.tracked_frame.CalcPoseInWorld(context).translation()
output.SetFromVector(frame_world_trans)
def panda_constrained_controller(V_d, diff_ik_func, panda, panda_plant):
b = DiagramBuilder()
ts = 1e-3
diff_ik_controller = b.AddSystem(DifferentialIKSystem(panda_plant, diff_ik_func))
integrator = b.AddSystem(Integrator(9))
inv_d = b.AddSystem(inverse_dynamics_standard(panda_plant))
est_state_vel_demux = b.AddSystem(Demultiplexer(np.array([9, 9])))
des_state_vel_mux = b.AddSystem(Multiplexer(np.array([9, 9])))
desired_vel_source = b.AddSystem(ConstantVectorSource(V_d))
b.Connect(panda_plant.get_state_output_port(panda), est_state_vel_demux.get_input_port())
b.Connect(desired_vel_source.get_output_port(), diff_ik_controller.GetInputPort("desired_spatial_vel"))
b.Connect(est_state_vel_demux.get_output_port(0), diff_ik_controller.GetInputPort("current_pos"))
b.Connect(est_state_vel_demux.get_output_port(1), diff_ik_controller.GetInputPort("estimated_vel"))
b.Connect(diff_ik_controller.GetOutputPort("commanded_vel"), integrator.get_input_port())
b.Connect(integrator.get_output_port(), des_state_vel_mux.get_input_port(0))
b.Connect(diff_ik_controller.GetOutputPort("commanded_vel"), des_state_vel_mux.get_input_port(1))
b.Connect(panda_plant.get_state_output_port(panda), inv_d.get_input_port_estimated_state())
b.Connect(des_state_vel_mux.get_output_port(), inv_d.get_input_port_desired_state())
b.ExportOutput(inv_d.get_output_port_control())
diagram = b.Build()
return diagram
def panda_traj_controller(traj_pos, traj_rot, traj_hand, panda_plant):
b = DiagramBuilder()
ts = 1e-3
### Add Systems
traj_to_rigid = add_named_system(b, "RB Conv", TrajToRB_[None](traj_pos, traj_rot))
hand_frame = panda_plant.GetFrameByName("panda_hand", control_only_panda)
ik_params = DifferentialInverseKinematicsParameters(num_positions=9, num_velocities=9)
ik = add_named_system(b, "Inverse Kinematics", DifferentialInverseKinematicsIntegrator(panda_plant, hand_frame, ts, ik_params))
diff_arm_demux = add_named_system(b, "Diff Arm Demux", Demultiplexer(np.array([7, 2])))
arm_hand_mux = add_named_system(b, "Arm-Hand Mux", Multiplexer(np.array([7, 2])))
s_interp = add_named_system(b, "State Interp", StateInterpolatorWithDiscreteDerivative(9, ts, True))
hand_comms = add_named_system(b, "GripperTraj", GripperTrajectoriesToPosition_[None](panda_plant, traj_hand))
kp = np.full(9, 100)
ki = np.full(9, 1)
    kd = 2 * np.sqrt(kp)
import numpy as np
from fragmenter import adjacency
from fragmenter import clusterings
from fragmenter import colormaps
from nibabel import freesurfer
# define clustering options
METHODS = ['gmm', 'k_means', 'spectral', 'ward']
class Fragment(object):
"""
Class to fragment the cortical surface into equal sized parcels.
Parameters:
- - - - -
n_clusters : int
number of parcels to generate
use_pretty_colors : bool
use gradient color scheme for viewing map
"""
def __init__(self, n_clusters, use_pretty_colors=True):
self.n_clusters = n_clusters
self.use_pretty_colors = use_pretty_colors
def fit(
self, vertices, faces, parcels=None, rois=None, size=False,
method='k_means'):
"""
Main surface fragmentation wrapper.
Parameters:
- - - - -
vertices : array
vertex coordinates
faces : array
list of faces
parcels : dictionary
mapping between region names and region indices
rois : list of strings
specific regions to fragment. If None, fragment all regions.
size : int
            desired size of generated fragments. If specified, overrides
n_clusters.
method : string
algorithm to use for generating parcels
"""
# make sure method exists in allowed algorithms
assert method in METHODS
assert isinstance(size, int)
# define function dictionary
clust_funcs = {
'gmm': clusterings.gmm,
'k_means': clusterings.k_means,
'spectral': clusterings.spectral_clustering,
'ward': clusterings.ward}
self.vertices = vertices
n_clusters = self.n_clusters
# if provided method is spectral,
# generate adjacency matrix
if method == 'spectral':
surf_adj = adjacency.SurfaceAdjacency(vertices, faces)
surf_adj.generate()
# if parcels and rois are None, just parcellate the whole cortex
if not parcels or not rois:
# if method is spectral, convert whole adjacency list to
# adjacency matrix
if method == 'spectral':
samples = surf_adj.filtration(
filter_indices=None, toArray=True)
else:
samples = vertices
if size:
n_clusters = np.int32(np.floor(
samples.shape[0]/size))
label = clust_funcs[method](n_clusters, samples)
# otherwise, if parcels AND rois are provided
# fragment on a region-by-region basis
else:
label = np.zeros((vertices.shape[0]))
# loop over regions
lmax = 0
for region in rois:
print(region)
# make sure the region has vertices
if np.any(parcels[region]):
# get region indices
parcel_idx = parcels[region]
# if method == spectral, regional adjacency matrix
if method == 'spectral':
parcel_samples = surf_adj.filtration(
filter_indices=parcel_idx, toArray=True)
# otherwise, extract region-specific
# vertex coordinates
else:
parcel_samples = vertices[parcel_idx, :]
# make sure that the desired number of clusters does not
# exceed the number of samples to cluster
if size:
n_clusters = np.int32(np.ceil(
parcel_samples.shape[0]/size))
if n_clusters > parcel_samples.shape[0]:
n_clusters = 1
# apply clustering
clusters = clust_funcs[method](
n_clusters, parcel_samples)
# offset cluster IDs by the current cluster count so labels stay unique across regions
clusters += lmax
lmax += len(np.unique(clusters))
label[parcel_idx] = clusters
self.label_ = np.int32(label)
def write(self, output_name, to_file=False):
"""
Write the fragment labels to a FreeSurfer annotation file (or, optionally, to a txt/csv file).
Parameters:
- - - - -
output_name: string
name of save file. Must contain desired file extension (e.g. '.annot', '.csv')
to_file: bool
indicate whether to create a txt or csv file instead
"""
# If labels are to be exported to csv or txt
if to_file:
if output_name.endswith(('.txt','.csv')):
| np.savetxt(output_name, self.label_, fmt='%5.0f') | numpy.savetxt |
"""
Augmenters that perform simple arithmetic changes.
Do not import directly from this file, as the categorization is not final.
Use instead::
from imgaug import augmenters as iaa
and then e.g.::
seq = iaa.Sequential([iaa.Add((-5, 5)), iaa.Multiply((0.9, 1.1))])
List of augmenters:
* Add
* AddElementwise
* AdditiveGaussianNoise
* AdditiveLaplaceNoise
* AdditivePoissonNoise
* Multiply
* MultiplyElementwise
* Dropout
* CoarseDropout
* ReplaceElementwise
* ImpulseNoise
* SaltAndPepper
* CoarseSaltAndPepper
* Salt
* CoarseSalt
* Pepper
* CoarsePepper
* Invert
* ContrastNormalization
* JpegCompression
"""
from __future__ import print_function, division, absolute_import
from PIL import Image as PIL_Image
import imageio
import tempfile
import numpy as np
import cv2
from . import meta
import imgaug as ia
from .. import parameters as iap
from .. import dtypes as iadt
def add_scalar(image, value):
"""Add a single scalar value or one scalar value per channel to an image.
This method ensures that ``uint8`` does not overflow during the addition.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: limited; tested (1)
* ``uint32``: no
* ``uint64``: no
* ``int8``: limited; tested (1)
* ``int16``: limited; tested (1)
* ``int32``: no
* ``int64``: no
* ``float16``: limited; tested (1)
* ``float32``: limited; tested (1)
* ``float64``: no
* ``float128``: no
* ``bool``: limited; tested (1)
- (1) Non-uint8 dtypes can overflow. For floats, this can result
in +/-inf.
Parameters
----------
image : ndarray
Image array of shape ``(H,W,[C])``.
If `value` contains more than one value, the shape of the image is
expected to be ``(H,W,C)``.
value : number or ndarray
The value to add to the image. Either a single value or an array
containing exactly one component per channel, i.e. ``C`` components.
Returns
-------
ndarray
Image with value added to it.
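Examples
--------
A minimal sketch of calling this helper directly (normally the ``Add``
augmenter calls it for you):
>>> import numpy as np
>>> from imgaug.augmenters.arithmetic import add_scalar
>>> image = np.full((1, 1, 3), 250, dtype=np.uint8)
>>> add_scalar(image, 10)[0, 0]  # saturates at 255 instead of wrapping
array([255, 255, 255], dtype=uint8)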
"""
if image.size == 0:
return np.copy(image)
iadt.gate_dtypes(
image,
allowed=["bool",
"uint8", "uint16",
"int8", "int16",
"float16", "float32"],
disallowed=["uint32", "uint64", "uint128", "uint256",
"int32", "int64", "int128", "int256",
"float64", "float96", "float128",
"float256"],
augmenter=None)
if image.dtype.name == "uint8":
return _add_scalar_to_uint8(image, value)
return _add_scalar_to_non_uint8(image, value)
def _add_scalar_to_uint8(image, value):
# Using this LUT approach is significantly faster than using
# numpy-based adding with dtype checks (around 3-4x speedup) and is
# still faster than the simple numpy image+sample approach without LUT
# (about 10% at 64x64 and about 2x at 224x224 -- maybe dependent on
# installed BLAS libraries?)
is_single_value = (
ia.is_single_number(value)
or ia.is_np_scalar(value)
or (ia.is_np_array(value) and value.size == 1))
is_channelwise = not is_single_value
nb_channels = 1 if image.ndim == 2 else image.shape[-1]
value = np.clip(np.round(value), -255, 255).astype(np.int16)
value_range = np.arange(0, 256, dtype=np.int16)
if is_channelwise:
assert value.ndim == 1, (
"Expected `value` to be 1-dimensional, got %d-dimensional "
"data with shape %s." % (value.ndim, value.shape))
assert image.ndim == 3, (
"Expected `image` to be 3-dimensional when adding one value per "
"channel, got %d-dimensional data with shape %s." % (
image.ndim, image.shape))
assert image.shape[-1] == value.size, (
"Expected number of channels in `image` and number of components "
"in `value` to be identical. Got %d vs. %d." % (
image.shape[-1], value.size))
result = []
# TODO check if tile() is here actually needed
tables = np.tile(
value_range[np.newaxis, :],
(nb_channels, 1)
) + value[:, np.newaxis]
tables = np.clip(tables, 0, 255).astype(image.dtype)
for c, table in enumerate(tables):
result.append(cv2.LUT(image[..., c], table))
return np.stack(result, axis=-1)
else:
table = value_range + value
image_aug = cv2.LUT(
image,
iadt.clip_(table, 0, 255).astype(image.dtype))
if image_aug.ndim == 2 and image.ndim == 3:
image_aug = image_aug[..., np.newaxis]
return image_aug
def _add_scalar_to_non_uint8(image, value):
input_dtype = image.dtype
is_single_value = (
ia.is_single_number(value)
or ia.is_np_scalar(value)
or (ia.is_np_array(value) and value.size == 1))
is_channelwise = not is_single_value
nb_channels = 1 if image.ndim == 2 else image.shape[-1]
shape = (1, 1, nb_channels if is_channelwise else 1)
value = np.array(value).reshape(shape)
# We limit here the value range of the value parameter to the
# bytes in the image's dtype. This prevents overflow problems
# and makes it less likely that the image has to be up-casted,
# which again improves performance and saves memory. Note that
# this also enables more dtypes for image inputs.
# The downside is that the mul parameter is limited in its
# value range.
#
# We need 2* the itemsize of the image here to allow to shift
# the image's max value to the lowest possible value, e.g. for
# uint8 it must allow for -255 to 255.
itemsize = image.dtype.itemsize * 2
dtype_target = np.dtype("%s%d" % (value.dtype.kind, itemsize))
value = iadt.clip_to_dtype_value_range_(
value, dtype_target, validate=True)
# Itemsize is currently reduced from 2 to 1 due to clip no
# longer supporting int64, which can cause issues with int32
# samples (32*2 = 64bit).
# TODO limit value ranges of samples to int16/uint16 for
# security
image, value = iadt.promote_array_dtypes_(
[image, value],
dtypes=[image.dtype, dtype_target],
increase_itemsize_factor=1)
image = np.add(image, value, out=image, casting="no")
return iadt.restore_dtypes_(image, input_dtype)
def add_elementwise(image, values):
"""Add an array of values to an image.
This method ensures that ``uint8`` does not overflow during the addition.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: limited; tested (1)
* ``uint32``: no
* ``uint64``: no
* ``int8``: limited; tested (1)
* ``int16``: limited; tested (1)
* ``int32``: no
* ``int64``: no
* ``float16``: limited; tested (1)
* ``float32``: limited; tested (1)
* ``float64``: no
* ``float128``: no
* ``bool``: limited; tested (1)
- (1) Non-uint8 dtypes can overflow. For floats, this can result
in +/-inf.
Parameters
----------
image : ndarray
Image array of shape ``(H,W,[C])``.
values : ndarray
The values to add to the image. Expected to have the same height
and width as `image` and either no channels or one channel or
the same number of channels as `image`.
Returns
-------
ndarray
Image with values added to it.
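Examples
--------
A minimal sketch of direct use (normally called by ``AddElementwise``):
>>> import numpy as np
>>> from imgaug.augmenters.arithmetic import add_elementwise
>>> image = np.zeros((2, 2, 1), dtype=np.uint8)
>>> values = np.arange(4, dtype=np.int16).reshape(2, 2, 1)
>>> add_elementwise(image, values)[..., 0]
array([[0, 1],
       [2, 3]], dtype=uint8)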
"""
iadt.gate_dtypes(
image,
allowed=["bool",
"uint8", "uint16",
"int8", "int16",
"float16", "float32"],
disallowed=["uint32", "uint64", "uint128", "uint256",
"int32", "int64", "int128", "int256",
"float64", "float96", "float128",
"float256"],
augmenter=None)
if image.dtype.name == "uint8":
return _add_elementwise_to_uint8(image, values)
return _add_elementwise_to_non_uint8(image, values)
def _add_elementwise_to_uint8(image, values):
# This special uint8 block is around 60-100% faster than the
# corresponding non-uint8 function further below (more speedup
# for smaller images).
#
# Also tested to instead compute min/max of image and value
# and then only convert image/value dtype if actually
# necessary, but that was like 20-30% slower, even for 224x224
# images.
#
if values.dtype.kind == "f":
values = np.round(values)
image = image.astype(np.int16)
values = np.clip(values, -255, 255).astype(np.int16)
image_aug = image + values
image_aug = np.clip(image_aug, 0, 255).astype(np.uint8)
return image_aug
def _add_elementwise_to_non_uint8(image, values):
# We limit here the value range of the value parameter to the
# bytes in the image's dtype. This prevents overflow problems
# and makes it less likely that the image has to be up-casted,
# which again improves performance and saves memory. Note that
# this also enables more dtypes for image inputs.
# The downside is that the mul parameter is limited in its
# value range.
#
# We need 2* the itemsize of the image here to allow to shift
# the image's max value to the lowest possible value, e.g. for
# uint8 it must allow for -255 to 255.
input_shape = image.shape
input_dtype = image.dtype
if image.ndim == 2:
image = image[..., np.newaxis]
if values.ndim == 2:
values = values[..., np.newaxis]
nb_channels = image.shape[-1]
itemsize = image.dtype.itemsize * 2
dtype_target = np.dtype("%s%d" % (values.dtype.kind, itemsize))
values = iadt.clip_to_dtype_value_range_(values, dtype_target,
validate=100)
if values.shape[2] == 1:
values = np.tile(values, (1, 1, nb_channels))
# Decreased itemsize from 2 to 1 here, see explanation in Add.
image, values = iadt.promote_array_dtypes_(
[image, values],
dtypes=[image.dtype, dtype_target],
increase_itemsize_factor=1)
image = np.add(image, values, out=image, casting="no")
image = iadt.restore_dtypes_(image, input_dtype)
if len(input_shape) == 2:
return image[..., 0]
return image
def multiply_scalar(image, multiplier):
"""Multiply an image by a single scalar or one scalar per channel.
This method ensures that ``uint8`` does not overflow during the
multiplication.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: limited; tested (1)
* ``uint32``: no
* ``uint64``: no
* ``int8``: limited; tested (1)
* ``int16``: limited; tested (1)
* ``int32``: no
* ``int64``: no
* ``float16``: limited; tested (1)
* ``float32``: limited; tested (1)
* ``float64``: no
* ``float128``: no
* ``bool``: limited; tested (1)
- (1) Non-uint8 dtypes can overflow. For floats, this can result in
+/-inf.
Note: tests were only conducted for rather small multipliers, around
``-10.0`` to ``+10.0``.
In general, the multipliers sampled from `multiplier` must be in a
value range that corresponds to the input image's dtype. E.g. if the
input image has dtype ``uint16`` and the samples generated from
`multiplier` are ``float64``, this function will still force all
samples to be within the value range of ``float16``, as it has the
same number of bytes (two) as ``uint16``. This is done to make
overflows less likely to occur.
Parameters
----------
image : ndarray
Image array of shape ``(H,W,[C])``.
If `value` contains more than one value, the shape of the image is
expected to be ``(H,W,C)``.
multiplier : number or ndarray
The multiplier to use. Either a single value or an array
containing exactly one component per channel, i.e. ``C`` components.
Returns
-------
ndarray
Image, multiplied by `multiplier`.
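Examples
--------
A minimal sketch of direct use (normally called by ``Multiply``):
>>> import numpy as np
>>> from imgaug.augmenters.arithmetic import multiply_scalar
>>> image = np.full((1, 1, 3), 100, dtype=np.uint8)
>>> multiply_scalar(image, 1.5)[0, 0]
array([150, 150, 150], dtype=uint8)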
"""
if image.size == 0:
return np.copy(image)
iadt.gate_dtypes(
image,
allowed=["bool",
"uint8", "uint16",
"int8", "int16",
"float16", "float32"],
disallowed=["uint32", "uint64", "uint128", "uint256",
"int32", "int64", "int128", "int256",
"float64", "float96", "float128",
"float256"],
augmenter=None)
if image.dtype.name == "uint8":
return _multiply_scalar_to_uint8(image, multiplier)
return _multiply_scalar_to_non_uint8(image, multiplier)
def _multiply_scalar_to_uint8(image, multiplier):
# Using this LUT approach is significantly faster than
# else-block code (more than 10x speedup) and is still faster
# than the simpler image*sample approach without LUT (1.5-3x
# speedup, maybe dependent on installed BLAS libraries?)
is_single_value = (
ia.is_single_number(multiplier)
or ia.is_np_scalar(multiplier)
or (ia.is_np_array(multiplier) and multiplier.size == 1))
is_channelwise = not is_single_value
nb_channels = 1 if image.ndim == 2 else image.shape[-1]
multiplier = np.float32(multiplier)
value_range = np.arange(0, 256, dtype=np.float32)
if is_channelwise:
assert multiplier.ndim == 1, (
"Expected `multiplier` to be 1-dimensional, got %d-dimensional "
"data with shape %s." % (multiplier.ndim, multiplier.shape))
assert image.ndim == 3, (
"Expected `image` to be 3-dimensional when multiplying by one "
"value per channel, got %d-dimensional data with shape %s." % (
image.ndim, image.shape))
assert image.shape[-1] == multiplier.size, (
"Expected number of channels in `image` and number of components "
"in `multiplier` to be identical. Got %d vs. %d." % (
image.shape[-1], multiplier.size))
result = []
# TODO check if tile() is here actually needed
tables = np.tile(
value_range[np.newaxis, :],
(nb_channels, 1)
) * multiplier[:, np.newaxis]
tables = np.clip(tables, 0, 255).astype(image.dtype)
for c, table in enumerate(tables):
arr_aug = cv2.LUT(image[..., c], table)
result.append(arr_aug)
return np.stack(result, axis=-1)
else:
table = value_range * multiplier
image_aug = cv2.LUT(
image, np.clip(table, 0, 255).astype(image.dtype))
if image_aug.ndim == 2 and image.ndim == 3:
image_aug = image_aug[..., np.newaxis]
return image_aug
def _multiply_scalar_to_non_uint8(image, multiplier):
# TODO estimate via image min/max values whether a resolution
# increase is necessary
input_dtype = image.dtype
is_single_value = (
ia.is_single_number(multiplier)
or ia.is_np_scalar(multiplier)
or (ia.is_np_array(multiplier) and multiplier.size == 1))
is_channelwise = not is_single_value
nb_channels = 1 if image.ndim == 2 else image.shape[-1]
shape = (1, 1, nb_channels if is_channelwise else 1)
multiplier = np.array(multiplier).reshape(shape)
# deactivated itemsize increase due to clip causing problems
# with int64, see Add
# mul_min = np.min(mul)
# mul_max = np.max(mul)
# is_not_increasing_value_range = (
# (-1 <= mul_min <= 1)
# and (-1 <= mul_max <= 1))
# We limit here the value range of the mul parameter to the
# bytes in the image's dtype. This prevents overflow problems
# and makes it less likely that the image has to be up-casted,
# which again improves performance and saves memory. Note that
# this also enables more dtypes for image inputs.
# The downside is that the mul parameter is limited in its
# value range.
itemsize = max(
image.dtype.itemsize,
2 if multiplier.dtype.kind == "f" else 1
) # float min itemsize is 2 not 1
dtype_target = np.dtype("%s%d" % (multiplier.dtype.kind, itemsize))
multiplier = iadt.clip_to_dtype_value_range_(
multiplier, dtype_target, validate=True)
image, multiplier = iadt.promote_array_dtypes_(
[image, multiplier],
dtypes=[image.dtype, dtype_target],
# increase_itemsize_factor=(
# 1 if is_not_increasing_value_range else 2)
increase_itemsize_factor=1
)
image = np.multiply(image, multiplier, out=image, casting="no")
return iadt.restore_dtypes_(image, input_dtype)
def multiply_elementwise(image, multipliers):
"""Multiply an image with an array of values.
This method ensures that ``uint8`` does not overflow during the addition.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: limited; tested (1)
* ``uint32``: no
* ``uint64``: no
* ``int8``: limited; tested (1)
* ``int16``: limited; tested (1)
* ``int32``: no
* ``int64``: no
* ``float16``: limited; tested (1)
* ``float32``: limited; tested (1)
* ``float64``: no
* ``float128``: no
* ``bool``: limited; tested (1)
- (1) Non-uint8 dtypes can overflow. For floats, this can result
in +/-inf.
Note: tests were only conducted for rather small multipliers, around
``-10.0`` to ``+10.0``.
In general, the multipliers sampled from `multipliers` must be in a
value range that corresponds to the input image's dtype. E.g. if the
input image has dtype ``uint16`` and the samples generated from
`multipliers` are ``float64``, this function will still force all
samples to be within the value range of ``float16``, as it has the
same number of bytes (two) as ``uint16``. This is done to make
overflows less likely to occur.
Parameters
----------
image : ndarray
Image array of shape ``(H,W,[C])``.
multipliers : ndarray
The multipliers with which to multiply the image. Expected to have
the same height and width as `image` and either no channels or one
channel or the same number of channels as `image`.
Returns
-------
ndarray
Image, multiplied by `multipliers`.
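Examples
--------
A minimal sketch of direct use (normally called by ``MultiplyElementwise``):
>>> import numpy as np
>>> from imgaug.augmenters.arithmetic import multiply_elementwise
>>> image = np.full((2, 2, 1), 10, dtype=np.uint8)
>>> multipliers = np.array([[1, 2], [3, 4]], dtype=np.uint8).reshape(2, 2, 1)
>>> multiply_elementwise(image, multipliers)[..., 0]
array([[10, 20],
       [30, 40]], dtype=uint8)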
"""
iadt.gate_dtypes(
image,
allowed=["bool",
"uint8", "uint16",
"int8", "int16",
"float16", "float32"],
disallowed=["uint32", "uint64", "uint128", "uint256",
"int32", "int64", "int128", "int256",
"float64", "float96", "float128", "float256"],
augmenter=None)
if multipliers.dtype.kind == "b":
# TODO extend this with some shape checks
image *= multipliers
return image
elif image.dtype.name == "uint8":
return _multiply_elementwise_to_uint8(image, multipliers)
return _multiply_elementwise_to_non_uint8(image, multipliers)
def _multiply_elementwise_to_uint8(image, multipliers):
# This special uint8 block is around 60-100% faster than the
# non-uint8 block further below (more speedup for larger images).
if multipliers.dtype.kind == "f":
# interestingly, float32 is here significantly faster than
# float16
# TODO is that system dependent?
# TODO does that affect int8-int32 too?
multipliers = multipliers.astype(np.float32, copy=False)
image_aug = image.astype(np.float32)
else:
multipliers = multipliers.astype(np.int16, copy=False)
image_aug = image.astype(np.int16)
image_aug = np.multiply(image_aug, multipliers, casting="no", out=image_aug)
return iadt.restore_dtypes_(image_aug, np.uint8, round=False)
def _multiply_elementwise_to_non_uint8(image, multipliers):
input_dtype = image.dtype
# TODO maybe introduce to stochastic parameters some way to
# get the possible min/max values, could make things
# faster for dropout to get 0/1 min/max from the binomial
# itemsize decrease is currently deactivated due to issues
# with clip and int64, see Add
mul_min = np.min(multipliers)
mul_max = np.max(multipliers)
# is_not_increasing_value_range = (
# (-1 <= mul_min <= 1) and (-1 <= mul_max <= 1))
# We limit here the value range of the mul parameter to the
# bytes in the image's dtype. This prevents overflow problems
# and makes it less likely that the image has to be up-casted,
# which again improves performance and saves memory. Note that
# this also enables more dtypes for image inputs.
# The downside is that the mul parameter is limited in its
# value range.
itemsize = max(
image.dtype.itemsize,
2 if multipliers.dtype.kind == "f" else 1
) # float min itemsize is 2
dtype_target = np.dtype("%s%d" % (multipliers.dtype.kind, itemsize))
multipliers = iadt.clip_to_dtype_value_range_(
multipliers, dtype_target,
validate=True, validate_values=(mul_min, mul_max))
if multipliers.shape[2] == 1:
# TODO check if tile() is here actually needed
nb_channels = image.shape[-1]
multipliers = np.tile(multipliers, (1, 1, nb_channels))
image, multipliers = iadt.promote_array_dtypes_(
[image, multipliers],
dtypes=[image.dtype, dtype_target],
increase_itemsize_factor=1
# increase_itemsize_factor=(
# 1 if is_not_increasing_value_range else 2)
)
image = np.multiply(image, multipliers, out=image, casting="no")
return iadt.restore_dtypes_(image, input_dtype)
def replace_elementwise_(image, mask, replacements):
"""Replace components in an image array with new values.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested
* ``uint64``: no (1)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested
* ``int64``: no (2)
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: no
* ``bool``: yes; tested
- (1) ``uint64`` is currently not supported, because
:func:`imgaug.dtypes.clip_to_dtype_value_range_()` does not
support it, which again is because numpy.clip() seems to not
support it.
- (2) `int64` is disallowed due to being converted to `float64`
by :func:`numpy.clip` since 1.17 (possibly also before?).
Parameters
----------
image : ndarray
Image array of shape ``(H,W,[C])``.
mask : ndarray
Mask of shape ``(H,W,[C])`` denoting which components to replace.
If ``C`` is provided, it must be ``1`` or match the ``C`` of `image`.
May contain floats in the interval ``[0.0, 1.0]``.
replacements : iterable
Replacements to place in `image` at the locations defined by `mask`.
This 1-dimensional iterable must contain exactly as many values
as there are replaced components in `image`.
Returns
-------
ndarray
Image with replaced components.
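Examples
--------
A minimal sketch of direct use (normally called by ``ReplaceElementwise``;
note the trailing underscore -- the input may be modified in-place):
>>> import numpy as np
>>> from imgaug.augmenters.arithmetic import replace_elementwise_
>>> image = np.zeros((2, 2), dtype=np.uint8)
>>> mask = np.array([[1, 0], [0, 1]], dtype=np.float32)
>>> replacements = np.array([255, 128], dtype=np.uint8)
>>> replace_elementwise_(image, mask, replacements)
array([[255,   0],
       [  0, 128]], dtype=uint8)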
"""
iadt.gate_dtypes(
image,
allowed=["bool",
"uint8", "uint16", "uint32",
"int8", "int16", "int32",
"float16", "float32", "float64"],
disallowed=["uint64", "uint128", "uint256",
"int64", "int128", "int256",
"float96", "float128", "float256"],
augmenter=None)
# This is slightly faster (~20%) for masks that are True at many
# locations, but slower (~50%) for masks with few Trues, which is
# probably the more common use-case:
#
# replacement_samples = self.replacement.draw_samples(
# sampling_shape, random_state=rs_replacement)
#
# # round, this makes 0.2 e.g. become 0 in case of boolean
# # image (otherwise replacing values with 0.2 would
# # lead to True instead of False).
# if (image.dtype.kind in ["i", "u", "b"]
# and replacement_samples.dtype.kind == "f"):
# replacement_samples = np.round(replacement_samples)
#
# replacement_samples = iadt.clip_to_dtype_value_range_(
# replacement_samples, image.dtype, validate=False)
# replacement_samples = replacement_samples.astype(
# image.dtype, copy=False)
#
# if sampling_shape[2] == 1:
# mask_samples = np.tile(mask_samples, (1, 1, nb_channels))
# replacement_samples = np.tile(
# replacement_samples, (1, 1, nb_channels))
# mask_thresh = mask_samples > 0.5
# image[mask_thresh] = replacement_samples[mask_thresh]
input_shape = image.shape
if image.ndim == 2:
image = image[..., np.newaxis]
if mask.ndim == 2:
mask = mask[..., np.newaxis]
mask_thresh = mask > 0.5
if mask.shape[2] == 1:
nb_channels = image.shape[-1]
# TODO verify if tile() is here really necessary
mask_thresh = np.tile(mask_thresh, (1, 1, nb_channels))
# round, this makes 0.2 e.g. become 0 in case of boolean
# image (otherwise replacing values with 0.2 would lead to True
# instead of False).
if image.dtype.kind in ["i", "u", "b"] and replacements.dtype.kind == "f":
replacements = np.round(replacements)
replacement_samples = iadt.clip_to_dtype_value_range_(
replacements, image.dtype, validate=False)
replacement_samples = replacement_samples.astype(image.dtype, copy=False)
image[mask_thresh] = replacement_samples
if len(input_shape) == 2:
return image[..., 0]
return image
def invert(image, min_value=None, max_value=None):
"""Invert an array.
dtype support::
if (min_value=None and max_value=None)::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested
* ``uint64``: yes; tested
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested
* ``int64``: yes; tested
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: yes; tested
* ``bool``: yes; tested
if (min_value!=None or max_value!=None)::
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested
* ``uint64``: no (1)
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested
* ``int64``: no (1)
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: no (1)
* ``float128``: no (2)
* ``bool``: no (4)
- (1) Not allowed due to numpy's clip converting from ``uint64`` to
``float64``.
- (2) Not allowed as int/float have to be increased in resolution
when using min/max values.
- (3) Not tested.
- (4) Makes no sense when using min/max values.
Parameters
----------
image : ndarray
Image array of shape ``(H,W,[C])``.
min_value : None or number, optional
Minimum of the value range of input images, e.g. ``0`` for ``uint8``
images. If set to ``None``, the value will be automatically derived
from the image's dtype.
max_value : None or number, optional
Maximum of the value range of input images, e.g. ``255`` for ``uint8``
images. If set to ``None``, the value will be automatically derived
from the image's dtype.
Returns
-------
ndarray
Inverted image.
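Examples
--------
A minimal sketch, here inverting ``uint8`` over its full value range:
>>> import numpy as np
>>> from imgaug.augmenters.arithmetic import invert
>>> invert(np.array([0, 100, 255], dtype=np.uint8))
array([255, 155,   0], dtype=uint8)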
"""
# when no custom min/max are chosen, all bool, uint, int and float dtypes
# should be invertible (float tested only up to 64bit)
# when choosing custom min/max:
# - bool makes no sense, not allowed
# - int and float must be increased in resolution if custom min/max values
# are chosen, hence they are limited to 32 bit and below
# - uint64 is converted by numpy's clip to float64, hence loss of accuracy
# - float16 seems to not be perfectly accurate, but still ok-ish -- was
# off by 10 for center value of range (float 16 min, 16), where float
# 16 min is around -65500
allow_dtypes_custom_minmax = {"uint8", "uint16", "uint32",
"int8", "int16", "int32",
"float16", "float32"}
min_value_dt, _, max_value_dt = \
iadt.get_value_range_of_dtype(image.dtype)
min_value = (min_value_dt
if min_value is None else min_value)
max_value = (max_value_dt
if max_value is None else max_value)
assert min_value >= min_value_dt, (
"Expected min_value to be above or equal to dtype's min "
"value, got %s (vs. min possible %s for %s)" % (
str(min_value), str(min_value_dt), image.dtype.name)
)
assert max_value <= max_value_dt, (
"Expected max_value to be below or equal to dtype's max "
"value, got %s (vs. max possible %s for %s)" % (
str(max_value), str(max_value_dt), image.dtype.name)
)
assert min_value < max_value, (
"Expected min_value to be below max_value, got %s "
"and %s" % (
str(min_value), str(max_value))
)
if min_value != min_value_dt or max_value != max_value_dt:
assert image.dtype.name in allow_dtypes_custom_minmax, (
"Can use custom min/max values only with the following "
"dtypes: %s. Got: %s." % (
", ".join(allow_dtypes_custom_minmax), image.dtype.name))
dtype_kind_to_invert_func = {
"b": _invert_bool,
"u": _invert_uint,
"i": _invert_int,
"f": _invert_float
}
func = dtype_kind_to_invert_func[image.dtype.kind]
return func(image, min_value, max_value)
def _invert_bool(arr, min_value, max_value):
assert min_value == 0 and max_value == 1, (
"min_value and max_value must be 0 and 1 for bool arrays. "
"Got %.4f and %.4f." % (min_value, max_value))
return ~arr
def _invert_uint(arr, min_value, max_value):
if min_value == 0 and max_value == np.iinfo(arr.dtype).max:
return max_value - arr
return _invert_by_distance(
np.clip(arr, min_value, max_value),
min_value, max_value
)
def _invert_int(arr, min_value, max_value):
# note that for int dtypes the max value is
# (-1) * min_value - 1
# e.g. -128 and 127 (min/max) for int8
# mapping example:
# [-4, -3, -2, -1, 0, 1, 2, 3]
# will be mapped to
# [ 3, 2, 1, 0, -1, -2, -3, -4]
# hence we can not simply compute the inverse as:
# after = (-1) * before
# but instead need
# after = (-1) * before - 1
# however, this exceeds the value range for the minimum value, e.g.
# for int8: -128 -> 128 -> 127, where 128 exceeds it. Hence, we must
# compute the inverse via a mask (extra step for the minimum)
# or we have to increase the resolution of the array. Here, a
# two-step approach is used.
if min_value == (-1) * max_value - 1:
mask = (arr == min_value)
# there is probably a one-liner here to do this, but
# ((-1) * (arr * ~mask) - 1) + mask * max_value
# has the disadvantage of inverting min_value to max_value - 1
# while
# ((-1) * (arr * ~mask) - 1) + mask * (max_value+1)
# ((-1) * (arr * ~mask) - 1) + mask * max_value + mask
# both sometimes increase the dtype resolution (e.g. int32 to int64)
n_min = np.sum(mask)
if n_min > 0:
arr[mask] = max_value
if n_min < arr.size:
arr[~mask] = (-1) * arr[~mask] - 1
return arr
else:
return _invert_by_distance(
np.clip(arr, min_value, max_value),
min_value, max_value
)
def _invert_float(arr, min_value, max_value):
if np.isclose(max_value, (-1)*min_value, rtol=0):
return (-1) * arr
return _invert_by_distance(
np.clip(arr, min_value, max_value),
min_value, max_value
)
def _invert_by_distance(arr, min_value, max_value):
arr_modify = arr
if arr.dtype.kind in ["i", "f"]:
arr_modify = iadt.increase_array_resolutions_([np.copy(arr)], 2)[0]
distance_from_min = np.abs(arr_modify - min_value) # d=abs(v-min)
arr_modify = max_value - distance_from_min # v'=MAX-d
# due to floating point inaccuracies, we might exceed the min/max
# values for floats here, hence clip. This happens especially for
# values close to the float dtype's maxima
if arr.dtype.kind == "f":
arr_modify = np.clip(arr_modify, min_value, max_value)
if arr.dtype.kind in ["i", "f"]:
arr_modify = iadt.restore_dtypes_(
arr_modify, arr.dtype, clip=False)
return arr_modify
def compress_jpeg(image, compression):
"""Compress an image using jpeg compression.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: ?
* ``uint32``: ?
* ``uint64``: ?
* ``int8``: ?
* ``int16``: ?
* ``int32``: ?
* ``int64``: ?
* ``float16``: ?
* ``float32``: ?
* ``float64``: ?
* ``float128``: ?
* ``bool``: ?
Parameters
----------
image : ndarray
Image of dtype ``uint8`` and shape ``(H,W,[C])``. If ``C`` is provided,
it must be ``1`` or ``3``.
compression : int
Strength of the compression in the interval ``[0, 100]``.
Returns
-------
ndarray
Input image after applying jpeg compression to it and reloading
the result into a new array. Same shape and dtype as the input.
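Examples
--------
A minimal sketch (the exact pixel values after decompression are
codec-dependent, so only the shape is checked here):
>>> import numpy as np
>>> from imgaug.augmenters.arithmetic import compress_jpeg
>>> image = np.random.randint(0, 255, size=(64, 64, 3)).astype(np.uint8)
>>> compress_jpeg(image, compression=90).shape
(64, 64, 3)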
"""
if image.size == 0:
return np.copy(image)
# The value range 1 to 95 is suggested by PIL's save() documentation
# Values above 95 seem to not make sense (no improvement in visual
# quality, but large file size).
# A value of 100 would mostly deactivate jpeg compression.
# A value of 0 would lead to no compression (instead of maximum
# compression).
# We use range 1 to 100 here, because this augmenter is about
# generating images for training and not for saving, hence we do not
# care about large file sizes.
maximum_quality = 100
minimum_quality = 1
assert image.dtype.name == "uint8", (
"Jpeg compression can only be applied to uint8 images. "
"Got dtype %s." % (image.dtype.name,))
assert 0 <= compression <= 100, (
"Expected compression to be in the interval [0, 100], "
"got %.4f." % (compression,))
has_no_channels = (image.ndim == 2)
is_single_channel = (image.ndim == 3 and image.shape[-1] == 1)
if is_single_channel:
image = image[..., 0]
assert has_no_channels or is_single_channel or image.shape[-1] == 3, (
"Expected either a grayscale image of shape (H,W) or (H,W,1) or an "
"RGB image of shape (H,W,3). Got shape %s." % (image.shape,))
# Map from compression to quality used by PIL
# We have valid compressions from 0 to 100, i.e. 101 possible
# values
quality = int(
np.clip(
np.round(
minimum_quality
+ (maximum_quality - minimum_quality)
* (1.0 - (compression / 100))
),
minimum_quality,
maximum_quality
)
)
image_pil = PIL_Image.fromarray(image)
with tempfile.NamedTemporaryFile(mode="wb+", suffix=".jpg") as f:
image_pil.save(f, quality=quality)
# Read back from file.
# We dont read from f.name, because that leads to PermissionDenied
# errors on Windows. We add f.seek(0) here, because otherwise we get
# `SyntaxError: index out of range` in PIL.
f.seek(0)
pilmode = "RGB"
if has_no_channels or is_single_channel:
pilmode = "L"
image = imageio.imread(f, pilmode=pilmode, format="jpeg")
if is_single_channel:
image = image[..., np.newaxis]
return image
class Add(meta.Augmenter):
"""
Add a value to all pixels in an image.
dtype support::
See :func:`imgaug.augmenters.arithmetic.add_scalar`.
Parameters
----------
value : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Value to add to all pixels.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, then a value from the discrete
interval ``[a..b]`` will be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, then a value will be sampled per
image from that parameter.
per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
Whether to use (imagewise) the same sample(s) for all
channels (``False``) or to sample value(s) for each channel (``True``).
Setting this to ``True`` will therefore lead to different
transformations per image *and* channel, otherwise only per image.
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as ``True``.
If it is a ``StochasticParameter`` it is expected to produce samples
with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
lead to per-channel behaviour (i.e. same as ``True``).
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Add(10)
Always adds a value of 10 to all channels of all pixels of all input
images.
>>> aug = iaa.Add((-10, 10))
Adds a value from the discrete interval ``[-10..10]`` to all pixels of
input images. The exact value is sampled per image.
>>> aug = iaa.Add((-10, 10), per_channel=True)
Adds a value from the discrete interval ``[-10..10]`` to all pixels of
input images. The exact value is sampled per image *and* channel,
i.e. to a red-channel it might add 5 while subtracting 7 from the
blue channel of the same image.
>>> aug = iaa.Add((-10, 10), per_channel=0.5)
Identical to the previous example, but the `per_channel` feature is only
active for 50 percent of all images.
"""
def __init__(self, value=0, per_channel=False,
name=None, deterministic=False, random_state=None):
super(Add, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
self.value = iap.handle_continuous_param(
value, "value", value_range=None, tuple_to_uniform=True,
list_to_choice=True)
self.per_channel = iap.handle_probability_param(
per_channel, "per_channel")
def _augment_batch(self, batch, random_state, parents, hooks):
if batch.images is None:
return batch
images = batch.images
nb_images = len(images)
nb_channels_max = meta.estimate_max_number_of_channels(images)
rss = random_state.duplicate(2)
per_channel_samples = self.per_channel.draw_samples(
(nb_images,), random_state=rss[0])
value_samples = self.value.draw_samples(
(nb_images, nb_channels_max), random_state=rss[1])
gen = enumerate(zip(images, value_samples, per_channel_samples))
for i, (image, value_samples_i, per_channel_samples_i) in gen:
nb_channels = image.shape[2]
# Example code to directly add images via image+sample (uint8 only)
# if per_channel_samples_i > 0.5:
# result = []
# image = image.astype(np.int16)
# value_samples_i = value_samples_i.astype(np.int16)
# for c, value in enumerate(value_samples_i[0:nb_channels]):
# result.append(
# np.clip(
# image[..., c:c+1] + value, 0, 255
# ).astype(np.uint8))
# images[i] = np.concatenate(result, axis=2)
# else:
# images[i] = np.clip(
# image.astype(np.int16)
# + value_samples_i[0].astype(np.int16),
# 0, 255
# ).astype(np.uint8)
if per_channel_samples_i > 0.5:
value = value_samples_i[0:nb_channels]
else:
# the if/else here catches the case of the channel axis being 0
value = value_samples_i[0] if value_samples_i.size > 0 else []
batch.images[i] = add_scalar(image, value)
return batch
def get_parameters(self):
return [self.value, self.per_channel]
# TODO merge this with Add
class AddElementwise(meta.Augmenter):
"""
Add to the pixels of images values that are pixelwise randomly sampled.
While the ``Add`` Augmenter samples one value to add *per image* (and
optionally per channel), this augmenter samples different values per image
and *per pixel* (and optionally per channel), i.e. intensities of
neighbouring pixels may be increased/decreased by different amounts.
dtype support::
See :func:`imgaug.augmenters.arithmetic.add_elementwise`.
Parameters
----------
value : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional
Value to add to the pixels.
* If an int, exactly that value will always be used.
* If a tuple ``(a, b)``, then values from the discrete interval
``[a..b]`` will be sampled per image and pixel.
* If a list of integers, a random value will be sampled from the
list per image and pixel.
* If a ``StochasticParameter``, then values will be sampled per
image and pixel from that parameter.
per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
Whether to use (imagewise) the same sample(s) for all
channels (``False``) or to sample value(s) for each channel (``True``).
Setting this to ``True`` will therefore lead to different
transformations per image *and* channel, otherwise only per image.
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as ``True``.
If it is a ``StochasticParameter`` it is expected to produce samples
with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
lead to per-channel behaviour (i.e. same as ``True``).
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.AddElementwise(10)
Always adds a value of 10 to all channels of all pixels of all input
images.
>>> aug = iaa.AddElementwise((-10, 10))
Samples per image and pixel a value from the discrete interval
``[-10..10]`` and adds that value to the respective pixel.
>>> aug = iaa.AddElementwise((-10, 10), per_channel=True)
Samples per image, pixel *and also channel* a value from the discrete
interval ``[-10..10]`` and adds it to the respective pixel's channel value.
Therefore, added values may differ between channels of the same pixel.
>>> aug = iaa.AddElementwise((-10, 10), per_channel=0.5)
Identical to the previous example, but the `per_channel` feature is only
active for 50 percent of all images.
"""
def __init__(self, value=0, per_channel=False,
name=None, deterministic=False, random_state=None):
super(AddElementwise, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
self.value = iap.handle_continuous_param(
value, "value", value_range=None, tuple_to_uniform=True,
list_to_choice=True)
self.per_channel = iap.handle_probability_param(
per_channel, "per_channel")
def _augment_batch(self, batch, random_state, parents, hooks):
if batch.images is None:
return batch
images = batch.images
nb_images = len(images)
rss = random_state.duplicate(1+nb_images)
per_channel_samples = self.per_channel.draw_samples(
(nb_images,), random_state=rss[0])
gen = enumerate(zip(images, per_channel_samples, rss[1:]))
for i, (image, per_channel_samples_i, rs) in gen:
height, width, nb_channels = image.shape
sample_shape = (height,
width,
nb_channels if per_channel_samples_i > 0.5 else 1)
values = self.value.draw_samples(sample_shape, random_state=rs)
batch.images[i] = add_elementwise(image, values)
return batch
def get_parameters(self):
return [self.value, self.per_channel]
# TODO rename to AddGaussianNoise?
# TODO examples say that iaa.AdditiveGaussianNoise(scale=(0, 0.1*255)) samples
# the scale from the uniform dist. per image, but is that still the case?
# AddElementwise seems to now sample once for all images, which should
# lead to a single scale value.
class AdditiveGaussianNoise(AddElementwise):
"""
Add noise sampled from gaussian distributions elementwise to images.
This augmenter samples and adds noise elementwise, i.e. it can add
different noise values to neighbouring pixels and is comparable
to ``AddElementwise``.
dtype support::
See ``imgaug.augmenters.arithmetic.AddElementwise``.
Parameters
----------
loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Mean of the normal distribution from which the noise is sampled.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value from the interval
``[a, b]`` will be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a ``StochasticParameter``, a value will be sampled from the
parameter per image.
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Standard deviation of the normal distribution that generates the noise.
Must be ``>=0``. If ``0`` then `loc` will simply be added to all
pixels.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value from the interval
``[a, b]`` will be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a ``StochasticParameter``, a value will be sampled from the
parameter per image.
per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
Whether to use (imagewise) the same sample(s) for all
channels (``False``) or to sample value(s) for each channel (``True``).
Setting this to ``True`` will therefore lead to different
transformations per image *and* channel, otherwise only per image.
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as ``True``.
If it is a ``StochasticParameter`` it is expected to produce samples
with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
lead to per-channel behaviour (i.e. same as ``True``).
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255)
Adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images.
The samples are drawn per image and pixel.
>>> aug = iaa.AdditiveGaussianNoise(scale=(0, 0.1*255))
Adds gaussian noise from the distribution ``N(0, s)`` to images,
where ``s`` is sampled per image from the interval ``[0, 0.1*255]``.
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=True)
Adds gaussian noise from the distribution ``N(0, 0.1*255)`` to images,
where the noise value is different per image and pixel *and* channel (e.g.
a different one for red, green and blue channels of the same pixel).
This leads to "colorful" noise.
>>> aug = iaa.AdditiveGaussianNoise(scale=0.1*255, per_channel=0.5)
Identical to the previous example, but the `per_channel` feature is only
active for 50 percent of all images.
"""
def __init__(self, loc=0, scale=0, per_channel=False,
name=None, deterministic=False, random_state=None):
loc2 = iap.handle_continuous_param(
loc, "loc", value_range=None, tuple_to_uniform=True,
list_to_choice=True)
scale2 = iap.handle_continuous_param(
scale, "scale", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
value = iap.Normal(loc=loc2, scale=scale2)
super(AdditiveGaussianNoise, self).__init__(
value, per_channel=per_channel, name=name,
deterministic=deterministic, random_state=random_state)
# TODO rename to AddLaplaceNoise?
class AdditiveLaplaceNoise(AddElementwise):
"""
Add noise sampled from laplace distributions elementwise to images.
The laplace distribution is similar to the gaussian distribution, but
puts more weight on the long tail. Hence, this noise will add more
outliers (very high/low values). It is somewhere between gaussian noise and
salt and pepper noise.
Values of around ``255 * 0.05`` for `scale` lead to visible noise (for
``uint8``).
Values of around ``255 * 0.10`` for `scale` lead to very visible
noise (for ``uint8``).
It is recommended to usually set `per_channel` to ``True``.
This augmenter samples and adds noise elementwise, i.e. it can add
different noise values to neighbouring pixels and is comparable
to ``AddElementwise``.
dtype support::
See ``imgaug.augmenters.arithmetic.AddElementwise``.
Parameters
----------
loc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Mean of the laplace distribution that generates the noise.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value from the interval
``[a, b]`` will be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a ``StochasticParameter``, a value will be sampled from the
parameter per image.
scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Standard deviation of the laplace distribution that generates the noise.
Must be ``>=0``. If ``0`` then only `loc` will be used.
Recommended to be around ``255*0.05``.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value from the interval
``[a, b]`` will be sampled per image.
* If a list, then a random value will be sampled from that list per
image.
* If a ``StochasticParameter``, a value will be sampled from the
parameter per image.
per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
Whether to use (imagewise) the same sample(s) for all
channels (``False``) or to sample value(s) for each channel (``True``).
Setting this to ``True`` will therefore lead to different
transformations per image *and* channel, otherwise only per image.
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as ``True``.
If it is a ``StochasticParameter`` it is expected to produce samples
with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
lead to per-channel behaviour (i.e. same as ``True``).
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.AdditiveLaplaceNoise(scale=0.1*255)
Adds laplace noise from the distribution ``Laplace(0, 0.1*255)`` to images.
The samples are drawn per image and pixel.
>>> aug = iaa.AdditiveLaplaceNoise(scale=(0, 0.1*255))
Adds laplace noise from the distribution ``Laplace(0, s)`` to images,
where ``s`` is sampled per image from the interval ``[0, 0.1*255]``.
>>> aug = iaa.AdditiveLaplaceNoise(scale=0.1*255, per_channel=True)
Adds laplace noise from the distribution ``Laplace(0, 0.1*255)`` to images,
where the noise value is different per image and pixel *and* channel (e.g.
a different one for the red, green and blue channels of the same pixel).
This leads to "colorful" noise.
>>> aug = iaa.AdditiveLaplaceNoise(scale=0.1*255, per_channel=0.5)
Identical to the previous example, but the `per_channel` feature is only
active for 50 percent of all images.
"""
def __init__(self, loc=0, scale=0, per_channel=False,
name=None, deterministic=False, random_state=None):
loc2 = iap.handle_continuous_param(
loc, "loc", value_range=None, tuple_to_uniform=True,
list_to_choice=True)
scale2 = iap.handle_continuous_param(
scale, "scale", value_range=(0, None), tuple_to_uniform=True,
list_to_choice=True)
value = iap.Laplace(loc=loc2, scale=scale2)
super(AdditiveLaplaceNoise, self).__init__(
value,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state)
# TODO rename to AddPoissonNoise?
class AdditivePoissonNoise(AddElementwise):
"""
Add noise sampled from poisson distributions elementwise to images.
Poisson noise is comparable to gaussian noise, as e.g. generated via
``AdditiveGaussianNoise``. As poisson distributions produce only positive
numbers, the signs of the sampled values are randomly flipped here.
Values of around ``10.0`` for `lam` lead to visible noise (for ``uint8``).
Values of around ``20.0`` for `lam` lead to very visible noise (for
``uint8``).
It is recommended to usually set `per_channel` to ``True``.
This augmenter samples and adds noise elementwise, i.e. it can add
different noise values to neighbouring pixels and is comparable
to ``AddElementwise``.
dtype support::
See ``imgaug.augmenters.arithmetic.AddElementwise``.
Parameters
----------
lam : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Lambda parameter of the poisson distribution. Must be ``>=0``.
Recommended values are around ``0.0`` to ``10.0``.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value from the interval
``[a, b]`` will be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, a value will be sampled from the
parameter per image.
per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
Whether to use (imagewise) the same sample(s) for all
channels (``False``) or to sample value(s) for each channel (``True``).
Setting this to ``True`` will therefore lead to different
transformations per image *and* channel, otherwise only per image.
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as ``True``.
If it is a ``StochasticParameter`` it is expected to produce samples
with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
lead to per-channel behaviour (i.e. same as ``True``).
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.AdditivePoissonNoise(lam=5.0)
Adds poisson noise sampled from a poisson distribution with a ``lambda``
parameter of ``5.0`` to images.
The samples are drawn per image and pixel.
>>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0))
Adds poisson noise sampled from ``Poisson(x)`` to images, where ``x`` is
randomly sampled per image from the interval ``[0.0, 10.0]``.
>>> aug = iaa.AdditivePoissonNoise(lam=5.0, per_channel=True)
Adds poisson noise sampled from ``Poisson(5.0)`` to images,
where the values are different per image and pixel *and* channel (e.g. a
different one for red, green and blue channels for the same pixel).
>>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0), per_channel=True)
Adds poisson noise sampled from ``Poisson(x)`` to images,
with ``x`` being sampled from ``uniform(0.0, 10.0)`` per image and
channel. This is the *recommended* configuration.
>>> aug = iaa.AdditivePoissonNoise(lam=(0.0, 10.0), per_channel=0.5)
Identical to the previous example, but the `per_channel` feature is only
active for 50 percent of all images.
"""
def __init__(self, lam=0, per_channel=False,
name=None, deterministic=False, random_state=None):
lam2 = iap.handle_continuous_param(
lam, "lam",
value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
value = iap.RandomSign(iap.Poisson(lam=lam2))
super(AdditivePoissonNoise, self).__init__(
value,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state)
class Multiply(meta.Augmenter):
"""
Multiply all pixels in an image with a random value sampled once per image.
This augmenter can be used to make images lighter or darker.
dtype support::
See :func:`imgaug.augmenters.arithmetic.multiply_scalar`.
Parameters
----------
mul : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
The value with which to multiply the pixel values in each image.
* If a number, then that value will always be used.
* If a tuple ``(a, b)``, then a value from the interval ``[a, b]``
will be sampled per image and used for all pixels.
* If a list, then a random value will be sampled from that list per
image.
* If a ``StochasticParameter``, then that parameter will be used to
sample a new value per image.
per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
Whether to use (imagewise) the same sample(s) for all
channels (``False``) or to sample value(s) for each channel (``True``).
Setting this to ``True`` will therefore lead to different
transformations per image *and* channel, otherwise only per image.
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as ``True``.
If it is a ``StochasticParameter`` it is expected to produce samples
with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
lead to per-channel behaviour (i.e. same as ``True``).
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Multiply(2.0)
Multiplies all images by a factor of ``2``, making the images significantly
brighter.
>>> aug = iaa.Multiply((0.5, 1.5))
Multiplies images by a random value sampled uniformly from the interval
``[0.5, 1.5]``, making some images darker and others brighter.
>>> aug = iaa.Multiply((0.5, 1.5), per_channel=True)
Identical to the previous example, but the sampled multipliers differ by
image *and* channel, instead of only by image.
>>> aug = iaa.Multiply((0.5, 1.5), per_channel=0.5)
Identical to the previous example, but the `per_channel` feature is only
active for 50 percent of all images.
"""
def __init__(self, mul=1.0, per_channel=False,
name=None, deterministic=False, random_state=None):
super(Multiply, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
self.mul = iap.handle_continuous_param(
mul, "mul", value_range=None, tuple_to_uniform=True,
list_to_choice=True)
self.per_channel = iap.handle_probability_param(
per_channel, "per_channel")
def _augment_batch(self, batch, random_state, parents, hooks):
if batch.images is None:
return batch
images = batch.images
nb_images = len(images)
nb_channels_max = meta.estimate_max_number_of_channels(images)
rss = random_state.duplicate(2)
per_channel_samples = self.per_channel.draw_samples(
(nb_images,), random_state=rss[0])
mul_samples = self.mul.draw_samples(
(nb_images, nb_channels_max), random_state=rss[1])
gen = enumerate(zip(images, mul_samples, per_channel_samples))
for i, (image, mul_samples_i, per_channel_samples_i) in gen:
nb_channels = image.shape[2]
# Example code to directly multiply images via image*sample
# (uint8 only) -- apparently slower than LUT
# if per_channel_samples_i > 0.5:
# result = []
# image = image.astype(np.float32)
# mul_samples_i = mul_samples_i.astype(np.float32)
# for c, mul in enumerate(mul_samples_i[0:nb_channels]):
# result.append(
# np.clip(
# image[..., c:c+1] * mul, 0, 255
# ).astype(np.uint8))
# images[i] = np.concatenate(result, axis=2)
# else:
# images[i] = np.clip(
# image.astype(np.float32)
# * mul_samples_i[0].astype(np.float32),
# 0, 255
# ).astype(np.uint8)
if per_channel_samples_i > 0.5:
mul = mul_samples_i[0:nb_channels]
else:
# the if/else here catches the case of the channel axis being 0
mul = mul_samples_i[0] if mul_samples_i.size > 0 else []
batch.images[i] = multiply_scalar(image, mul)
return batch
def get_parameters(self):
return [self.mul, self.per_channel]
# TODO merge with Multiply
class MultiplyElementwise(meta.Augmenter):
"""
Multiply image pixels with values that are pixelwise randomly sampled.
While the ``Multiply`` Augmenter uses a constant multiplier *per
image* (and optionally channel), this augmenter samples the multipliers
to use per image and *per pixel* (and optionally per channel).
dtype support::
See :func:`imgaug.augmenters.arithmetic.multiply_elementwise`.
Parameters
----------
mul : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
The value with which to multiply pixel values in the image.
* If a number, then that value will always be used.
* If a tuple ``(a, b)``, then a value from the interval ``[a, b]``
will be sampled per image and pixel.
* If a list, then a random value will be sampled from that list
per image and pixel.
* If a ``StochasticParameter``, then that parameter will be used to
sample a new value per image and pixel.
per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
Whether to use (imagewise) the same sample(s) for all
channels (``False``) or to sample value(s) for each channel (``True``).
Setting this to ``True`` will therefore lead to different
transformations per image *and* channel, otherwise only per image.
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as ``True``.
If it is a ``StochasticParameter`` it is expected to produce samples
with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
lead to per-channel behaviour (i.e. same as ``True``).
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.MultiplyElementwise(2.0)
    Multiplies all images by a factor of ``2.0``, making them significantly
    brighter.
>>> aug = iaa.MultiplyElementwise((0.5, 1.5))
Samples per image and pixel uniformly a value from the interval
``[0.5, 1.5]`` and multiplies the pixel with that value.
>>> aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=True)
Samples per image and pixel *and channel* uniformly a value from the
interval ``[0.5, 1.5]`` and multiplies the pixel with that value. Therefore,
used multipliers may differ between channels of the same pixel.
>>> aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=0.5)
Identical to the previous example, but the `per_channel` feature is only
active for 50 percent of all images.
"""
def __init__(self, mul=1.0, per_channel=False,
name=None, deterministic=False, random_state=None):
super(MultiplyElementwise, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
self.mul = iap.handle_continuous_param(
mul, "mul",
value_range=None, tuple_to_uniform=True, list_to_choice=True)
self.per_channel = iap.handle_probability_param(per_channel,
"per_channel")
def _augment_batch(self, batch, random_state, parents, hooks):
if batch.images is None:
return batch
images = batch.images
nb_images = len(images)
rss = random_state.duplicate(1+nb_images)
per_channel_samples = self.per_channel.draw_samples(
(nb_images,), random_state=rss[0])
is_mul_binomial = isinstance(self.mul, iap.Binomial) or (
isinstance(self.mul, iap.FromLowerResolution)
and isinstance(self.mul.other_param, iap.Binomial)
)
gen = enumerate(zip(images, per_channel_samples, rss[1:]))
for i, (image, per_channel_samples_i, rs) in gen:
height, width, nb_channels = image.shape
sample_shape = (height,
width,
nb_channels if per_channel_samples_i > 0.5 else 1)
mul = self.mul.draw_samples(sample_shape, random_state=rs)
# TODO let Binomial return boolean mask directly instead of [0, 1]
# integers?
# hack to improve performance for Dropout and CoarseDropout
# converts mul samples to mask if mul is binomial
if mul.dtype.kind != "b" and is_mul_binomial:
mul = mul.astype(bool, copy=False)
batch.images[i] = multiply_elementwise(image, mul)
return batch
def get_parameters(self):
return [self.mul, self.per_channel]
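# Illustrative sketch (not part of the original module): the per-pixel
# multiplication performed above is an elementwise product with a noise
# array that is broadcast over the channel axis when per_channel is off.
# ``rng`` is assumed to be a numpy Generator (np.random.default_rng()).
def _sketch_multiply_elementwise_uint8(image, rng):
    """Multiply each pixel of a uint8 image by its own sample from [0.5, 1.5]."""
    mul = rng.uniform(0.5, 1.5, size=image.shape[:2] + (1,))
    return np.clip(image.astype(np.float32) * mul, 0, 255).astype(np.uint8)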
# TODO verify that (a, b) still leads to a p being sampled per image and not
# per batch
class Dropout(MultiplyElementwise):
"""
Set a fraction of pixels in images to zero.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. to set it to zero).
* If a float, then that value will be used for all images. A value
of ``1.0`` would mean that all pixels will be dropped
and ``0.0`` that no pixels will be dropped. A value of ``0.05``
corresponds to ``5`` percent of all pixels being dropped.
* If a tuple ``(a, b)``, then a value ``p`` will be sampled from
the interval ``[a, b]`` per image and be used as the pixel's
dropout probability.
* If a ``StochasticParameter``, then this parameter will be used to
determine per pixel whether it should be *kept* (sampled value
of ``>0.5``) or shouldn't be kept (sampled value of ``<=0.5``).
If you instead want to provide the probability as a stochastic
parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
to convert parameter `p` to a 0/1 representation.
per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
Whether to use (imagewise) the same sample(s) for all
channels (``False``) or to sample value(s) for each channel (``True``).
Setting this to ``True`` will therefore lead to different
transformations per image *and* channel, otherwise only per image.
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as ``True``.
If it is a ``StochasticParameter`` it is expected to produce samples
with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
lead to per-channel behaviour (i.e. same as ``True``).
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Dropout(0.02)
Drops ``2`` percent of all pixels.
>>> aug = iaa.Dropout((0.0, 0.05))
Drops in each image a random fraction of all pixels, where the fraction
is uniformly sampled from the interval ``[0.0, 0.05]``.
>>> aug = iaa.Dropout(0.02, per_channel=True)
Drops ``2`` percent of all pixels in a channelwise fashion, i.e. it is
unlikely for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.Dropout(0.02, per_channel=0.5)
Identical to the previous example, but the `per_channel` feature is only
active for ``50`` percent of all images.
"""
def __init__(self, p=0, per_channel=False,
name=None, deterministic=False, random_state=None):
# TODO add list as an option
if ia.is_single_number(p):
p2 = iap.Binomial(1 - p)
elif ia.is_iterable(p):
assert len(p) == 2, (
"Expected 'p' given as an iterable to contain exactly 2 values, "
"got %d." % (len(p),))
assert p[0] < p[1], (
"Expected 'p' given as iterable to contain exactly 2 values (a, b) "
"with a < b. Got %.4f and %.4f." % (p[0], p[1]))
assert 0 <= p[0] <= 1.0 and 0 <= p[1] <= 1.0, (
"Expected 'p' given as iterable to only contain values in the "
"interval [0.0, 1.0], got %.4f and %.4f." % (p[0], p[1]))
p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
elif isinstance(p, iap.StochasticParameter):
p2 = p
else:
raise Exception(
"Expected p to be float or int or StochasticParameter, got %s." % (
type(p),))
super(Dropout, self).__init__(
p2,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state)
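# Illustrative sketch (not part of the original module): Dropout feeds
# Binomial(1 - p) into MultiplyElementwise, so every pixel is multiplied by
# 1 (kept) with probability 1-p and by 0 (dropped) with probability p. A
# minimal numpy-only equivalent, with ``rng`` a numpy Generator:
def _sketch_dropout(image, p, rng):
    """Zero out each pixel of an image independently with probability ``p``."""
    keep = rng.binomial(1, 1.0 - p, size=image.shape[:2] + (1,))
    return (image * keep).astype(image.dtype)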
# TODO add similar cutout augmenter
# TODO invert size_p and size_percent so that larger values denote larger
# areas being dropped instead of the opposite way around
class CoarseDropout(MultiplyElementwise):
"""
Set rectangular areas within images to zero.
In contrast to ``Dropout``, these areas can have larger sizes.
(E.g. you might end up with three large black rectangles in an image.)
Note that the current implementation leads to correlated sizes,
so if e.g. there is any thin and high rectangle that is dropped, there is
a high likelihood that all other dropped areas are also thin and high.
This method is implemented by generating the dropout mask at a
lower resolution (than the image has) and then upsampling the mask
before dropping the pixels.
This augmenter is similar to Cutout. Usually, cutout is defined as an
operation that drops exactly one rectangle from an image, while here
``CoarseDropout`` can drop multiple rectangles (with some correlation
between the sizes of these rectangles).
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. set to zero) in
the lower-resolution dropout mask.
* If a float, then that value will be used for all pixels. A value
of ``1.0`` would mean, that all pixels will be dropped. A value
of ``0.0`` would lead to no pixels being dropped.
* If a tuple ``(a, b)``, then a value ``p`` will be sampled from
the interval ``[a, b]`` per image and be used as the dropout
probability.
* If a ``StochasticParameter``, then this parameter will be used to
determine per pixel whether it should be *kept* (sampled value
of ``>0.5``) or shouldn't be kept (sampled value of ``<=0.5``).
If you instead want to provide the probability as a stochastic
parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
to convert parameter `p` to a 0/1 representation.
size_px : None or int or tuple of int or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the dropout
mask in absolute pixel dimensions.
Note that this means that *lower* values of this parameter lead to
*larger* areas being dropped (as any pixel in the lower resolution
image will correspond to a larger area at the original resolution).
* If ``None`` then `size_percent` must be set.
* If an integer, then that size will always be used for both height
and width. E.g. a value of ``3`` would lead to a ``3x3`` mask,
          which is then upsampled to ``HxW``, where ``H`` is the image
          height and ``W`` the image width.
* If a tuple ``(a, b)``, then two values ``M``, ``N`` will be
sampled from the discrete interval ``[a..b]``. The dropout mask
will then be generated at size ``MxN`` and upsampled to ``HxW``.
* If a ``StochasticParameter``, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : None or float or tuple of float or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the dropout
mask *in percent* of the input image.
Note that this means that *lower* values of this parameter lead to
*larger* areas being dropped (as any pixel in the lower resolution
image will correspond to a larger area at the original resolution).
* If ``None`` then `size_px` must be set.
* If a float, then that value will always be used as the percentage
of the height and width (relative to the original size). E.g. for
value ``p``, the mask will be sampled from ``(p*H)x(p*W)`` and
later upsampled to ``HxW``.
* If a tuple ``(a, b)``, then two values ``m``, ``n`` will be
sampled from the interval ``(a, b)`` and used as the size
fractions, i.e the mask size will be ``(m*H)x(n*W)``.
* If a ``StochasticParameter``, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
Whether to use (imagewise) the same sample(s) for all
channels (``False``) or to sample value(s) for each channel (``True``).
Setting this to ``True`` will therefore lead to different
transformations per image *and* channel, otherwise only per image.
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as ``True``.
If it is a ``StochasticParameter`` it is expected to produce samples
with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
lead to per-channel behaviour (i.e. same as ``True``).
min_size : int, optional
Minimum height and width of the low resolution mask. If
`size_percent` or `size_px` leads to a lower value than this,
`min_size` will be used instead. This should never have a value of
less than ``2``, otherwise one may end up with a ``1x1`` low resolution
mask, leading easily to the whole image being dropped.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5)
Drops ``2`` percent of all pixels on a lower-resolution image that has
``50`` percent of the original image's size, leading to dropped areas that
have roughly ``2x2`` pixels size.
>>> aug = iaa.CoarseDropout((0.0, 0.05), size_percent=(0.05, 0.5))
Generates a dropout mask at ``5`` to ``50`` percent of each input image's
size. In that mask, ``0`` to ``5`` percent of all pixels are marked as
being dropped. The mask is afterwards projected to the input image's
size to apply the actual dropout operation.
>>> aug = iaa.CoarseDropout((0.0, 0.05), size_px=(2, 16))
Same as the previous example, but the lower resolution image has ``2`` to
    ``16`` pixels size. On images of e.g. ``224x224`` pixels in size this would
lead to fairly large areas being dropped (height/width of ``224/2`` to
``224/16``).
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=True)
Drops ``2`` percent of all pixels at ``50`` percent resolution (``2x2``
sizes) in a channel-wise fashion, i.e. it is unlikely for any pixel to
have all channels set to zero (black pixels).
>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=0.5)
Same as the previous example, but the `per_channel` feature is only active
for ``50`` percent of all images.
"""
def __init__(self, p=0, size_px=None, size_percent=None, per_channel=False,
min_size=4,
name=None, deterministic=False, random_state=None):
if ia.is_single_number(p):
p2 = iap.Binomial(1 - p)
elif ia.is_iterable(p):
assert len(p) == 2, (
"Expected 'p' given as an iterable to contain exactly 2 values, "
"got %d." % (len(p),))
assert p[0] < p[1], (
"Expected 'p' given as iterable to contain exactly 2 values (a, b) "
"with a < b. Got %.4f and %.4f." % (p[0], p[1]))
assert 0 <= p[0] <= 1.0 and 0 <= p[1] <= 1.0, (
"Expected 'p' given as iterable to only contain values in the "
"interval [0.0, 1.0], got %.4f and %.4f." % (p[0], p[1]))
p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
elif isinstance(p, iap.StochasticParameter):
p2 = p
else:
raise Exception("Expected p to be float or int or StochasticParameter, "
"got %s." % (type(p),))
if size_px is not None:
p3 = iap.FromLowerResolution(other_param=p2, size_px=size_px,
min_size=min_size)
elif size_percent is not None:
p3 = iap.FromLowerResolution(other_param=p2, size_percent=size_percent,
min_size=min_size)
else:
raise Exception("Either size_px or size_percent must be set.")
super(CoarseDropout, self).__init__(
p3,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state)
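# Illustrative sketch (not part of the original module): the coarse-mask
# idea described in the docstring -- sample a small keep/drop mask and
# upsample it by repetition so that every low-resolution cell becomes a
# rectangular area. Assumes height and width are multiples of ``size_px``
# and ``rng`` is a numpy Generator.
def _sketch_coarse_mask(height, width, size_px, p, rng):
    """Return a ``height x width`` 0/1 keep-mask with coarse dropped areas."""
    small = rng.binomial(1, 1.0 - p, size=(size_px, size_px))
    return np.repeat(np.repeat(small, height // size_px, axis=0),
                     width // size_px, axis=1)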
class ReplaceElementwise(meta.Augmenter):
"""
Replace pixels in an image with new values.
dtype support::
See :func:`imgaug.augmenters.arithmetic.replace_elementwise_`.
Parameters
----------
mask : float or tuple of float or list of float or imgaug.parameters.StochasticParameter
Mask that indicates the pixels that are supposed to be replaced.
The mask will be binarized using a threshold of ``0.5``. A value
of ``1`` then indicates a pixel that is supposed to be replaced.
* If this is a float, then that value will be used as the
probability of being a ``1`` in the mask (sampled per image and
pixel) and hence being replaced.
* If a tuple ``(a, b)``, then the probability will be uniformly
sampled per image from the interval ``[a, b]``.
* If a list, then a random value will be sampled from that list
per image and pixel.
* If a ``StochasticParameter``, then this parameter will be used to
sample a mask per image.
replacement : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
The replacement to use at all locations that are marked as ``1`` in
the mask.
* If this is a number, then that value will always be used as the
replacement.
* If a tuple ``(a, b)``, then the replacement will be sampled
uniformly per image and pixel from the interval ``[a, b]``.
* If a list, then a random value will be sampled from that list
per image and pixel.
* If a ``StochasticParameter``, then this parameter will be used
          to sample replacement values per image and pixel.
per_channel : bool or float or imgaug.parameters.StochasticParameter, optional
Whether to use (imagewise) the same sample(s) for all
channels (``False``) or to sample value(s) for each channel (``True``).
Setting this to ``True`` will therefore lead to different
transformations per image *and* channel, otherwise only per image.
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as ``True``.
If it is a ``StochasticParameter`` it is expected to produce samples
with values between ``0.0`` and ``1.0``, where values ``>0.5`` will
lead to per-channel behaviour (i.e. same as ``True``).
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
    >>> aug = iaa.ReplaceElementwise(0.05, [0, 255])
Replaces ``5`` percent of all pixels in each image by either ``0``
or ``255``.
>>> import imgaug.augmenters as iaa
    >>> aug = iaa.ReplaceElementwise(0.1, [0, 255], per_channel=0.5)
For ``50%`` of all images, replace ``10%`` of all pixels with either the
value ``0`` or the value ``255`` (same as in the previous example). For
the other ``50%`` of all images, replace *channelwise* ``10%`` of all
pixels with either the value ``0`` or the value ``255``. So, it will be
very rare for each pixel to have all channels replaced by ``255`` or
``0``.
>>> import imgaug.augmenters as iaa
>>> import imgaug.parameters as iap
    >>> aug = iaa.ReplaceElementwise(0.1, iap.Normal(128, 0.4*128), per_channel=0.5)
Replace ``10%`` of all pixels by gaussian noise centered around ``128``.
Both the replacement mask and the gaussian noise are sampled channelwise
for ``50%`` of all images.
>>> import imgaug.augmenters as iaa
>>> import imgaug.parameters as iap
    >>> aug = iaa.ReplaceElementwise(
>>> iap.FromLowerResolution(iap.Binomial(0.1), size_px=8),
>>> iap.Normal(128, 0.4*128),
>>> per_channel=0.5)
Replace ``10%`` of all pixels by gaussian noise centered around ``128``.
Sample the replacement mask at a lower resolution (``8x8`` pixels) and
upscale it to the image size, resulting in coarse areas being replaced by
gaussian noise.
"""
def __init__(self, mask, replacement, per_channel=False,
name=None, deterministic=False, random_state=None):
super(ReplaceElementwise, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
self.mask = iap.handle_probability_param(
mask, "mask", tuple_to_uniform=True, list_to_choice=True)
self.replacement = iap.handle_continuous_param(replacement,
"replacement")
self.per_channel = iap.handle_probability_param(per_channel,
"per_channel")
def _augment_batch(self, batch, random_state, parents, hooks):
if batch.images is None:
return batch
images = batch.images
nb_images = len(images)
rss = random_state.duplicate(1+2*nb_images)
per_channel_samples = self.per_channel.draw_samples(
(nb_images,), random_state=rss[0])
gen = enumerate(zip(images, per_channel_samples, rss[1::2], rss[2::2]))
for i, (image, per_channel_i, rs_mask, rs_replacement) in gen:
height, width, nb_channels = image.shape
sampling_shape = (height,
width,
nb_channels if per_channel_i > 0.5 else 1)
mask_samples = self.mask.draw_samples(sampling_shape,
random_state=rs_mask)
# TODO add separate per_channels for mask and replacement
# TODO add test that replacement with per_channel=False is not
# sampled per channel
if per_channel_i <= 0.5:
nb_channels = image.shape[-1]
replacement_samples = self.replacement.draw_samples(
(int(np.sum(mask_samples[:, :, 0])),),
random_state=rs_replacement)
# important here to use repeat instead of tile. repeat
# converts e.g. [0, 1, 2] to [0, 0, 1, 1, 2, 2], while tile
# leads to [0, 1, 2, 0, 1, 2]. The assignment below iterates
# over each channel and pixel simultaneously, *not* first
# over all pixels of channel 0, then all pixels in
# channel 1, ...
replacement_samples = np.repeat(replacement_samples,
nb_channels)
else:
replacement_samples = self.replacement.draw_samples(
                    (int(np.sum(mask_samples)),),
                    random_state=rs_replacement)
            batch.images[i] = replace_elementwise_(image, mask_samples,
                                                   replacement_samples)
        return batch
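# Illustrative sketch (not part of the original module): ReplaceElementwise
# with a ``[0, 255]`` replacement list amounts to salt-and-pepper noise. A
# numpy-only equivalent for a uint8 image, with ``rng`` a numpy Generator:
def _sketch_salt_and_pepper(image, p, rng):
    """Replace each pixel with 0 or 255, with total probability ``p``."""
    mask = rng.random(image.shape[:2] + (1,)) < p
    salt = (rng.integers(0, 2, size=image.shape[:2] + (1,)) * 255).astype(image.dtype)
    return np.where(mask, salt, image)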
#!/usr/bin/env python
#
# Copyright 2015, 2016 <NAME> (original version)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import math
import roslib; roslib.load_manifest('ur_driver')
import rospy
import actionlib
from openpyxl import Workbook
from control_msgs.msg import *
from trajectory_msgs.msg import *
from sensor_msgs.msg import JointState
from math import pi
import numpy as np
import tensorflow as tf
from scipy.integrate import odeint
from math import exp
# From Files
from object_detection import IntelRealsense
from universal_robot_kinematics import invKine
from kinematics import fwd_kin
from last_kalman_filter import *
IntelRealsense = IntelRealsense()
JOINT_NAMES = ['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint',
'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
home = [0, -pi/2, pi/2, 0, pi/2, pi]
straight = [0, -pi/2, 0, -pi/2, 0, 0]
client = None
# <NAME>
#### Input Tensors ####
## Common Input ##
s = tf.placeholder(tf.float64,name='s')
tau = tf.placeholder(tf.float64,name='tau')
xg = tf.placeholder(tf.float64,name='xg')
yg = tf.placeholder(tf.float64,name='yg')
zg = tf.placeholder(tf.float64,name='zg')
## joints ##
g = (tf.placeholder(tf.float64,name='g1'),
tf.placeholder(tf.float64,name='g2'),
tf.placeholder(tf.float64,name='g3'),
tf.placeholder(tf.float64,name='g4'),
tf.placeholder(tf.float64,name='g5'),
tf.placeholder(tf.float64,name='g6'))
q = (tf.placeholder(tf.float64,name='q1'),
tf.placeholder(tf.float64,name='q2'),
tf.placeholder(tf.float64,name='q3'),
tf.placeholder(tf.float64,name='q4'),
tf.placeholder(tf.float64,name='q5'),
tf.placeholder(tf.float64,name='q6'))
qd = (tf.placeholder(tf.float64,name='qd1'),
tf.placeholder(tf.float64,name='qd2'),
tf.placeholder(tf.float64,name='qd3'),
tf.placeholder(tf.float64,name='qd4'),
tf.placeholder(tf.float64,name='qd5'),
      tf.placeholder(tf.float64,name='qd6'))
q0 = (tf.placeholder(tf.float64,name='q01'),
tf.placeholder(tf.float64,name='q02'),
tf.placeholder(tf.float64,name='q03'),
tf.placeholder(tf.float64,name='q04'),
tf.placeholder(tf.float64,name='q05'),
tf.placeholder(tf.float64,name='q06'))
def canoSystem(tau,t):
alpha_s = 4
s = exp(-tau*alpha_s*t)
return s
def dmp(g,q,qd,tau,s,q0,W,Name = "DMP"):
alpha = tf.constant(25,dtype=tf.float64)
beta = alpha/4
w,c,h = W
n_gaussian = w.shape[0]
with tf.name_scope(Name):
w_tensor = tf.constant(w,dtype=tf.float64,name='w')
c_tensor = tf.constant(c,dtype=tf.float64,name='c')
h_tensor = tf.constant(h,dtype=tf.float64,name='h')
with tf.name_scope('s'):
s_tensor = s*tf.ones(n_gaussian,dtype=tf.float64)
smc_pow = tf.pow(s_tensor-c_tensor,2)
h_smc_pow = tf.math.multiply(smc_pow,(-h_tensor))
with tf.name_scope('psi'):
psi = tf.math.exp(h_smc_pow)
sum_psi = tf.math.reduce_sum(psi,0)
wpsi = tf.math.multiply(w_tensor,psi)
wpsis = tf.math.reduce_sum(wpsi*s,0)
with tf.name_scope('fs'):
            fs = wpsis/sum_psi
qdd = alpha*(beta*(g-q)-tau*qd)+fs*(g-q0)
return qdd
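# Illustrative sketch (not from the original script): one way to roll a
# single DMP joint out in time with explicit Euler integration. ``qdd_fn``
# is a stand-in for evaluating the TensorFlow graph built by dmp() at the
# current state; it and ``q_start`` are assumptions of this sketch.
def _sketch_rollout_dmp(qdd_fn, q_start, tau=1.0, dt=0.01, steps=500):
    """Integrate qdd -> qd -> q and return the joint trajectory."""
    q_cur, qd_cur = q_start, 0.0
    path = []
    for k in range(steps):
        s_cur = canoSystem(tau, k * dt)         # phase variable decays to 0
        qdd_cur = qdd_fn(q_cur, qd_cur, s_cur)  # acceleration from the DMP
        qd_cur += qdd_cur * dt
        q_cur += qd_cur * dt
        path.append(q_cur)
    return np.array(path)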
#### Movement Library #####
dmps = [{},{},{},{},{},{}]
for i in range(15):
path = 'Demonstration/Demo{}/Weights/'.format(i+1)
for j in range(6): ### j = joint number
path_j = path+'Joint{}/'.format(j+1)
w = np.load(path_j+'w.npy')
w = np.reshape(w,(len(w),))
c = np.load(path_j+'c.npy')
c = np.reshape(c,(len(c),))
h = np.load(path_j+'h.npy')
h = np.reshape(h,(len(h),))
W = (w,c,h)
# def dmp(g,q,qd,tau,s,q0,W,Name = "DMP"):
dmps[j]['{}_{}'.format(j+1,i+1)] =tf.reshape(dmp(g[j], q[j], qd[j], tau, s, q0[j], W, Name="DMP{}_{}".format(j+1,i+1)),(1,))
##### Final Cartesian Positions of the Demonstrations #####
demo_x = np.array([-8.15926729e-01, -0.75961731, -0.3964087, -0.29553788, -0.04094927, -0.14693912, -0.41827111, -8.16843140e-01, -0.09284764, -0.57153495, -0.67251442, -0.36517125, -7.62308039e-01, -0.78029185, -6.57512038e-01])
demo_y = np.array([-2.96043917e-01, -0.18374539, 0.6690932, 0.21733157, 0.78624892, 0.7281835, -0.66857267, -2.92201916e-01, -0.77947085, -0.28442803, 0.36890422, -0.41997883, -1.20031233e-01, -0.19321253, -1.05877890e-01])
demo_z = np.array([-3.97988321e-03, 0.35300285, 0.13734106, 0.1860831, 0.06178831, 0.06178831, 0.10958549, -5.64177448e-03, 0.0383235, 0.33788756, 0.30410704, 0.47738503, 8.29937352e-03, 0.17253172, 3.62063583e-01])
#### Contribution Functions ####
with tf.name_scope("Con"):
xg_ref = tf.constant(demo_x, dtype=tf.float64,name="x_con")
yg_ref = tf.constant(demo_y, dtype=tf.float64,name="y_con")
zg_ref = tf.constant(demo_z, dtype=tf.float64,name="z_con")
xg2 = tf.pow(xg_ref-xg, 2)
yg2 = tf.pow(yg_ref-yg, 2)
zg2 = tf.pow(zg_ref-zg, 2)
    dist_sq = xg2+yg2+zg2
    con = 1.9947114020071635 * tf.math.exp(-0.5*dist_sq/0.4472135954999579) # Normal Distribution
#### Gating Network #####
dmp_joint = []
dmpNet = []
for i in range(len(dmps)):
values = list(dmps[i].values())
joint = tf.concat(values, axis=0)
with tf.name_scope('DMPNet{}'.format(i+1)):
dmpNet_i = tf.reduce_sum(tf.math.multiply(joint,con),axis=0)/tf.reduce_sum(con, axis=0)
dmpNet.append(dmpNet_i)
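# Illustrative sketch (not from the original script): the gating network
# above is a proximity-weighted average -- each demonstration's DMP output
# is weighted by how close its recorded goal is to the commanded goal. The
# normal-distribution constant in ``con`` cancels in the ratio, so a plain
# numpy version only needs the exponential weights. ``demo_goals`` is the
# (15, 3) stack of demo_x/demo_y/demo_z.
def _sketch_gate(dmp_outputs, goal, demo_goals, sigma=0.4472135954999579):
    """Blend per-demonstration DMP outputs by goal proximity."""
    d2 = np.sum((demo_goals - np.asarray(goal)) ** 2, axis=1)
    weights = np.exp(-0.5 * d2 / sigma)
    return np.sum(dmp_outputs * weights) / np.sum(weights)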
# <NAME>
def move_dmp_path(path_from_ode,time_from_ode):
g = FollowJointTrajectoryGoal()
g.trajectory = JointTrajectory()
g.trajectory.joint_names = JOINT_NAMES
try:
for via in range(0,len(path_from_ode)):
joint_update = path_from_ode[via][0:6]
joint_update[0:5] = joint_update[0:5] - (joint_update[0:5]>math.pi)*2*math.pi + (joint_update[0:5]<-math.pi)*2*math.pi
# print('Step %d %s' % (via,joint_update))
g.trajectory.points.append(JointTrajectoryPoint(positions=joint_update, velocities=[0]*6, time_from_start=rospy.Duration(time_from_ode[via])))
client.send_goal(g)
client.wait_for_result()
except KeyboardInterrupt:
client.cancel_goal()
raise
except:
raise
def set_home(set_position=home, set_duration=10):
g = FollowJointTrajectoryGoal()
g.trajectory = JointTrajectory()
g.trajectory.joint_names = JOINT_NAMES
try:
g.trajectory.points = [JointTrajectoryPoint(positions=set_position, velocities=[0]*6, time_from_start=rospy.Duration(set_duration))]
client.send_goal(g)
client.wait_for_result()
except KeyboardInterrupt:
client.cancel_goal()
raise
except:
raise
# def cost_func(out_invKine):
# out_invKine[0:5,:] = out_invKine[0:5,:] - (out_invKine[0:5,:]>math.pi)*2*math.pi + (out_invKine[0:5,:]<-math.pi)*2*math.pi
# # print('inverse pingpong %s' %out_invKine)
# weight = [1, 1.2, 1.2, 1, 1, 1]
# weight = np.resize(weight,(6,8))
# cost = np.multiply(np.square(out_invKine), weight)
# cost = np.sum(cost, axis=0)
# # print('cost %s' %cost)
# index_minimum = np.argmin(cost)
# print('index minimum %s' %index_minimum)
# return [joint[0,index_minimum] for joint in out_invKine]
def cost_func(out_invKine):
print('Pre-Inverse Kinematics : %s' %out_invKine)
mean = [-0.12973529, -1.17866925, 1.6847758, -0.60829703, 1.53953145, 3.1315828]
out_invKine[0:5,:] = out_invKine[0:5,:] - (out_invKine[0:5,:]>math.pi)*2*math.pi + (out_invKine[0:5,:]<-math.pi)*2*math.pi
print('Inverse Kinematics : %s' %out_invKine)
mean = np.resize(mean,(6,8))
cost = np.square( np.add(out_invKine, mean) )
print('Pre-Cost : %s' %cost)
weight = [1.5, 1.5, 1.5, 1, 1, 1]
weight = np.resize(weight,(6,8))
    cost = np.multiply(cost, weight)
    cost = np.sum(cost, axis=0)
    index_minimum = np.argmin(cost)
    print('Index minimum %s' % index_minimum)
    return [joint[0, index_minimum] for joint in out_invKine]
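# Illustrative usage (not from the original script; ``desired_pose`` is a
# placeholder for a 4x4 end-effector transform): invKine returns a 6x8
# matrix of analytic solutions and cost_func picks the cheapest one, e.g.
#   q_best = cost_func(invKine(desired_pose))  # list of 6 joint angles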
"""
Small demonstration of the hlines and vlines plots.
"""
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as rnd
def f(t):
    s1 = np.sin(2 * np.pi * t)
    e1 = np.exp(-t)
    return np.absolute(s1 * e1) + .05
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
"""
This module defines the :doc:`uproot.source.cursor.Cursor`, which maintains
a thread-local pointer into a :doc:`uproot.source.chunk.Chunk` and performs
the lowest level of interpretation (numbers, strings, raw arrays, etc.).
"""
import datetime
import struct
import sys
import numpy
import uproot
_printable_characters = (
"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLM"
"NOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ "
)
_raw_double32 = struct.Struct(">f")
_raw_float16 = struct.Struct(">BH")
# https://github.com/jblomer/root/blob/ntuple-binary-format-v1/tree/ntuple/v7/doc/specifications.md#basic-types
_rntuple_string_length = struct.Struct("<I")
_rntuple_datetime = struct.Struct("<Q")
class Cursor:
"""
Args:
index (int): Global seek position in the ROOT file or local position
in an uncompressed :doc:`uproot.source.chunk.Chunk`.
origin (int): Zero-point for numerical keys in ``refs``.
refs (None or dict): References to data already read in
:doc:`uproot.deserialization.read_object_any`.
Represents a seek point in a ROOT file, which may be held for later
reference or advanced while interpreting data from a
:doc:`uproot.source.chunk.Chunk`.
A cursor also holds references to previously read data that might be
requested by :doc:`uproot.deserialization.read_object_any`.
"""
def __init__(self, index, origin=0, refs=None):
self._index = index
self._origin = origin
self._refs = refs
def __repr__(self):
if self._origin == 0:
o = ""
else:
o = f", origin={self._origin}"
if self._refs is None or len(self._refs) == 0:
r = ""
elif self._refs is None or len(self._refs) < 3:
r = ", {} refs: {}".format(
len(self._refs), ", ".join(str(x) for x in self._refs)
)
else:
r = ", {} refs: {}...".format(
len(self._refs), ", ".join(str(x) for x in list(self._refs)[:3])
)
return f"Cursor({self._index}{o}{r})"
@property
def index(self):
"""
Global seek position in the ROOT file or local position in an
uncompressed :doc:`uproot.source.chunk.Chunk`.
"""
return self._index
@property
def origin(self):
"""
Zero-point for numerical keys in
:ref:`uproot.source.cursor.Cursor.refs`.
"""
return self._origin
@property
def refs(self):
"""
References to data already read in
:doc:`uproot.deserialization.read_object_any`.
"""
if self._refs is None:
self._refs = {}
return self._refs
def displacement(self, other=None):
"""
The number of bytes between this :doc:`uproot.source.cursor.Cursor`
and its :ref:`uproot.source.cursor.Cursor.origin` (if None)
or the ``other`` :doc:`uproot.source.cursor.Cursor` (if provided).
If the displacement is positive, ``self`` is later in the file than the
``origin`` or ``other``; if negative, it is earlier.
"""
if other is None:
return self._index - self._origin
else:
return self._index - other._index
def copy(self, link_refs=True):
"""
Returns a copy of this :doc:`uproot.source.cursor.Cursor`. If
``link_refs`` is True, any :ref:`uproot.source.cursor.Cursor.refs`
will be *referenced*, rather than *copied*.
"""
if link_refs or self._refs is None:
return Cursor(self._index, origin=self._origin, refs=self._refs)
else:
return Cursor(self._index, origin=self._origin, refs=dict(self._refs))
def move_to(self, index):
"""
Move the :ref:`uproot.source.cursor.Cursor.index` to a specified seek
position.
"""
self._index = index
def skip(self, num_bytes):
"""
Move the :ref:`uproot.source.cursor.Cursor.index` forward
``num_bytes``.
"""
self._index += num_bytes
def skip_after(self, obj):
"""
Move the :ref:`uproot.source.cursor.Cursor.index` just after an object
that has a starting ``obj.cursor`` and an expected ``obj.num_bytes``.
"""
start_cursor = getattr(obj, "cursor", None)
num_bytes = getattr(obj, "num_bytes", None)
if (
start_cursor is None
or not isinstance(start_cursor, Cursor)
or num_bytes is None
):
raise TypeError(
"Cursor.skip_after can only be used on an object with a "
"`cursor` and `num_bytes`, not {}".format(type(obj))
)
self._index = start_cursor.index + num_bytes
def skip_over(self, chunk, context):
"""
Args:
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
context (dict): Auxiliary data used in deserialization.
Move the :ref:`uproot.source.cursor.Cursor.index` to a seek position
beyond the serialized data for an object that can be interpreted with
:doc:`uproot.deserialization.numbytes_version`.
Returns True if successful (cursor has moved), False otherwise (cursor
has NOT moved).
"""
num_bytes, version, is_memberwise = uproot.deserialization.numbytes_version(
chunk, self, context, move=False
)
if num_bytes is None:
return False
else:
self._index += num_bytes
return True
def fields(self, chunk, format, context, move=True):
"""
Args:
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
format (``struct.Struct``): Specification to interpret the bytes of
data.
context (dict): Auxiliary data used in deserialization.
move (bool): If True, move the
:ref:`uproot.source.cursor.Cursor.index` past the fields;
otherwise, leave it where it is.
Interpret data at this :ref:`uproot.source.cursor.Cursor.index` with a
specified format. Returns a tuple of data whose types and length are
determined by the ``format``.
"""
start = self._index
stop = start + format.size
if move:
self._index = stop
return format.unpack(chunk.get(start, stop, self, context))
def field(self, chunk, format, context, move=True):
"""
Args:
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
format (``struct.Struct``): Specification to interpret the bytes of
data.
context (dict): Auxiliary data used in deserialization.
move (bool): If True, move the
:ref:`uproot.source.cursor.Cursor.index` past the fields;
otherwise, leave it where it is.
Interpret data at this :ref:`uproot.source.cursor.Cursor.index` with a
format that only specifies one field, returning a single item instead of
a tuple.
"""
start = self._index
stop = start + format.size
if move:
self._index = stop
return format.unpack(chunk.get(start, stop, self, context))[0]
def double32(self, chunk, context, move=True):
"""
Args:
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
context (dict): Auxiliary data used in deserialization.
move (bool): If True, move the
:ref:`uproot.source.cursor.Cursor.index` past the fields;
otherwise, leave it where it is.
Interpret data at this :ref:`uproot.source.cursor.Cursor.index` as
ROOT's ``Double32_t`` type, returning the Python ``float``.
"""
# https://github.com/root-project/root/blob/e87a6311278f859ca749b491af4e9a2caed39161/io/io/src/TBufferFile.cxx#L448-L464
start = self._index
stop = start + _raw_double32.size
if move:
self._index = stop
return _raw_double32.unpack(chunk.get(start, stop, self, context))[0]
def float16(self, chunk, num_bits, context, move=True):
"""
Args:
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
num_bits (int): Number of bits in the mantissa.
context (dict): Auxiliary data used in deserialization.
move (bool): If True, move the
:ref:`uproot.source.cursor.Cursor.index` past the fields;
otherwise, leave it where it is.
Interpret data at this :ref:`uproot.source.cursor.Cursor.index` as
ROOT's ``Float16_t`` type, returning the Python ``float``.
"""
# https://github.com/root-project/root/blob/e87a6311278f859ca749b491af4e9a2caed39161/io/io/src/TBufferFile.cxx#L432-L442
# https://github.com/root-project/root/blob/e87a6311278f859ca749b491af4e9a2caed39161/io/io/src/TBufferFile.cxx#L482-L499
start = self._index
stop = start + _raw_float16.size
if move:
self._index = stop
exponent, mantissa = _raw_float16.unpack(chunk.get(start, stop, self, context))
out = numpy.array([exponent], numpy.int32)
out <<= 23
out |= (mantissa & ((1 << (num_bits + 1)) - 1)) << (23 - num_bits)
out = out.view(numpy.float32)
if (1 << (num_bits + 1) & mantissa) != 0:
out = -out
return out.item()
def byte(self, chunk, context, move=True):
"""
Args:
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
context (dict): Auxiliary data used in deserialization.
move (bool): If True, move the
:ref:`uproot.source.cursor.Cursor.index` past the fields;
otherwise, leave it where it is.
Interpret data at this :ref:`uproot.source.cursor.Cursor.index` as a raw
byte.
"""
out = chunk.get(self._index, self._index + 1, self, context)
if move:
self._index += 1
return out
def bytes(self, chunk, length, context, move=True):
"""
Args:
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
length (int): Number of bytes to retrieve.
context (dict): Auxiliary data used in deserialization.
move (bool): If True, move the
:ref:`uproot.source.cursor.Cursor.index` past the fields;
otherwise, leave it where it is.
Interpret data at this :ref:`uproot.source.cursor.Cursor.index` as raw
bytes with a given ``length``.
"""
start = self._index
stop = start + length
if move:
self._index = stop
return chunk.get(start, stop, self, context)
def array(self, chunk, length, dtype, context, move=True):
"""
Args:
chunk (:doc:`uproot.source.chunk.Chunk`): Buffer of contiguous data
from the file :doc:`uproot.source.chunk.Source`.
length (int): Number of bytes to retrieve.
dtype (``numpy.dtype``): Data type for the array.
context (dict): Auxiliary data used in deserialization.
move (bool): If True, move the
:ref:`uproot.source.cursor.Cursor.index` past the fields;
otherwise, leave it where it is.
Interpret data at this :ref:`uproot.source.cursor.Cursor.index` as a
one-dimensional array with a given ``length`` and ``dtype``.
"""
start = self._index
stop = start + length * dtype.itemsize
if move:
self._index = stop
return numpy.frombuffer(chunk.get(start, stop, self, context), dtype=dtype)
_u1 = numpy.dtype("u1")
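# Illustrative sketch (not part of uproot): how Cursor.fields consumes raw
# bytes, using a minimal stand-in chunk whose ``get`` simply slices a bytes
# buffer (the real Chunk also tracks file offsets and data availability).
class _SketchChunk:
    """Minimal chunk: ``get(start, stop, cursor, context)`` slices bytes."""
    def __init__(self, data):
        self._data = data
    def get(self, start, stop, cursor, context):
        return self._data[start:stop]
# e.g. Cursor(0).fields(_SketchChunk(struct.pack(">if", 7, 1.5)),
#                       struct.Struct(">if"), {}) returns (7, 1.5) and
# leaves the cursor at index 8.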
# -*- coding: iso-8859-15 -*-
import os, re, sys
import numpy as np, scipy.sparse as sp, scipy.stats as stats
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV, ParameterGrid, StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC, SVC
from sklearn.externals import joblib
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
BASE_PATH = '/'.join(CURRENT_PATH.split('/')[:-1])
DATA_PATH = BASE_PATH + '/datasets/data'
def _write_in_file(fname, content, mode='w', makedirs_recursive=True):
dir_ = '/'.join(fname.split('/')[:-1])
if not os.path.isdir(dir_) and makedirs_recursive:
os.makedirs(dir_)
with open(fname, mode) as f:
f.write(content)
def report_model_selection_results(negation_id, lexicon_id, analyzer,
word_ngram_range, char_ngram_range,
lowercase, max_df, min_df, binary,
algo, C, cv_score,
corpus):
line = '{negation_id}\t{lexicon_id}\t{analyzer}\t'.\
format(negation_id=negation_id, lexicon_id=lexicon_id,
analyzer=analyzer)
line += '({min_w},{max_w})\t({min_c},{max_c})\t'.\
format(min_w=word_ngram_range[0], max_w=word_ngram_range[1],
min_c=char_ngram_range[0], max_c=char_ngram_range[1])
line += '%s\t' % ('True' if lowercase else 'False')
line += '%.2f\t' % max_df
line += '%i\t' % min_df
line += '%s\t' % ('True' if binary else 'False')
line += '%s\t' % algo
line += '%.10f\t' % C
line += '%.4f\n' % cv_score
fname = CURRENT_PATH + '/%s-model-selection-results.tsv' % corpus
with open(fname, 'a') as f:
f.write(line)
def vectorize_tweet_collection(fname, analyzer, ngram_range, lowercase,
max_df, min_df, binary, split_underscore=True,
return_vectorizer=False):
"""Vectoriza una colección de tweets utilizando el esquema Tf-Idf.
Retorna la matriz documentos-términos calculada utilizando el esquema Tf-Idf.
La matriz retornada es dispersa, de tipo csr (scipy.sparse.csr_matrix).
paráms:
fname: str
Nombre de archivo que contiene la colección de tweets.
split_underscore: bool
Divide una palabra que tiene el prefijo NEG_. Es decir, separa la
palabra removiendo el guion bajo.
NOTA: este parámetro es válido si analyzer == 'char'
"""
vectorizer = TfidfVectorizer(analyzer=analyzer,
ngram_range=ngram_range,
lowercase=lowercase,
max_df=max_df,
min_df=min_df, binary=binary)
tweets = []
with open(fname) as f:
for tweet in f:
t = tweet.rstrip('\n').decode('utf-8')
if analyzer == 'char' and split_underscore:
t = t.replace(u'_', u' ').strip()
tweets.append(t)
if not return_vectorizer:
return vectorizer.fit_transform(tweets)
else:
return vectorizer.fit_transform(tweets), vectorizer
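# Illustrative usage (not from the original script; the file name below is
# a placeholder): vectorizing a file with one preprocessed tweet per line
# using word uni- and bigrams yields a csr matrix of shape
# (n_tweets, n_ngrams):
#   X = vectorize_tweet_collection('tweets.txt', analyzer='word',
#                                  ngram_range=(1, 2), lowercase=True,
#                                  max_df=.9, min_df=2, binary=False)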
def perform_grid_search(estimator, features, target_labels,
param_grid='default', n_jobs=4):
    # the following probabilities were computed from the results
    # recorded in 'intertass-model-selection-results.tsv'
C_values = np.random.choice(np.power(2., np.arange(-5, 10, dtype=float)),
size=6,
replace=False,
p=[0.02, 0.016, 0.104, 0.146, 0.081, 0.119, 0.214,
0.147, 0.059, 0.027, 0.019, 0.012, 0.014,
0.011, 0.011])
C_values = np.sort(C_values)
if isinstance(param_grid, str) and param_grid == 'default':
param_grid = {'C': C_values}
clf = GridSearchCV(estimator=estimator,
param_grid=param_grid,
scoring='accuracy',
n_jobs=n_jobs,
cv=5,
refit=False)
clf.fit(features, target_labels)
return clf.best_params_, clf.best_score_
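# Illustrative usage (not from the original script): the default grid
# randomly subsamples powers of two for C, but any scikit-learn grid can
# be passed explicitly:
#   best_params, best_score = perform_grid_search(
#       LinearSVC(), features, target_labels,
#       param_grid={'C': np.logspace(-3, 2, 6)})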
def build_vectorization_based_classifiers(corpus):
"""Método principal para construir clasificadores basados en vectorización.
paráms:
corpus: str
"""
corpus = corpus.lower()
##################
# ngram settings #
##################
word_ngram_range = [(1, i) for i in xrange(1, 5)]
char_ngram_range = [(i, j)
for i in xrange(2, 6) for j in xrange(2, 6) if i < j]
ngram_params = ParameterGrid({'analyzer': ['word', 'char', 'both'],
'word_ngram_idx': range(len(word_ngram_range)),
'char_ngram_idx': range(len(char_ngram_range))})
ngram_settings = []
for params in ngram_params:
if params['analyzer'] == 'word' and params['char_ngram_idx'] == 0:
ngram_settings.append('analyzer:word-word_idx:%i-char_idx:%i' %
(params['word_ngram_idx'], -1))
elif params['analyzer'] == 'char' and params['word_ngram_idx'] == 0:
ngram_settings.append('analyzer:char-word_idx:%i-char_idx:%i' %
(-1, params['char_ngram_idx']))
elif params['analyzer'] == 'both':
ngram_settings.append('analyzer:both-word_idx:%i-char_idx:%i' %
(params['word_ngram_idx'], params['char_ngram_idx']))
ngram_params = None
###################
# global settings #
###################
model_selection = ParameterGrid({'ngram_settings': ngram_settings,
'lowercase': [True, False],
'max_df': [.85, .9],
'min_df': [1, 2, 4],
'binary': [True, False]})
corpus_path = DATA_PATH + '/train/' + corpus
for negation_id in os.listdir(corpus_path):
negation_path = corpus_path + '/' + negation_id
if not os.path.isdir(negation_path):
continue
fname = negation_path + '/tweets.txt'
target_labels = np.loadtxt(negation_path + '/target-labels.dat',
dtype=int)
lexicons = []
for metaftures_fname in os.listdir(negation_path):
if re.match(r'metafeatures-lexicon-(?:[0-9]+)\.tsv$', metaftures_fname):
lexicons.append(
'-'.join(metaftures_fname.rstrip('.tsv').split('-')[1:3]))
for lexicon_id in lexicons:
metaftures_fname = negation_path + '/metafeatures-%s.tsv' % lexicon_id
metafeatures = np.loadtxt(metaftures_fname, dtype=float, delimiter='\t')
metafeatures = sp.csr_matrix(metafeatures)
random_idx = np.random.choice(len(model_selection),
size=41, replace=False)
for idx in random_idx:
params = model_selection[idx]
m = re.match('analyzer:([a-z]+)-word_idx:(-?[0-9]+)-char_idx:(-?[0-9]+)',
params['ngram_settings'])
analyzer = m.group(1)
w_idx = int(m.group(2))
c_idx = int(m.group(3))
ngram_range = None
ngrams_features = None
analyzers = ['word', 'char'] if analyzer == 'both' else [analyzer,]
for analyzer in analyzers:
if analyzer == 'word':
ngram_range = word_ngram_range[w_idx]
else:
ngram_range = char_ngram_range[c_idx]
features_ = vectorize_tweet_collection(fname=fname,
analyzer=analyzer,
ngram_range=ngram_range,
lowercase=params['lowercase'],
max_df=params['max_df'],
min_df=params['min_df'],
binary=params['binary'])
if ngrams_features is None:
ngrams_features = features_
else:
ngrams_features = sp.hstack([ngrams_features, features_],
format='csr')
features = sp.hstack([metafeatures, ngrams_features], format='csr')
algorithms = ['LinearSVC', 'LogisticRegression']
algo = np.random.choice(algorithms, p=[.37, .63])
estimator = LinearSVC() if algo == 'LinearSVC' else LogisticRegression()
best_params, best_score = perform_grid_search(
estimator=estimator,
features=features,
target_labels=target_labels)
report_model_selection_results(
negation_id=negation_id,
lexicon_id=lexicon_id,
analyzer=m.group(1),
word_ngram_range=word_ngram_range[w_idx] if w_idx != -1 else (-1, -1),
char_ngram_range=char_ngram_range[c_idx] if c_idx != -1 else (-1, -1),
lowercase=params['lowercase'],
max_df=params['max_df'],
min_df=params['min_df'],
binary=params['binary'],
algo=algo,
C=best_params['C'],
cv_score=best_score,
corpus=corpus)
def prepare_level_one_data(corpus, n_classifiers=100):
"""Prepara los datos de nivel 'uno' que utilizarán los 'ensembles'.
Los datos de nivel 'cero' corresponden a los datos originales provistos para
entrenar modelos de clasificación supervisada. Entonces, las predicciones que
se realizan durante la respectiva validación cruzada, se utilizan para entrenar
los 'ensembles'; es a esto a que llamamos datos de nivel 'uno'.
Referencias:
[1] http://docs.h2o.ai/h2o/latest-stable/h2o-docs/data-science/stacked-ensembles.html
[2] https://www.kaggle.com/general/18793 ("Strategy A")
paráms:
corpus: str
n_classifiers: int
Utilizar las predicciones de los mejores 'n' clasificadores para
preparar los datos de nivel uno.
Esta función, además de preparar los datos de nivel uno, realiza la persisten-
cia tanto de los clasificadores como de los 'vectorizadores'.
"""
corpus = corpus.lower()
corpus_path = DATA_PATH + '/train/' + corpus
# cargar los resultados de selección de modelos
model_selection_results = np.loadtxt(
CURRENT_PATH + '/%s-model-selection-results.tsv' % corpus,
dtype=str, delimiter='\t')
# los resultados entonces se ordenan descendentemente,
# obteniéndose los respectivos índices
indexes = np.argsort(np.array(model_selection_results[:,-1], dtype=float))[::-1]
indexes = indexes[:n_classifiers]
persistence_path = BASE_PATH + '/model_persistence/%s' % corpus
if not os.path.isdir(persistence_path):
os.makedirs(persistence_path)
level_one_data_path = CURRENT_PATH + '/level-one-data/%s' % corpus
if not os.path.isdir(level_one_data_path):
os.makedirs(level_one_data_path)
for idx in indexes:
        # Read parameters
tmp = model_selection_results[idx,:]
negation_id = tmp[0]
lexicon_id = tmp[1]
analyzer = tmp[2]
word_ngram_range =\
tuple([int(i) for i in re.sub('[\(\)]', '', tmp[3]).split(',')])
char_ngram_range =\
tuple([int(i) for i in re.sub('[\(\)]', '', tmp[4]).split(',')])
lowercase = True if tmp[5] == 'True' else False
max_df = float(tmp[6])
min_df = int(tmp[7])
binary = True if tmp[8] == 'True' else False
algo = tmp[9]
C = float(tmp[10])
temp = None
        # Load the document collection, the "ground truth" and the "metafeatures"
negation_path = corpus_path + '/' + negation_id
if not os.path.isdir(negation_path):
continue
fname = negation_path + '/tweets.txt'
target_labels = np.loadtxt(negation_path + '/target-labels.dat',
dtype=int)
metaftures_fname = negation_path + '/metafeatures-%s.tsv' % lexicon_id
metafeatures = np.loadtxt(metaftures_fname, dtype=float, delimiter='\t')
metafeatures = sp.csr_matrix(metafeatures)
        # Vectorize the document collection
ngram_range = None
ngrams_features = None
analyzers = ['word', 'char'] if analyzer == 'both' else [analyzer,]
for analyzer in analyzers:
ngram_range = word_ngram_range if analyzer == 'word' else char_ngram_range
features_, vectorizer =\
vectorize_tweet_collection(fname=fname,
analyzer=analyzer,
ngram_range=ngram_range,
lowercase=lowercase,
max_df=max_df,
min_df=min_df,
binary=binary,
return_vectorizer=True)
if ngrams_features is None:
ngrams_features = features_
else:
ngrams_features = sp.hstack([ngrams_features, features_],
format='csr')
vectorizer_fname = '%s-%s-%i_%i-%s-%.2f-%i-%s.pkl' %\
(negation_id, analyzer,
ngram_range[0], ngram_range[1],
tmp[5], max_df, min_df, tmp[8])
vectorizer_fname = persistence_path + '/vectorizers/' + vectorizer_fname
            # persist the 'vectorizer'
if not os.path.isfile(vectorizer_fname):
joblib.dump(vectorizer, vectorizer_fname)
features = sp.hstack([metafeatures, ngrams_features], format='csr')
skf = list(StratifiedKFold(n_splits=5, shuffle=False, random_state=None).\
split(np.zeros(features.shape[0], dtype=float), target_labels))
class_label_prediction = np.zeros(features.shape[0], dtype=int)
class_proba_prediction = np.zeros((features.shape[0],
np.unique(target_labels).shape[0]),
dtype=float)
for train_index, test_index in skf:
X_train = features[train_index]
y_train = target_labels[train_index]
clf = LinearSVC(C=C) if algo == 'LinearSVC' else LogisticRegression(C=C)
clf.fit(X_train, y_train)
X_test = features[test_index]
y_test = target_labels[test_index]
class_label_prediction[test_index] = clf.predict(X_test)
if algo == 'LogisticRegression':
class_proba_prediction[test_index] = clf.predict_proba(X_test)
class_label_fname = level_one_data_path + '/clf_%i-label.tsv' % idx
class_proba_fname = level_one_data_path + '/clf_%i-proba.tsv' % idx
np.savetxt(fname=class_label_fname, X=class_label_prediction, fmt='%i',
delimiter='\t')
if algo == 'LogisticRegression':
np.savetxt(fname=class_proba_fname, X=class_proba_prediction,
fmt='%.4f', delimiter='\t')
        # persist the classifier
clf_fname = persistence_path + '/classifiers/' + 'clf_%i.pkl' % idx
if not os.path.isfile(clf_fname):
clf = LinearSVC(C=C) if algo == 'LinearSVC' else LogisticRegression(C=C)
clf.fit(features, target_labels)
joblib.dump(clf, clf_fname)
_write_in_file(
fname=CURRENT_PATH + '/%s-model-selection-filtered-results.tsv' % corpus,
content='\t'.join(['%i' % idx,] + model_selection_results[idx,:].tolist()) + '\n',
mode='a')
def find_low_correlated_combinations(corpus, n_classifiers=50):
"""Encuentra las combinaciones de más baja correlación.
paráms:
corpus: str
n_classifiers: int
Límite de clasificadores que pueden constituir una combinación.
Nota: los datos de nivel uno deben haber sido generados; esto es, debió
haberse ejecutado el método 'prepare_level_one_data'.
"""
corpus = corpus.lower()
level_one_data_path = CURRENT_PATH + '/level-one-data/%s' % corpus
filtered_results = np.loadtxt(
CURRENT_PATH + '/%s-model-selection-filtered-results.tsv' % corpus,
dtype=str, delimiter='\t', usecols=(0, 10))
logit_results =\
filtered_results[np.where(filtered_results[:,1] == 'LogisticRegression')]
low_correlated_combinations = {
1: {'filtered_results': [[i] for i in xrange(filtered_results.shape[0])],
'logit_results': [[i] for i in xrange(logit_results.shape[0])]}}
output_fname = CURRENT_PATH +\
'/%s-model-selection-low-correlated-combinations.tsv' % corpus
for i in xrange(2, n_classifiers + 1):
for which_results_to_use in low_correlated_combinations[i-1].iterkeys():
results = filtered_results
all_clf_ids = range(filtered_results.shape[0])
if which_results_to_use == 'logit_results':
results = logit_results
all_clf_ids = range(logit_results.shape[0])
correlation_results = []
prev_results = low_correlated_combinations[i-1][which_results_to_use]
for prev_rslt in prev_results:
for j in all_clf_ids:
if j in prev_rslt or (i == 2 and prev_rslt[0] > j):
continue
                    # compute the correlation between all
                    # members of the combination
tmp = prev_rslt[:]
tmp.append(j)
pearson_correlation = []
for y in xrange(len(tmp)):
labels_y = np.loadtxt(
level_one_data_path + '/clf_%s-label.tsv' % results[tmp[y],0],
dtype=int)
for z in xrange(len(tmp)):
if z <= y:
continue
labels_z = np.loadtxt(
level_one_data_path + '/clf_%s-label.tsv' % results[tmp[z],0],
dtype=int)
pearson_correlation.append(stats.pearsonr(labels_y,
labels_z)[0])
correlation_results.append([tmp, np.mean(pearson_correlation),
np.std(pearson_correlation)])
correlation_results = np.array(correlation_results)
min_crltn = np.amin(correlation_results[:,1])
min_crltn_indexes = np.where(correlation_results[:,1] == min_crltn)[0]
min_crltn_idx = np.argmin(correlation_results[min_crltn_indexes, 2])
lowest_crltn = correlation_results[min_crltn_indexes[min_crltn_idx]]
if i not in low_correlated_combinations.keys():
low_correlated_combinations[i] = {}
low_correlated_combinations[i][which_results_to_use] = [lowest_crltn[0],]
output_str = '\t'.join(['%i' % i,
'both' if which_results_to_use == 'filtered_results' else 'logit',
','.join([results[clf_id,0] for clf_id in lowest_crltn[0]]),
'%.4f' % lowest_crltn[1],
'%.4f' % lowest_crltn[2]])
if not os.path.isfile(output_fname):
_write_in_file(output_fname,
'#n\talgo\tclf_ids\tavg_correlation\tcorrelation_std\n')
_write_in_file(output_fname, output_str + '\n', 'a')
def search_for_the_best_second_level_classifiers(corpus):
"""Buscar las mejores configuraciones de los clasificadores de segundo nivel.
paráms:
corpus: str
Por otra parte, se listan los pre-requisitos para entrenar los clasificadores
de segundo nivel:
1. Haber generado los datos de nivel uno; función 'prepare_level_one_data'
2. Haber encontrado las combinaciones de clasificadores con la más baja
correlación, función 'find_low_correlated_combinations'
"""
corpus = corpus.lower()
level_one_data_path = CURRENT_PATH + '/level-one-data/%s' % corpus
persistence_path = BASE_PATH + '/model_persistence/%s/stackers' % corpus
if not os.path.isdir(persistence_path):
os.makedirs(persistence_path)
target_labels = None
for dir_ in os.listdir(DATA_PATH + '/train/%s' % corpus):
if os.path.isfile(DATA_PATH + '/train/%s/%s/target-labels.dat' % (corpus, dir_)):
target_labels =\
np.loadtxt(DATA_PATH + '/train/%s/%s/target-labels.dat' % (corpus, dir_),
dtype=int)
break
if target_labels is None:
        raise Exception('No "ground truth" was found.')
n_classes = np.unique(target_labels).shape[0]
    # read the best model configurations
filtered_results = np.loadtxt(
CURRENT_PATH + '/%s-model-selection-filtered-results.tsv' % corpus,
dtype=str, delimiter='\t', usecols=(0, 10))
logit_results =\
filtered_results[np.where(filtered_results[:,1] == 'LogisticRegression')]
low_crltd_combinations = np.loadtxt(
CURRENT_PATH + '/%s-model-selection-low-correlated-combinations.tsv' % corpus,
dtype=str, delimiter='\t', usecols=(0, 1, 2))
filtered_combi =\
low_crltd_combinations[np.where(low_crltd_combinations[:,1] == 'both')]
logit_combi =\
low_crltd_combinations[np.where(low_crltd_combinations[:,1] == 'logit')]
    # determine the maximum number of classifiers
    # that may form a combination
n_classifiers = int(low_crltd_combinations[-1,0])
low_crltd_combinations = None
    # reorder the arrays, keeping only the ids
filtered_results = filtered_results[:n_classifiers,0]
logit_results = logit_results[:n_classifiers,0]
filtered_combi = np.array(filtered_combi[-1, 2].split(','), dtype=str)
logit_combi = np.array(logit_combi[-1, 2].split(','), dtype=str)
    # file in which to save the results
output_fname = CURRENT_PATH + '/%s-model-selection-ensemble-results.tsv' % corpus
_write_in_file(output_fname,
'\t'.join(['#n_classifiers',
'ensemble_method',
'selection_method',
'algo',
'clf_ids',
'stacking_algo',
'hyperparameters',
'CV_score']) + '\n',
mode='w')
for i in xrange(2, n_classifiers+1):
for selection_method in ['low_crltn', 'best_ranked']:
# unweighted average
results = logit_results
if selection_method == 'low_crltn':
results = logit_combi
clf_ids = results[:i]
class_proba = {}
for j in xrange(i):
class_proba[j] = np.loadtxt(
level_one_data_path + '/clf_%s-proba.tsv' % clf_ids[j],
dtype=float, delimiter='\t')
predicted_class_labels = []
for j in xrange(target_labels.shape[0]):
matrix = None
for k in class_proba.iterkeys():
vector = class_proba[k][j,:].reshape(1, n_classes)
if matrix is None:
matrix = vector
else:
matrix = np.vstack((matrix, vector))
predicted_class_labels.append(np.argmax(np.mean(matrix, axis=0)))
predicted_class_labels = np.array(predicted_class_labels, dtype=int)
output_str = '\t'.join(['%i' % i,
'unweighted_average',
selection_method,
'logit',
','.join(clf_ids),
'(None)',
'(None)',
'%.4f' % accuracy_score(target_labels,
predicted_class_labels)
])
_write_in_file(output_fname, output_str + '\n', mode='a')
# stacking
results = filtered_results
if selection_method == 'low_crltn':
results = filtered_combi
clf_ids = results[:i]
matrix = None
for j in xrange(i):
vector = np.loadtxt(
level_one_data_path + '/clf_%s-label.tsv' % clf_ids[j],
dtype=int).reshape(target_labels.shape[0], 1)
if matrix is None:
matrix = vector
else:
matrix = np.hstack((matrix, vector))
stacking_algos = {
'logit': {'estimator': LogisticRegression(),
'param_grid': {'C': np.logspace(-3, 2, 6)}
},
'SVM_rbf': {'estimator': SVC(),
'param_grid': {'kernel': ['rbf',],
'C': np.logspace(-3, 2, 6),
'gamma': np.logspace(-3, 2, 6)}
},
}
if i >= 7:
stacking_algos['rf'] = {
'estimator': RandomForestClassifier(),
'param_grid': {
'n_estimators': np.array([10, 20, 40, 100]),
'criterion': ['gini', 'entropy'],
'max_features': np.arange(2,int(np.round(np.sqrt(i),0))+1)
}
}
stacking_cv_results = []
for algo in stacking_algos.iterkeys():
estimator = stacking_algos[algo]['estimator']
param_grid = stacking_algos[algo]['param_grid']
best_params, best_score =\
perform_grid_search(estimator=estimator,
features=matrix,
target_labels=target_labels,
param_grid=param_grid,
n_jobs=3)
params_str = []
for param in param_grid.iterkeys():
value = best_params[param]
if isinstance(value, int):
value = '%i' % value
elif isinstance(value, float):
value = '%.10f' % value
else:
value = str(value)
params_str.append('%s:%s' % (param, value))
                params_str = ';'.join(params_str)
stacking_cv_results.append([algo, params_str, best_score])
            stacking_cv_results = np.array(stacking_cv_results)
from scipy.io.wavfile import read
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "Times New Roman"
import pysptk
try:
from .peakdetect import peakdetect
from .GCI import SE_VQ_varF0, IAIF, get_vq_params
except:
from peakdetect import peakdetect
from GCI import SE_VQ_varF0, IAIF, get_vq_params
PATH=os.path.dirname(os.path.abspath(__file__))
sys.path.append('../')
from utils import dynamic2static, save_dict_kaldimat, get_dict
from scipy.integrate import cumtrapz
from tqdm import tqdm
import pandas as pd
import torch
from script_mananger import script_manager
class Glottal:
"""
Compute features based on the glottal source reconstruction from sustained vowels and continuous speech.
    For continuous speech, the features are computed over voiced segments.
Nine descriptors are computed:
1. Variability of time between consecutive glottal closure instants (GCI)
2. Average opening quotient (OQ) for consecutive glottal cycles-> rate of opening phase duration / duration of glottal cycle
    3. Variability of opening quotient (OQ) for consecutive glottal cycles-> rate of opening phase duration / duration of glottal cycle
4. Average normalized amplitude quotient (NAQ) for consecutive glottal cycles-> ratio of the amplitude quotient and the duration of the glottal cycle
5. Variability of normalized amplitude quotient (NAQ) for consecutive glottal cycles-> ratio of the amplitude quotient and the duration of the glottal cycle
6. Average H1H2: Difference between the first two harmonics of the glottal flow signal
7. Variability H1H2: Difference between the first two harmonics of the glottal flow signal
8. Average of Harmonic richness factor (HRF): ratio of the sum of the harmonics amplitude and the amplitude of the fundamental frequency
9. Variability of HRF
Static or dynamic matrices can be computed:
Static matrix is formed with 36 features formed with (9 descriptors) x (4 functionals: mean, std, skewness, kurtosis)
Dynamic matrix is formed with the 9 descriptors computed for frames of 200 ms length with a time-shift of 50 ms.
Notes:
1. The fundamental frequency is computed using the RAPT algorithm.
>>> python glottal.py <file_or_folder_audio> <file_features> <dynamic_or_static> <plots (true, false)> <format (csv, txt, npy, kaldi, torch)>
Examples command line:
>>> python glottal.py "../audios/001_a1_PCGITA.wav" "glottalfeaturesAst.txt" "static" "true" "txt"
>>> python glottal.py "../audios/098_u1_PCGITA.wav" "glottalfeaturesUst.csv" "static" "true" "csv"
>>> python glottal.py "../audios/098_u1_PCGITA.wav" "glottalfeaturesUst.ark" "dynamic" "true" "kaldi"
>>> python glottal.py "../audios/098_u1_PCGITA.wav" "glottalfeaturesUst.pt" "dynamic" "true" "torch"
Examples directly in Python
>>> from disvoice.glottal import Glottal
>>> glottal=Glottal()
>>> file_audio="../audios/001_a1_PCGITA.wav"
    >>> features=glottal.extract_features_file(file_audio, static=True, plots=True, fmt="npy")
    >>> features2=glottal.extract_features_file(file_audio, static=True, plots=True, fmt="dataframe")
    >>> features3=glottal.extract_features_file(file_audio, static=False, plots=True, fmt="torch")
    >>> path_audios="../audios/"
    >>> features1=glottal.extract_features_path(path_audios, static=True, plots=False, fmt="npy")
    >>> features2=glottal.extract_features_path(path_audios, static=True, plots=False, fmt="torch")
    >>> features3=glottal.extract_features_path(path_audios, static=False, plots=False, fmt="dataframe")
"""
def __init__(self):
self.size_frame=0.2
self.size_step=0.05
self.head=["var GCI", "avg NAQ", "std NAQ", "avg QOQ", "std QOQ", "avg H1H2", "std H1H2", "avg HRF", "std HRF"]
def plot_glottal(self, data_audio,fs,GCI, glottal_flow, glottal_sig):
"""Plots of the glottal features
:param data_audio: speech signal.
:param fs: sampling frequency
:param GCI: glottal closure instants
:param glottal_flow: glottal flow
:param glottal_sig: reconstructed glottal signal
:returns: plots of the glottal features.
"""
fig, ax=plt.subplots(3, sharex=True)
t=np.arange(0, float(len(data_audio))/fs, 1.0/fs)
if len(t)>len(data_audio):
t=t[:len(data_audio)]
elif len(t)<len(data_audio):
data_audio=data_audio[:len(t)]
ax[0].plot(t, data_audio, 'k')
ax[0].set_ylabel('Amplitude', fontsize=12)
ax[0].set_xlim([0, t[-1]])
ax[0].grid(True)
ax[1].plot(t, glottal_sig, color='k', linewidth=2.0, label="Glottal flow signal")
amGCI=[glottal_sig[int(k-2)] for k in GCI]
GCI=GCI/fs
ax[1].plot(GCI, amGCI, 'bo', alpha=0.5, markersize=8, label="GCI")
GCId=np.diff(GCI)
ax[1].set_ylabel("Glottal flow", fontsize=12)
ax[1].text(t[2],-0.8, "Avg. time consecutive GCI:"+str(np.round(np.mean(GCId)*1000,2))+" ms")
ax[1].text(t[2],-1.05, "Std. time consecutive GCI:"+str(np.round(np.std(GCId)*1000,2))+" ms")
ax[1].set_xlabel('Time (s)', fontsize=12)
ax[1].set_xlim([0, t[-1]])
ax[1].set_ylim([-1.1, 1.1])
ax[1].grid(True)
ax[1].legend(ncol=2, loc=2)
ax[2].plot(t, glottal_flow, color='k', linewidth=2.0)
ax[2].set_ylabel("Glotal flow derivative", fontsize=12)
ax[2].set_xlabel('Time (s)', fontsize=12)
ax[2].set_xlim([0, t[-1]])
ax[2].grid(True)
plt.show()
def extract_glottal_signal(self, x, fs):
"""Extract the glottal flow and the glottal flow derivative signals
:param x: data from the speech signal.
:param fs: sampling frequency
:returns: glottal signal
:returns: derivative of the glottal signal
:returns: glottal closure instants
>>> from scipy.io.wavfile import read
>>> glottal=Glottal()
>>> file_audio="../audios/001_a1_PCGITA.wav"
        >>> fs, data_audio=read(file_audio)
>>> glottal, g_iaif, GCIs=glottal.extract_glottal_signal(data_audio, fs)
"""
winlen=int(0.025*fs)
winshift=int(0.005*fs)
        x=x-np.mean(x)
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
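# Illustrative sketch (a hand-written comment, not part of the generated
# data): for P -1 a reflection (h, k, l) is symmetry-equivalent to
# (-h, -k, -l), both with unit phase factor:
#
#   hkls, phases = space_groups['P -1'].symmetryEquivalentMillerIndices(
#       N.array([1, 2, 3]))
#   # hkls   -> [[ 1,  2,  3], [-1, -2, -3]]
#   # phases -> [1.+0.j, 1.+0.j]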
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
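# Note: the ':2' suffix in symbols such as 'P n n n :2' follows the
# International Tables convention for origin choice 2 (origin placed at a
# centre of inversion); the bare number and the suffixed symbol both key
# the same SpaceGroup object in the registry.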
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
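
def _apply_space_group(group, point):
    # Editor-added sketch (hedged), not part of the generated table:
    # return every image of a fractional coordinate under the stored
    # operations. Assumes SpaceGroup exposes the (rot, trans_num,
    # trans_den) tuples as group.transformations; multiplying trans_num
    # by 1.0 promotes the integer array to floats before the
    # elementwise division by trans_den.
    return [N.dot(rot, point) + (1.0*trans_num)/trans_den
            for rot, trans_num, trans_den in group.transformations]
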
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
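# The -4 (rotoinversion) axis of the group above can be read straight
# off the matrices: (y, -x, -z) is a quarter turn about z combined with
# inversion; applied twice it gives the two-fold (-x, -y, z).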
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
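# Centring note: in the body-centred (I) groups the table expands the
# centring explicitly -- each primitive operation reappears once more
# with (1/2, 1/2, 1/2) added to its translation, doubling the number of
# stored tuples.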
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
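
def _reduced_translation(trans_num, trans_den):
    # Editor-added sketch (hedged): the generated translations are not
    # normalised -- values such as (1, 1/2, 5/4) or negative entries
    # like (-1/4, -3/4, -3/4) appear above. This folds the elementwise
    # fraction trans_num/trans_den into the half-open interval [0, 1).
    t = (1.0*trans_num)/trans_den
    return t - N.floor(t)
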
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
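# Enantiomorph note: the 41/43 screw groups above come in mirror-image
# pairs (91/95 and 92/96) -- identical rotation parts, with the 1/4 and
# 3/4 screw translations along z exchanged.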
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
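# Lookup note: each group is registered twice, under its International
# Tables number and under its Hermann-Mauguin symbol, so e.g.
# space_groups[97] is space_groups['I 4 2 2'].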
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
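# Space group 107 (I 4 m m); body-centred: the second half of the list
# repeats the first eight operations with the (1/2,1/2,1/2) centring
# translation added.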
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
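# Space group 108 (I 4 c m)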
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
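# Space group 109 (I 41 m d)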
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
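# Space group 110 (I 41 c d)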
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
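# Space group 111 (P -4 2 m)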
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
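# Space group 112 (P -4 2 c)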
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
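# Space group 113 (P -4 21 m)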
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
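# Space group 114 (P -4 21 c)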
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
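# Space group 115 (P -4 m 2)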
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
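# Space group 116 (P -4 c 2)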
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
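# Space group 117 (P -4 b 2)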
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
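# Space group 118 (P -4 n 2)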
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
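# Space group 119 (I -4 m 2)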
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
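# Space group 120 (I -4 c 2)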
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
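# Space group 121 (I -4 2 m)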
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
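# Space group 122 (I -4 2 d)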
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
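# Space group 123 (P 4/m m m)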
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
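# Space group 124 (P 4/m c c)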
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
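# Space group 125 (P 4/n b m, origin choice 2)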
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
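# Space group 126 (P 4/n n c, origin choice 2)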
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
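# Space group 127 (P 4/m b m)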
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
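# Space group 128 (P 4/m n c)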
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
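# Space group 129 (P 4/n m m, origin choice 2)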
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
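# Space group 130 (P 4/n c c, origin choice 2)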
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
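# Space group 131 (P 42/m m c)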
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
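# Space group 132: P 42/m c m, tetragonal.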
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
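# Space group 133: P 42/n b c (origin choice 2), tetragonal.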
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
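# Space group 134: P 42/n n m (origin choice 2), tetragonal.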
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
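# Space group 135: P 42/m b c, tetragonal.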
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
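# Space group 136: P 42/m n m, tetragonal.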
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
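# Space group 137: P 42/n m c (origin choice 2), tetragonal.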
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
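# Space group 138: P 42/n c m (origin choice 2), tetragonal.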
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
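# Space group 139: I 4/m m m, body-centred tetragonal; the second half of the
# list repeats the first sixteen operations with the I-centring translation
# (1/2, 1/2, 1/2) added.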
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
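# Space group 140: I 4/m c m, body-centred tetragonal (I-centring as above).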
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
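# Space group 141: I 41/a m d (origin choice 2), body-centred tetragonal.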
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
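# Space group 142: I 41/a c d (origin choice 2), body-centred tetragonal.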
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
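# Space group 143: P 3, trigonal.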
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
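
# Illustrative sketch, not part of the generated table: every triple appended
# to `transformations` pairs a 3x3 integer rotation matrix with a fractional
# translation stored as integer numerator/denominator vectors, so applying one
# operation to a fractional coordinate is rot . point + trans_num/trans_den
# (componentwise), wrapped back into the unit cell. Assumes N is a
# numpy-compatible module; the helper name below is hypothetical.
def _apply_symmetry_op(rot, trans_num, trans_den, point):
    # e.g. trans_num=[1,1,1], trans_den=[2,2,2] encodes the translation (1/2, 1/2, 1/2)
    translation = trans_num * 1.0 / trans_den
    # rotate, translate, then wrap each fractional coordinate into [0, 1)
    return (N.dot(rot, point) + translation) % 1.0

# For P 3 above, the three operations send a general point (x, y, z) to
# (x, y, z), (-y, x-y, z) and (-x+y, -x, z), each taken modulo 1.
# Space group 144: P 31, trigonal; the 31 screw axis contributes translations
# of c/3 and 2c/3.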
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
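# Space group 145: P 32, trigonal; the 32 screw axis contributes translations
# of 2c/3 and c/3.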
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
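# Space group 146: R 3, hexagonal-axes setting (:H), with the rhombohedral
# centring translations (1/3, 2/3, 2/3) and (2/3, 1/3, 1/3).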
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
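# Space group 147: P -3, trigonal.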
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
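# Space group 148: R -3, hexagonal-axes setting (:H), rhombohedrally centred.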
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
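# Space group 149: P 3 1 2, trigonal.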
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
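# Space group 150: P 3 2 1, trigonal.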
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
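# Space group 151: P 31 1 2, trigonal.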
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
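# Space group 152: P 31 2 1, trigonal.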
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
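# Space group 153: P 32 1 2, trigonal.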
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
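# --- Hedged usage sketch (illustration, not part of the generated table) ---
# Assumption: each (rot, trans_num, trans_den) triple appended above acts on
# fractional coordinates, and N is the array module imported at the top of
# this file (outside this excerpt). Applying one symmetry operation:
def _apply_symmetry_op(op, point):
    """Illustrative only: rot @ point + trans_num / trans_den for a
    3-vector of fractional coordinates."""
    rot, trans_num, trans_den = op
    return N.dot(rot, point) + trans_num * 1.0 / trans_den
# Hypothetical orbit of a point p under space group 155, assuming SpaceGroup
# keeps the transformation list it was constructed with:
#   orbit = [_apply_symmetry_op(op, p)
#            for op in space_groups['R 3 2 :H'].transformations]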
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
import sys
import numpy as np
import pandas as pd
import openmdao.api as om
from wisdem.commonse import gravity
eps = 1e-3
# Convenience functions for computing McDonald's C and F parameters
def chsMshc(x):
return np.cosh(x) * np.sin(x) - np.sinh(x) * np.cos(x)
def chsPshc(x):
return np.cosh(x) * np.sin(x) + np.sinh(x) * np.cos(x)
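# Illustrative sanity check (assumed values, not from any design in this
# file): both helpers vanish at x = 0 and grow like sinh(x) for large x,
# e.g. chsMshc(0.0) == 0.0 and chsPshc(0.0) == 0.0.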
def carterFactor(airGap, slotOpening, slotPitch):
"""Return Carter factor
(based on Langsdorff's empirical expression)
See page 3-13 Boldea Induction machines Chapter 3
"""
gma = (2 * slotOpening / airGap) ** 2 / (5 + 2 * slotOpening / airGap)
return slotPitch / (slotPitch - airGap * gma * 0.5)
# ---------------
def carterFactorMcDonald(airGap, h_m, slotOpening, slotPitch):
"""Return Carter factor using Carter's equation
(based on Schwartz-Christoffel's conformal mapping on simplified slot geometry)
This code is based on Eq. B.3-5 in Appendix B of McDonald's thesis.
It is used by PMSG_arms and PMSG_disc.
h_m : magnet height (m)
slotOpening (b_so) : stator slot opening (m)
slotPitch (tau_s) : stator slot pitch (m)
"""
mu_r = 1.06 # relative permeability (probably for neodymium magnets, often given as 1.05 - GNS)
g_1 = airGap + h_m / mu_r # g
b_over_a = slotOpening / (2 * g_1)
gamma = 4 / np.pi * (b_over_a * np.arctan(b_over_a) - np.log(np.sqrt(1 + b_over_a ** 2)))
return slotPitch / (slotPitch - gamma * g_1)
# ---------------
def carterFactorEmpirical(airGap, slotOpening, slotPitch):
"""Return Carter factor using Langsdorff's empirical expression"""
sigma = (slotOpening / airGap) / (5 + slotOpening / airGap)
return slotPitch / (slotPitch - sigma * slotOpening)
# ---------------
def carterFactorSalientPole(airGap, slotWidth, slotPitch):
"""Return Carter factor for salient pole rotor
Where does this equation come from? It's different from other approximations above.
Original code:
tau_s = np.pi * dia / S # slot pitch
b_s = tau_s * b_s_tau_s # slot width
b_t = tau_s - b_s # tooth width
K_C1 = (tau_s + 10 * g_a) / (tau_s - b_s + 10 * g_a) # salient pole rotor
slotPitch - slotWidth == toothWidth
"""
return (slotPitch + 10 * airGap) / (slotPitch - slotWidth + 10 * airGap) # salient pole rotor
# ---------------------------------
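def _demo_carter_factors():
    """Illustrative comparison of the Carter-factor variants above. The
    geometry (1 mm air gap, 3 mm slot opening, 15 mm slot pitch, 4 mm magnet
    height) is an assumed example, not a design value used in this module.
    Each variant returns a multiplier >= 1 that widens the effective air gap."""
    g, b_so, tau_s, h_m = 1e-3, 3e-3, 15e-3, 4e-3
    print("carterFactor:           ", carterFactor(g, b_so, tau_s))
    print("carterFactorMcDonald:   ", carterFactorMcDonald(g, h_m, b_so, tau_s))
    print("carterFactorEmpirical:  ", carterFactorEmpirical(g, b_so, tau_s))
    print("carterFactorSalientPole:", carterFactorSalientPole(g, b_so, tau_s))
# ---------------------------------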
def array_seq(q1, b, c, Total_number):
Seq = np.array([1, 0, 0, 1, 0])
diff = Total_number * 5 / 6
G = | np.prod(Seq.shape) | numpy.prod |
from . import GeneExpressionDataset
from .anndataset import AnnDatasetFromAnnData, DownloadableAnnDataset
import torch
import pickle
import os
import numpy as np
import pandas as pd
import anndata
class AnnDatasetKeywords(GeneExpressionDataset):
def __init__(self, data, select_genes_keywords=[]):
super().__init__()
if isinstance(data, str):
anndataset = anndata.read(data)
else:
anndataset = data
idx_and_gene_names = [
(idx, gene_name) for idx, gene_name in enumerate(list(anndataset.var.index))
]
for keyword in select_genes_keywords:
idx_and_gene_names = [
(idx, gene_name)
for idx, gene_name in idx_and_gene_names
if keyword.lower() in gene_name.lower()
]
gene_indices = np.array([idx for idx, _ in idx_and_gene_names])
gene_names = np.array([gene_name for _, gene_name in idx_and_gene_names])
expression_mat = np.array(anndataset.X[:, gene_indices].todense())
select_cells = expression_mat.sum(axis=1) > 0
expression_mat = expression_mat[select_cells, :]
select_genes = (expression_mat > 0).mean(axis=0) > 0.21
gene_names = gene_names[select_genes]
expression_mat = expression_mat[:, select_genes]
print("Final dataset shape :", expression_mat.shape)
self.populate_from_data(X=expression_mat, gene_names=gene_names)
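# Hedged usage sketch: keep only ERCC spike-in genes from an AnnData file,
# mirroring the control datasets below. The path is a placeholder, not a
# file shipped with this module:
#   dataset = AnnDatasetKeywords("my_controls.h5ad",
#                                select_genes_keywords=["ercc"])
#   print(dataset.nb_genes)  # assumes GeneExpressionDataset exposes nb_genes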
class ZhengDataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
zheng = anndata.read(os.path.join(current_dir, "zheng_gemcode_control.h5ad"))
super(ZhengDataset, self).__init__(zheng, select_genes_keywords=["ercc"])
class MacosDataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
macos = anndata.read(os.path.join(current_dir, "macosko_dropseq_control.h5ad"))
super(MacosDataset, self).__init__(macos, select_genes_keywords=["ercc"])
class KleinDataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
klein = anndata.read(
os.path.join(current_dir, "klein_indrops_control_GSM1599501.h5ad")
)
super(KleinDataset, self).__init__(klein, select_genes_keywords=["ercc"])
class Sven1Dataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
svens = anndata.read(
os.path.join(current_dir, "svensson_chromium_control.h5ad")
)
sven1 = svens[svens.obs.query('sample == "20311"').index]
super(Sven1Dataset, self).__init__(sven1, select_genes_keywords=["ercc"])
class Sven2Dataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
svens = anndata.read(
os.path.join(current_dir, "svensson_chromium_control.h5ad")
)
sven2 = svens[svens.obs.query('sample == "20312"').index]
super(Sven2Dataset, self).__init__(sven2, select_genes_keywords=["ercc"])
class AnnDatasetRNA(GeneExpressionDataset):
def __init__(self, data, n_genes=100):
super().__init__()
if isinstance(data, str):
anndataset = anndata.read(data)
else:
anndataset = data
# Select RNA genes
idx_and_gene_names = [
(idx, gene_name)
for idx, gene_name in enumerate(list(anndataset.var.index))
if "ercc" not in gene_name.lower()
]
gene_indices = | np.array([idx for idx, _ in idx_and_gene_names]) | numpy.array |
#!/usr/bin/python3
# -*- coding=utf-8 -*-
import numpy as np
import copy
from scipy.special import expit, softmax
def yolo3_head(predictions, anchors, num_classes, input_dims):
"""
YOLO Head to process predictions from YOLO models
:param num_classes: Total number of classes
:param anchors: YOLO style anchor list for bounding box assignment
:param input_dims: Input dimensions of the image
:param predictions: A list of three tensors with shape (N, 19, 19, 255), (N,38, 38, 255) and (N, 76, 76, 255)
:return: A tensor with the shape (N, num_boxes, 85)
"""
assert len(predictions) == len(anchors)//3, 'anchor numbers does not match prediction.'
if len(predictions) == 3: # assume 3 set of predictions is YOLOv3
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]
elif len(predictions) == 2: # 2 set of predictions is YOLOv3-tiny
anchor_mask = [[3,4,5], [0,1,2]]
else:
raise ValueError('Unsupported prediction length: {}'.format(len(predictions)))
results = []
for i, prediction in enumerate(predictions):
results.append(_yolo3_head(prediction, num_classes, anchors[anchor_mask[i]], input_dims))
return np.concatenate(results, axis=1)
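# Shape note (assumed 416x416 COCO configuration): the three feature maps are
# (N, 13, 13, 255), (N, 26, 26, 255) and (N, 52, 52, 255), so the concatenated
# result is (N, 3*(13*13 + 26*26 + 52*52), 85) = (N, 10647, 85).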
def _yolo3_head(prediction, num_classes, anchors, input_dims):
batch_size = np.shape(prediction)[0]
stride = input_dims[0] // np.shape(prediction)[1]
grid_size = input_dims[0] // stride
num_anchors = len(anchors)
prediction = np.reshape(prediction,
(batch_size, num_anchors * grid_size * grid_size, num_classes + 5))
box_xy = expit(prediction[:, :, :2]) # t_x (box x and y coordinates)
objectness = expit(prediction[:, :, 4]) # p_o (objectness score)
objectness = np.expand_dims(objectness, 2)  # keep a trailing singleton axis so it concatenates with boxes and class scores
grid = np.arange(grid_size)
a, b = np.meshgrid(grid, grid)
x_offset = np.reshape(a, (-1, 1))
y_offset = np.reshape(b, (-1, 1))
x_y_offset = np.concatenate((x_offset, y_offset), axis=1)
x_y_offset = np.tile(x_y_offset, (1, num_anchors))
x_y_offset = np.reshape(x_y_offset, (-1, 2))
x_y_offset = np.expand_dims(x_y_offset, 0)
box_xy += x_y_offset
# Log space transform of the height and width
anchors = [(a[0] / stride, a[1] / stride) for a in anchors]
anchors = np.tile(anchors, (grid_size * grid_size, 1))
anchors = np.expand_dims(anchors, 0)
box_wh = np.exp(prediction[:, :, 2:4]) * anchors
# Sigmoid class scores
class_scores = expit(prediction[:, :, 5:])
#class_scores = softmax(prediction[:, :, 5:], axis=-1)
# Resize detection map back to the input image size
box_xy *= stride
box_wh *= stride
# Convert centoids to top left coordinates
box_xy -= box_wh / 2
return np.concatenate([box_xy, box_wh, objectness, class_scores], axis=2)
def yolo3_postprocess_np(yolo_outputs, image_shape, anchors, num_classes, model_image_size, max_boxes=100, confidence=0.1, iou_threshold=0.4):
predictions = yolo3_head(yolo_outputs, anchors, num_classes, input_dims=model_image_size)
boxes, classes, scores = yolo3_handle_predictions(predictions,
max_boxes=max_boxes,
confidence=confidence,
iou_threshold=iou_threshold)
boxes = yolo3_adjust_boxes(boxes, image_shape, model_image_size)
return boxes, classes, scores
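# Hedged end-to-end sketch. The anchor set is the standard YOLOv3 COCO
# configuration; `model` and the image pipeline are assumptions, not defined
# in this file:
#   anchors = np.array([[10,13], [16,30], [33,23], [30,61], [62,45],
#                       [59,119], [116,90], [156,198], [373,326]])
#   yolo_outputs = model.predict(image_data)  # list of 3 feature maps
#   boxes, classes, scores = yolo3_postprocess_np(
#       yolo_outputs, image_shape, anchors, num_classes=80,
#       model_image_size=(416, 416))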
def yolo3_handle_predictions(predictions, max_boxes=100, confidence=0.1, iou_threshold=0.4):
boxes = predictions[:, :, :4]
box_confidences = np.expand_dims(predictions[:, :, 4], -1)
box_class_probs = predictions[:, :, 5:]
box_scores = box_confidences * box_class_probs
box_classes = np.argmax(box_scores, axis=-1)
box_class_scores = np.max(box_scores, axis=-1)
pos = np.where(box_class_scores >= confidence)
boxes = boxes[pos]
classes = box_classes[pos]
scores = box_class_scores[pos]
# Boxes, Classes and Scores returned from NMS
n_boxes, n_classes, n_scores = nms_boxes(boxes, classes, scores, iou_threshold, confidence=confidence)
if n_boxes:
boxes = np.concatenate(n_boxes)
classes = np.concatenate(n_classes)
scores = np.concatenate(n_scores)
boxes, classes, scores = filter_boxes(boxes, classes, scores, max_boxes)
return boxes, classes, scores
else:
return [], [], []
def filter_boxes(boxes, classes, scores, max_boxes):
'''
Sort the prediction boxes according to score
and only pick top "max_boxes" ones
'''
# sort result according to scores
sorted_indices = np.argsort(scores)
sorted_indices = sorted_indices[::-1]
nboxes = boxes[sorted_indices]
nclasses = classes[sorted_indices]
nscores = scores[sorted_indices]
# only pick max_boxes
nboxes = nboxes[:max_boxes]
nclasses = nclasses[:max_boxes]
nscores = nscores[:max_boxes]
return nboxes, nclasses, nscores
def soft_nms_boxes(boxes, classes, scores, iou_threshold, confidence=0.1, is_soft=True, use_exp=False, sigma=0.5):
nboxes, nclasses, nscores = [], [], []
for c in set(classes):
# handle data for one class
inds = np.where(classes == c)
b = boxes[inds]
c = classes[inds]
s = scores[inds]
# make a data copy to avoid breaking
# during nms operation
b_nms = copy.deepcopy(b)
c_nms = copy.deepcopy(c)
s_nms = copy.deepcopy(s)
while len(s_nms) > 0:
# pick the max box and store, here
# we also use copy to persist result
i = np.argmax(s_nms, axis=-1)
nboxes.append(copy.deepcopy(b_nms[i]))
nclasses.append(copy.deepcopy(c_nms[i]))
nscores.append(copy.deepcopy(s_nms[i]))
# swap the max line and last line
b_nms[[i,-1],:] = b_nms[[-1,i],:]
c_nms[[i,-1]] = c_nms[[-1,i]]
s_nms[[i,-1]] = s_nms[[-1,i]]
# get box coordinate and area
x = b_nms[:, 0]
y = b_nms[:, 1]
w = b_nms[:, 2]
h = b_nms[:, 3]
areas = w * h
# check IOU
xx1 = np.maximum(x[-1], x[:-1])
            yy1 = np.maximum(y[-1], y[:-1])
xx2 = np.minimum(x[-1] + w[-1], x[:-1] + w[:-1])
yy2 = np.minimum(y[-1] + h[-1], y[:-1] + h[:-1])
w1 = np.maximum(0.0, xx2 - xx1 + 1)
h1 = np.maximum(0.0, yy2 - yy1 + 1)
inter = w1 * h1
iou = inter / (areas[-1] + areas[:-1] - inter)
            # drop the last line since it has been recorded
b_nms = b_nms[:-1]
c_nms = c_nms[:-1]
s_nms = s_nms[:-1]
if is_soft:
# Soft-NMS
if use_exp:
# score refresh formula:
# score = score * exp(-(iou^2)/sigma)
s_nms = s_nms * np.exp(-(iou * iou) / sigma)
else:
# score refresh formula:
# score = score * (1 - iou) if iou > threshold
depress_mask = np.where(iou > iou_threshold)[0]
s_nms[depress_mask] = s_nms[depress_mask]*(1-iou[depress_mask])
keep_mask = np.where(s_nms >= confidence)[0]
else:
# normal Hard-NMS
keep_mask = np.where(iou <= iou_threshold)[0]
# keep needed box for next loop
b_nms = b_nms[keep_mask]
c_nms = c_nms[keep_mask]
s_nms = s_nms[keep_mask]
# reformat result for output
nboxes = [np.array(nboxes)]
nclasses = [np.array(nclasses)]
nscores = [np.array(nscores)]
return nboxes, nclasses, nscores
def nms_boxes(boxes, classes, scores, iou_threshold, confidence=0.1):
nboxes, nclasses, nscores = [], [], []
for c in set(classes):
inds = np.where(classes == c)
b = boxes[inds]
c = classes[inds]
s = scores[inds]
x = b[:, 0]
y = b[:, 1]
w = b[:, 2]
h = b[:, 3]
areas = w * h
order = s.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
            xx1 = np.maximum(x[i], x[order[1:]])
            yy1 = np.maximum(y[i], y[order[1:]])
            xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
            yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])
            w1 = np.maximum(0.0, xx2 - xx1 + 1)
            h1 = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w1 * h1
            iou = inter / (areas[i] + areas[order[1:]] - inter)
            # keep only the boxes whose IoU with the picked box is below threshold
            order = order[np.where(iou <= iou_threshold)[0] + 1]
        nboxes.append(b[keep])
        nclasses.append(c[keep])
        nscores.append(s[keep])
    return nboxes, nclasses, nscores
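# A small sanity check (not part of the original module): two heavily
# overlapping boxes of the same class collapse to one after hard NMS; the
# box values below are assumptions for illustration.
def _demo_nms_boxes():
    boxes = np.array([[10.0, 10.0, 40.0, 40.0],
                      [12.0, 12.0, 40.0, 40.0]])   # x, y, w, h
    classes = np.array([0, 0])
    scores = np.array([0.9, 0.8])
    nb, nc, ns = nms_boxes(boxes, classes, scores, iou_threshold=0.4)
    print(np.concatenate(nb), np.concatenate(ns))  # one box left, score 0.9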
import numpy as np
from sklearn.utils import shuffle as skshuffle
from sklearn.metrics import roc_auc_score
import scipy.sparse as sp
import networkx as nx
# Some Utilities
def get_minibatches(X, mb_size, shuffle=True):
"""
Generate minibatches from given dataset for training.
Params:
-------
X: np.array of M x 3
Contains the triplets from dataset. The entities and relations are
translated to its unique indices.
mb_size: int
Size of each minibatch.
shuffle: bool, default True
Whether to shuffle the dataset before dividing it into minibatches.
Returns:
--------
mb_iter: generator
Example usage:
--------------
mb_iter = get_minibatches(X_train, mb_size)
for X_mb in mb_iter:
// do something with X_mb, the minibatch
"""
minibatches = []
X_shuff = np.copy(X)
if shuffle:
X_shuff = skshuffle(X_shuff)
for i in range(0, X_shuff.shape[0], mb_size):
yield X_shuff[i:i + mb_size]
def sample_negatives(X, n_e):
"""
Perform negative sampling by corrupting head or tail of each triplets in
dataset.
Params:
-------
X: int matrix of M x 3, where M is the (mini)batch size
First column contains index of head entities.
Second column contains index of relationships.
Third column contains index of tail entities.
n_e: int
Number of entities in dataset.
Returns:
--------
X_corr: int matrix of M x 3, where M is the (mini)batch size
        Similar to input param X, but in each row either the head (first col)
        or the tail (third col) is substituted with a random entity.
"""
M = X.shape[0]
corr = np.random.randint(n_e, size=M)
    e_idxs = np.random.choice([0, 2], size=M)
    X_corr = np.copy(X)
    # substitute either the head (column 0) or the tail (column 2) with a random entity
    X_corr[np.arange(M), e_idxs] = corr
    return X_corr
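# A small usage sketch (not part of the original module): the toy triplets
# and the entity count below are assumptions for illustration.
def _demo_sample_negatives():
    X = np.array([[0, 0, 1],
                  [2, 1, 3]])
    X_corr = sample_negatives(X, n_e=5)
    print(X_corr)  # each row has its head or tail replaced by a random entity id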
import numpy as np
#import bethy_fapar as fapar
class photosynthesis():
def __init__(self,datashape=None):
'''
Class initialisation and setup of parameters
'''
        if datashape is None:
self.data = np.zeros([100])
self.Tc = np.ones([100])*25
self.C3 = np.ones([100]).astype(bool)
self.Ipar = (np.arange(100)/100.) * 2000. * 1e-6
self.Lcarbon = np.ones([100]) * 1
self.Rcarbon = np.ones([100]) * 1
self.Scarbon = np.ones([100]) * 1
self.pft = np.array(['C3 grass']*100)
# zero C in K
self.zeroC = 273.15
# gas constant J mol-1 K-1
self.R_gas = 8.314
# oxygen concentration
self.Ox = 0.21 # mol(O2)mol(air)-1
self.O2 = 0.23 # Atmospheric concentration of oxygen (kg O2/kg air)
# energy content of PAR quanta
self.EPAR = 220. # kJmol-1
# ratio of dark respiration to PVM at 25 C
self.FRDC3 = 0.011
self.FRDC4 = 0.042
# scaling for GammaStar
self.GammaStarScale = 1.7e-6
# Effective quantum efficiency C4
self.ALC4 = 0.04
# Curvature parameter (C4)
self.Theta = 0.83
self.molarMassAir_kg = 28.97e-3
self.molarMassCO2_kg = 44.011e-3
self.co2SpecificGravity = self.molarMassCO2_kg/self.molarMassAir_kg
self.variables()
self.defaults()
self.initialise()
def test1(self):
'''
low light, span a temperature range, normal CO2
'''
self.Ipar = np.ones_like(self.data) * 200. * 1e-6
self.co2_ppmv = 390.
self.Tc = np.arange(100) - 30.
self.initialise()
self.defaults()
self.photosynthesis()
import pylab as plt
plt.clf()
plt.plot(self.Tc,self.Wc * 1e6,label='Wc')
plt.plot(self.Tc,self.Wl * 1e6,label='Wl')
plt.plot(self.Tc,self.We * 1e6,label='We')
plt.plot(self.Tc,self.W * 1e6,label='W')
plt.legend()
def photosynthesis(self):
'''
Uses:
self.Tc : canopy (leaf) temperature (C)
self.C3 : array of True ('C3') or False ('C4')
self.Ipar : incident PAR (mol m-2 s-1)
self.Lcarbon : leaf C pool (kg C m-2)
self.Rcarbon : root C pool (kg C m-2)
self.Scarbon : respiring stem C pool (kg C m-2)
'''
self.leafPhotosynthesis()
self.canopyPhotosynthesis()
def variables(self):
'''
Set some items that might be driven from a control file
Generates:
self.theta : mean soil moisture concentration in the root zone,
self.thetac : Critical volumetric SMC (cubic m per cubic m of soil)
self.thetaw : Volumetric wilting point (cubic m per cubic m of soil)
'''
self.thetaw = 0.136328
self.thetac = 0.242433
self.theta = np.ones_like(self.data)
self.m_air = 28.966
self.co2_ppmv = 383.
def initialise(self):
'''
Initialise some items that might be driven from a control file
Uses:
self.data : data sizing array
Generates:
self.theta : mean soil moisture concentration in the root zone,
self.co2c : Canopy level CO2 concentration (kg CO2/kg air).
self.pstar : Surface pressure (Pa)
self.m_co2 : molecular weight of CO2
self.m_air : molecular weight of dry air
'''
self.m_co2 = self.m_air * self.epco2
self.co2_mmr = self.co2_ppmv * self.m_co2 / self.m_air * 1.0e-6
self.co2c = self.co2_mmr*1.
def defaults(self):
'''
Uses:
self.C3 : array of True ('C3') or False ('C4')
self.Tc : canopy (leaf) temperature (C)
Generates:
self.data : data sizing array
self.epco2 : Ratio of molecular weights of CO2 and dry air.
self.epo2 : Ratio of molecular weights of O2 and dry air.
self.Oa : Partial pressume of O2 in the atmosphere
self.ne : constant for Vcmax (mol CO2 m-2 s-1 kg C (kg N)-1)
self.Q10_leaf: Q10 dependence leaf
self.Q10_rs : Q10 dependence rs
self.Q10_Kc : Q10 dependence Kc: CO2
self.Q10_Ko : Q10 dependence Ko: O2
self.Kc : Michaelis-Menten paramemeter for CO2
self.Ko : Michaelis-Menten paramemeter for O2
self.beta1 : colimitation coefficients
self.beta2 : colimitation coefficients
self.nl : leaf nitrogen
self.Gamma : CO2 compensation point in the absence of mitochindrial
respiration (Pa)
self.tau : Rubisco specificity for CO2 relative to O2
self.kappao3 : ratio of leaf resistance for O3 to leaf resistance to water vapour
self.Tupp : PFT-specific parameter ranges: upper (C)
self.Tlow : PFT-specific parameter ranges: lower (C)
self.Fo3_crit: critical value of Ozone stress limitation
self.a : Ozone factor
self.k : PAR extinction coefficient
self.alpha : quantum efficiency mol CO2 [mol PAR photons]-1
self.omega : leaf PAR scattering coefficient
self.fdr : dark respiration coefficient
self.rg : growth respiration coefficient
self.n0 : top leaf N concentration (kg N [kg C]-1)
self.nrl : ratio of N conc in roots and leaves
self.nsl : ratio of N conc in stems and leaves
self.Vcmax25 : maximum rate of carboxylation of Rubisco (mol CO2 m-2 s-1)
at 25 C
self.Vcmax : maximum rate of carboxylation of Rubisco (mol CO2 m-2 s-1)
self.fc : temperature factors for Vcmax
self.aws : ratio of total stem C to respiring stem C
self.gamma0 : minimum leaf turnover rate (360 days-1)
self.dm : rate of change of turnover with soil moisture
stress (360 days-1)
self.dt : rate of change of turnover with T (360 days K)-1
self.moff : threshold soil mositure stress
self.toff : threshold temperature (K)
self.gammap : rate of leaf growth (360 days)-1
self.gammav : disturbance rate (360 days-1)
self.gammar : root biomass turnover rate (360 days-1)
self.gammaw : woody biomass turnover rate (360 days-1)
self.Lmax : maximum LAI
self.Lmin : minimum LAI
self.sigmal : specific leaf density (kg C m-2 per unit LAI)
self.awl : allometric coefficient
self.bwl : allometric exponent
self.etasl : ratio of live stemwood to LAI * height
self.dt : time interval
self.ratio : Ratio of leaf resistance for CO2 to leaf resistance for H2O.
self.glmin : minimum stomatal conductance
'''
self.dt = 1.0
self.data = np.zeros_like(self.C3).astype(float)
self.glmin = 1.0e-10
self.pstar = 101e3
self.epco2 = 1.5194
self.epo2 = 1.106
self.ratio=1.6
#==============Jules/ triffid parameters
# default self.Q10_leaf, self.Q10_rs etc.
self.Q10_leaf = 2.0
self.Q10_rs = 0.57
self.Q10_Kc = 2.1
self.Q10_Ko = 1.2
# leaf nitrogen/Vcmax terms
# default for self.ne mol CO2 m-2 s-1 kg C (kg N)-1
self.n0 = np.zeros_like(self.data) + 0.060
self.n0[self.pft == 'Broadleaf tree'] = 0.046
self.n0[self.pft == 'Needleleaf tree'] = 0.033
self.n0[self.pft == 'C3 grass'] = 0.073
self.ne = 0.0008*np.ones_like(self.data)
self.ne[~self.C3] = 0.0004
self.nl = self.n0*np.ones_like(self.data)
# CO2 compensation point
self.Oa = 0.21 * self.pstar # assuming 21% of atmosphere is O2
self.tau = 2600.*self.Q10_rs**(0.1*(self.Tc-25.))
self.Gamma = (self.Oa/(2.*self.tau))*np.ones_like(self.data)
self.Gamma[~self.C3] = 0.
# colimitation coefficients:
self.beta1 = 0.83
self.beta2 = 0.93
# use larger values here
self.beta1 = 0.999
self.beta2 = 0.999
# ratio of leaf resistance for O3 to leaf resistance to water vapour
self.kappao3 = 1.67
# leaf T limits (C)
self.Tupp = np.zeros_like(self.data) + 36.0
self.Tlow = np.zeros_like(self.data)
self.Tlow[self.pft == 'Needleleaf tree'] = -10.0
self.Tlow[self.pft == 'C4 grass'] = 13.0
self.Tupp[self.pft == 'Needleleaf tree'] = 26.0
self.Tupp[self.pft == 'C4 grass'] = 45.0
self.Vcmax25 = self.ne * self.nl
self.ft = self.Q10_leaf ** (0.1 * (self.Tc-25.))
self.Vcmax = self.Vcmax25 * self.ft / ((1.0+np.exp(0.3*(self.Tc-self.Tupp)))\
*(1.0+np.exp(0.3*(self.Tlow-self.Tc))))
# O3 terms
self.Fo3_crit = np.zeros_like(self.data) + 1.6
self.Fo3_crit[self.pft == 'C3 grass'] = 5.0
self.Fo3_crit[self.pft == 'C4 grass'] = 5.0
self.a = np.zeros_like(self.data) + 0.04
self.a[self.pft == 'Needleleaf tree'] = 0.02
self.a[self.pft == 'C3 grass'] = 0.25
self.a[self.pft == 'C4 grass'] = 0.13
self.a[self.pft == 'Shrub'] = 0.03
self.k = np.zeros_like(self.data) + 0.5
self.alpha = np.zeros_like(self.data) + 0.08
self.alpha[self.pft == 'C3 grass'] = 0.12
self.alpha[self.pft == 'C4 grass'] = 0.06
self.omega = np.zeros_like(self.data) + 0.15
self.omega[self.pft == 'C4 grass'] = 0.17
self.fdr = np.zeros_like(self.data) + 0.015
self.fdr[self.pft == 'C4 grass'] = 0.025
self.rg = np.zeros_like(self.data) + 0.25
        self.nrl = np.zeros_like(self.data)
from atm import reference
import numpy as np
from utils import geo
def calc_atm_loss(freq_hz, gas_path_len_m=0, rain_path_len_m=0, cloud_path_len_m=0, atmosphere=None, pol_angle=0,
el_angle=0):
"""
Ref:
ITU-R P.676-11(09/2016) Attenuation by atmospheric gases
ITU-R P.840-6 (09/2013) Attenuation due to clouds and fog
ITU-R P.838-3 (03/2005) Specific attenuation model for rain for use in
prediction methods
Ported from MATLAB Code
<NAME>
16 March 2021
:param freq_hz: Frequency [Hz]
:param gas_path_len_m: Path length for gas loss [m] [default = 0]
:param rain_path_len_m: Path length for rain loss [m] [default = 0]
:param cloud_path_len_m: Path length for cloud loss [m] [default = 0]
:param atmosphere: atm.reference.Atmosphere object (if not provided, standard atmosphere will be generated)
:param pol_angle: Polarization angle [radians], 0 for Horizontal, pi/2 for Vertical, between 0 and pi for slant.
[default = 0]
:param el_angle: Elevation angle of the path under test [default = 0]
:return: loss along the path due to atmospheric absorption [dB, one-way]
"""
if atmosphere is None:
# Default atmosphere is the standard atmosphere at sea level, with no
# fog/clouds or rain.
atmosphere = reference.get_standard_atmosphere(0)
# Compute loss coefficients
if np.any(gas_path_len_m > 0):
coeff_ox, coeff_water = get_gas_loss_coeff(freq_hz, atmosphere.press, atmosphere.water_vapor_press,
atmosphere.temp)
coeff_gas = coeff_ox + coeff_water
else:
coeff_gas = 0
    if np.any(rain_path_len_m > 0) and np.any(atmosphere.rainfall > 0):
coeff_rain = get_rain_loss_coeff(freq_hz, pol_angle, el_angle, atmosphere.rainfall)
else:
coeff_rain = 0
    if np.any(cloud_path_len_m > 0) and np.any(atmosphere.cloud_dens > 0):
coeff_cloud = get_fog_loss_coeff(freq_hz, atmosphere.cloud_dens, atmosphere.temp)
else:
coeff_cloud = 0
# Compute loss components
    loss_gas_db = coeff_gas * gas_path_len_m / 1.0e3
    loss_rain_db = coeff_rain * rain_path_len_m / 1.0e3
    loss_cloud_db = coeff_cloud * cloud_path_len_m / 1.0e3
    return loss_gas_db + loss_rain_db + loss_cloud_db
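# A minimal usage sketch (not part of the original module), assuming the full
# ITU-R gas model in get_gas_loss_coeff is available. The SimpleNamespace
# stand-in only supplies the attribute names this function reads (press,
# water_vapor_press, temp, rainfall, cloud_dens); real callers would pass an
# atm.reference.Atmosphere object instead.
def _demo_calc_atm_loss():
    from types import SimpleNamespace
    atmo = SimpleNamespace(press=1013.25, water_vapor_press=10.0, temp=288.15,
                           rainfall=0.0, cloud_dens=0.0)
    loss_db = calc_atm_loss(10e9, gas_path_len_m=10e3, atmosphere=atmo)
    print(loss_db)  # one-way gas absorption over a 10 km path at 10 GHz [dB]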
def calc_zenith_loss(freq_hz, alt_start_m=0, zenith_angle_deg=0):
"""
# Computes the cumulative loss from alt_start [m] to zenith (100 km
# altitude), for the given frequencies (freq) in Hz and angle from zenith
# zenith_angle, in degrees.
#
# Does not account for refraction of the signal as it travels through the
# atmosphere; assumes a straight line propagation at the given zenith
# angle.
Ported from MATLAB Code
<NAME>
17 March 2021
:param freq_hz: Carrier frequency [Hz]
:param alt_start_m: Starting altitude [m]
:param zenith_angle_deg: Angle between line of sight and zenith (straight up) [deg]
:return zenith_loss: Cumulative loss to the edge of the atmosphere [dB]
:return zenith_loss_o: Cumulative loss due to dry air [dB]
:return zenith_loss_w: Cumulative loss due to water vapor [dB]
"""
# Add a new first dimension to all the inputs (if they're not scalar)
if np.size(freq_hz) > 1:
freq_hz = np.expand_dims(freq_hz, axis=0)
if np.size(alt_start_m) > 1:
alt_start_m = np.expand_dims(alt_start_m, axis=0)
if np.size(zenith_angle_deg) > 1:
zenith_angle_deg = np.expand_dims(zenith_angle_deg, axis=0)
# Make Altitude Layers
# From ITU-R P.676-11(12/2017), layers should be set at exponential intervals
num_layers = 922 # Used for ceiling of 100 km
layer_delta = .0001*np.exp(np.arange(num_layers)/100) # Layer thicknesses [km], eq 21
layer_delta = np.reshape(layer_delta, (num_layers, 1))
layer_top = np.cumsum(layer_delta) # [km]
layer_bottom = layer_top - layer_delta # [km]
layer_mid = (layer_top+layer_bottom)/2
# Drop layers below alt_start
alt_start_km = alt_start_m / 1e3
    layer_mask = layer_top >= np.min(alt_start_km)
layer_bottom = layer_bottom[layer_mask]
layer_mid = layer_mid[layer_mask]
layer_top = layer_top[layer_mask]
# Lookup standard atmosphere for each band
atmosphere = reference.get_standard_atmosphere(layer_mid*1e3)
# Compute loss coefficient for each band
    ao, aw = get_gas_loss_coeff(freq_hz, atmosphere.press, atmosphere.water_vapor_press, atmosphere.temp)
# Account for off-nadir paths and partial layers
el_angle_deg = 90 - zenith_angle_deg
    layer_delta_eff = geo.compute_slant_range(np.maximum(layer_bottom, alt_start_km), layer_top, el_angle_deg, True)
np.place(layer_delta_eff, layer_top <= alt_start_km, 0) # Set all layers below alt_start_km to zero
# Zenith Loss by Layer (loss to pass through each layer)
zenith_loss_by_layer_oxygen = ao*layer_delta_eff
zenith_loss_by_layer_water = aw*layer_delta_eff
# Cumulative Zenith Loss
# Loss from ground to the bottom of each layer
zenith_loss_o = np.squeeze(np.sum(zenith_loss_by_layer_oxygen, axis=0))
zenith_loss_w = np.squeeze(np.sum(zenith_loss_by_layer_water, axis=0))
zenith_loss = zenith_loss_o + zenith_loss_w
return zenith_loss, zenith_loss_o, zenith_loss_w
def get_rain_loss_coeff(freq_hz, pol_angle_rad, el_angle_rad, rainfall_rate):
"""
Computes the rain loss coefficient given a frequency, polarization,
elevation angle, and rainfall rate, according to ITU-R P.838-3, 2005.
Ported from MATLAB Code
<NAME>
16 March 2021
:param freq_hz: Propagation Frequency [Hz]
    :param pol_angle_rad: Polarization angle [radians], 0 = Horizontal and pi/2 = Vertical. Slanted polarizations
                          have a value between 0 and pi.
:param el_angle_rad: Propagation path elevation angle [radians]
:param rainfall_rate: Rainfall rate [mm/hr]
:return: Loss coefficient [dB/km] caused by rain.
"""
# Add a new first dimension to all the inputs (if they're not scalar)
if np.size(freq_hz) > 1:
freq_hz = np.expand_dims(freq_hz, axis=0)
if np.size(pol_angle_rad) > 1:
pol_angle_rad = np.expand_dims(pol_angle_rad, axis=0)
if np.size(el_angle_rad) > 1:
el_angle_rad = np.expand_dims(el_angle_rad, axis=0)
if np.size(rainfall_rate) > 1:
rainfall_rate = np.expand_dims(rainfall_rate, axis=0)
# Coeffs for kh
a = np.array([-5.3398, -0.35351, -0.23789, -0.94158])
b = np.array([-0.10008, 1.26970, 0.86036, 0.64552])
c = np.array([1.13098, 0.454, 0.15354, 0.16817])
m = -0.18961
ck = 0.71147
log_kh = np.squeeze(np.sum(a * np.exp(-((np.log10(freq_hz / 1e9) - b) / c) ** 2), axis=0)
+ m * np.log10(freq_hz / 1e9) + ck)
kh = 10**log_kh
# Coeffs for kv
a = np.array([-3.80595, -3.44965, -0.39902, 0.50167])
b = np.array([0.56934, -0.22911, 0.73042, 1.07319])
c = np.array([0.81061, 0.51059, 0.11899, 0.27195])
m = -0.16398
ck = 0.63297
log_kv = np.squeeze(np.sum(a * np.exp(-((np.log10(freq_hz / 1e9) - b) / c) ** 2), axis=0)
+ m * np.log10(freq_hz / 1e9) + ck)
kv = 10**log_kv
# Coeffs for ah
a = np.array([-0.14318, 0.29591, 0.32177, -5.37610, 16.1721])
b = np.array([1.82442, 0.77564, 0.63773, -0.96230, -3.29980])
c = np.array([-0.55187, 0.19822, 0.13164, 1.47828, 3.43990])
m = 0.67849
ca = -1.95537
ah = np.squeeze(np.sum(a * np.exp(-((np.log10(freq_hz / 1e9) - b) / c) ** 2), axis=0)
+ m * np.log10(freq_hz / 1e9) + ca)
# Coeffs for av
a = np.array([-0.07771, 0.56727, -0.20238, -48.2991, 48.5833])
b = np.array([2.33840, 0.95545, 1.14520, 0.791669, 0.791459])
c = np.array([-0.76284, 0.54039, 0.26809, 0.116226, 0.116479])
m = -0.053739
ca = 0.83433
av = np.squeeze(np.sum(a * np.exp(-((np.log10(freq_hz / 1e9) - b) / c) ** 2), axis=0)
+ m * np.log10(freq_hz / 1e9) + ca)
# Account for Polarization and Elevation Angles
k = .5*(kh + kv + (kh-kv) * np.cos(el_angle_rad) ** 2 * np.cos(2 * pol_angle_rad))
a = (kh * ah + kv * av + (kh*ah-kv*av) * np.cos(el_angle_rad) ** 2 * np.cos(2 * pol_angle_rad)) / (2 * k)
return k*rainfall_rate**a
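# A quick scalar sanity check (not part of the original module); the
# 20 GHz / 25 mm/hr operating point is an assumption for illustration.
def _demo_rain_loss():
    gamma_r = get_rain_loss_coeff(20e9, pol_angle_rad=0.0, el_angle_rad=0.0,
                                  rainfall_rate=25.0)
    print(gamma_r)  # specific rain attenuation [dB/km]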
def get_fog_loss_coeff(f, cloud_dens, temp_k=None):
"""
Implement the absorption loss coefficient due to clouds and fog, as a function of the frequency, cloud density,
and temperature, according to ITU-R P.840-7 (2017).
Ported from MATLAB Code
<NAME>
16 March 2021
:param f: Propagation Frequencies [Hz]
:param cloud_dens: Cloud/fog density [g/m^3]
:param temp_k: Atmospheric temperature [K]
:return: Loss coefficient [dB/km]
"""
if temp_k is None:
atmosphere = reference.get_standard_atmosphere()
temp_k = atmosphere.temp
# Cloud Liquid Water Specific Attenuation Coefficient
theta = 300 / temp_k
e0 = 77.66+103.3*(theta-1)
e1 = 0.0671*e0
e2 = 3.52
fp = 20.20-146*(theta-1)+316*(theta-1)**2
fs = 39.8*fp
e_prime = (e0-e1)/(1+((f/1e9)/fp)**2)+(e1-e2)/(1+((f/1e9)/fs)**2)+e2
e_prime_prime = (f/1e9)*(e0-e1)/(fp*(1+(f/1e9/fp)**2))+((f/1e9)*(e1-e2)/(fs*(1+((f/1e9)/fs)**2)))
eta = (2+e_prime)/e_prime_prime
kl = .819*(f/1e9)/(e_prime_prime*(1+eta**2))
# Cloud attenuation
return kl * cloud_dens
def get_gas_loss_coeff(freq_hz, press, water_vapor_press, temp):
"""
Implement the atmospheric loss coefficients from Annex 1 of ITU-R P.676-11 (12/2017)
If array inputs are specified, then array results are given for alphaO and alphaW.
:param freq_hz: Propagation Frequencies [Hz]
:param press: Dry Air Pressure [hPa]
:param water_vapor_press: Water Vapor Partial Pressure [hPa]
:param temp: Temperature [K]
:return coeff_ox: Gas loss coefficient due to oxygen [dB/km]
:return coeff_water: Gas loss coefficient due to water vapor [dB/km]
"""
# Determine largest dimension in use
if np.size(freq_hz) > 1:
freq_hz = np.expand_dims(freq_hz, axis=0)
if np.size(press) > 1:
        press = np.expand_dims(press, axis=0)
import numpy as np
import argparse
import time
from safe_agents.policies import MLP
from open_safety.envs.balance_bot_env import BalanceBotEnv
#from open_safety_gym.envs.kart_env import KartEnv
#from open_safety_gym.envs.hoverboard_env import HoverboardEnv
import skimage
import skimage.io as sio
def get_fitness(agent, env, epds, get_cost=True, max_steps=1000, save_frames=False):
#epd_rewards = []
#epd_costs = []
total_steps = 0
sum_reward = 0
sum_cost = 0
exp_id = str(int(time.time()))
for epd in range(epds):
steps = 0
done = False
obs = env.reset()
while not done and steps < max_steps:
action = agent.forward(obs)
if len(action.shape) > 1:
action = action.squeeze()
obs, reward, done, info = env.step(action)
sum_reward += reward
sum_cost += info["cost"]
steps += 1
if save_frames:
img = env.render()[2]
sio.imsave("./frames/id{}_epd{}_step{}.png".format(exp_id, epd, steps), img)
total_steps += steps
sum_reward /= max_steps * epds
sum_cost /= max_steps * epds
return sum_reward, sum_cost, total_steps
def get_elite_mean(population, reward, cost, cost_constraint=2.5, pure_rewards=None, rh=False):
if not(rh):
adjusted_cost = [max([cost_constraint, elem]) for elem in cost]
cost_fitness_agent = [[cost, fit, agent.parameters]
for a_cost, cost, fit, agent in \
sorted(zip(adjusted_cost, cost, pure_rewards, population),\
key = lambda trip: [-trip[0], trip[2]], reverse=True)]
fitness = [elem[1] for elem in cost_fitness_agent]
cost = [elem[0] for elem in cost_fitness_agent]
population = [elem[2] for elem in cost_fitness_agent]
else:
fitness_agent = [[fit, agent.parameters, my_cost, r] \
for fit, agent, my_cost, r in \
sorted(zip(reward, population, cost, pure_rewards),\
key = lambda trip: [trip[0]], reverse=True)
]
fitness = [elem[0] for elem in fitness_agent]
cost = [elem[2] for elem in fitness_agent]
population = [elem[1] for elem in fitness_agent]
my_rewards = [elem[3] for elem in fitness_agent]
keep = int(0.125 * len(population))
elite_pop = population[:keep]
elite_cost = cost[:keep]
elite_fitness = fitness[:keep]
if rh:
elite_rewards = my_rewards[:keep]
else:
elite_rewards = fitness[:keep]
print("population mean cost, rewards: {:.3e}, {:.3e}".format(\
np.mean(cost), np.mean(pure_rewards)))
print("elite mean cost, rewards: {:.3e}, {:.3e}".format(\
np.mean(elite_cost), np.mean(elite_rewards)))
param_sum = elite_pop[0]
for agent_idx in range(1,keep):
param_sum += elite_pop[agent_idx]
param_means = param_sum / keep
    return [param_means, np.mean(cost), np.mean(elite_cost)]
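# A small usage sketch (not part of the original module): runs the
# reward-hierarchy branch over 16 dummy agents with random parameter vectors,
# rewards and costs; all values below are assumptions for illustration.
def _demo_get_elite_mean():
    from types import SimpleNamespace
    rng = np.random.default_rng(0)
    pop = [SimpleNamespace(parameters=rng.normal(size=8)) for _ in range(16)]
    rewards = rng.normal(size=16).tolist()
    costs = rng.uniform(0, 5, size=16).tolist()
    means, mean_cost, mean_elite_cost = get_elite_mean(
        pop, rewards, costs, pure_rewards=rewards, rh=True)
    print(means.shape, mean_cost, mean_elite_cost)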
"""
Created on Thu Jan 26 17:04:11 2017
@author: <NAME>, <EMAIL>
"""
#%matplotlib inline
import numpy as np
import pandas as pd
import dicom
import os
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
import scipy.ndimage # added for scaling
import cv2
import time
import glob
from skimage import measure, morphology, segmentation
import SimpleITK as sitk
RESIZE_SPACING = [2,2,2] # z, y, x (x & y MUST be the same)
RESOLUTION_STR = "2x2x2"
img_rows = 448
img_cols = 448 # global values
DO_NOT_USE_SEGMENTED = True
#STAGE = "stage1"
STAGE_DIR_BASE = "../input/%s/" # on one cluster we had input_shared
LUNA_MASKS_DIR = "../luna/data/original_lung_masks/"
luna_subset = 0 # initial
LUNA_BASE_DIR = "../luna/data/original_lungs/subset%s/" # added on AWS; data as well
LUNA_DIR = LUNA_BASE_DIR % luna_subset
CSVFILES = "../luna/data/original_lungs/CSVFILES/%s"
LUNA_ANNOTATIONS = CSVFILES % "annotations.csv"
LUNA_CANDIDATES = CSVFILES % "candidates.csv"
# Load the scans in given folder path (loads the most recent acquisition)
def load_scan(path):
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
#slices.sort(key = lambda x: int(x.InstanceNumber))
acquisitions = [x.AcquisitionNumber for x in slices]
vals, counts = np.unique(acquisitions, return_counts=True)
    vals = vals[::-1] # reverse order so the later acquisitions come first (np.unique returns values in ascending order)
counts = counts[::-1]
    ## take the acquisition that has more entries; if the counts are identical take the later one
acq_val_sel = vals[np.argmax(counts)]
##acquisitions = sorted(np.unique(acquisitions), reverse=True)
if len(vals) > 1:
print ("WARNING ##########: MULTIPLE acquisitions & counts, acq_val_sel, path: ", vals, counts, acq_val_sel, path)
slices2= [x for x in slices if x.AcquisitionNumber == acq_val_sel]
slices = slices2
    ## ONE path includes 2 acquisitions (2 sets); take the latter acquisition only, which is typically better than the earlier ones.
## example of the '../input/stage1/b8bb02d229361a623a4dc57aa0e5c485'
#slices.sort(key = lambda x: int(x.ImagePositionPatient[2])) # from v 8, BUG should be float
slices.sort(key = lambda x: float(x.ImagePositionPatient[2])) # from v 9
try:
slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
except:
slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
for s in slices:
s.SliceThickness = slice_thickness
return slices
def get_3d_data_slices(slices): # get data in Hunsfield Units
slices.sort(key = lambda x: float(x.ImagePositionPatient[2])) # from v 9
image = np.stack([s.pixel_array for s in slices])
image = image.astype(np.int16) # ensure int16 (it may be here uint16 for some images )
    image[image == -2000] = 0 # correcting cylindrical bound entries to 0
# Convert to Hounsfield units (HU)
# The intercept is usually -1024
for slice_number in range(len(slices)): # from v 8
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
if slope != 1: # added 16 Jan 2016, evening
image[slice_number] = slope * image[slice_number].astype(np.float64)
image[slice_number] = image[slice_number].astype(np.int16)
image[slice_number] += np.int16(intercept)
return np.array(image, dtype=np.int16)
def get_pixels_hu(slices):
image = np.stack([s.pixel_array for s in slices])
image = image.astype(np.int16)
# Set outside-of-scan pixels to 0
# The intercept is usually -1024, so air is approximately 0
image[image == -2000] = 0
# Convert to Hounsfield units (HU)
### slope can differ per slice -- so do it individually (case in point black_tset, slices 95 vs 96)
### Changes/correction - 31.01.2017
for slice_number in range(len(slices)):
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
if slope != 1:
image[slice_number] = slope * image[slice_number].astype(np.float64)
image[slice_number] = image[slice_number].astype(np.int16)
image[slice_number] += np.int16(intercept)
return np.array(image, dtype=np.int16)
MARKER_INTERNAL_THRESH = -400
MARKER_FRAME_WIDTH = 9 # 9 seems OK for the half special case ...
def generate_markers(image):
#Creation of the internal Marker
useTestPlot = False
if useTestPlot:
timg = image
plt.imshow(timg, cmap='gray')
plt.show()
add_frame_vertical = True
if add_frame_vertical: # add frame for potentially closing the lungs that touch the edge, but only vertically
        fw = MARKER_FRAME_WIDTH # frame width (it looks like 2 is the minimum width for the algorithms implemented here, namely the first 2 operations for the marker_internal)
xdim = image.shape[1]
#ydim = image.shape[0]
img2 = np.copy(image)
#y3 = ydim // 3
img2 [:, 0] = -1024
img2 [:, 1:fw] = 0
img2 [:, xdim-1:xdim] = -1024
img2 [:, xdim-fw:xdim-1] = 0
marker_internal = img2 < MARKER_INTERNAL_THRESH
else:
marker_internal = image < MARKER_INTERNAL_THRESH # was -400
useTestPlot = False
if useTestPlot:
timg = marker_internal
plt.imshow(timg, cmap='gray')
plt.show()
correct_edges2 = False ## NOT a good idea - no added value
if correct_edges2:
marker_internal[0,:] = 0
marker_internal[:,0] = 0
#marker_internal[:,1] = True
#marker_internal[:,2] = True
marker_internal[511,:] = 0
marker_internal[:,511] = 0
marker_internal = segmentation.clear_border(marker_internal, buffer_size=0)
marker_internal_labels = measure.label(marker_internal)
areas = [r.area for r in measure.regionprops(marker_internal_labels)]
areas.sort()
if len(areas) > 2:
for region in measure.regionprops(marker_internal_labels):
if region.area < areas[-2]:
for coordinates in region.coords:
marker_internal_labels[coordinates[0], coordinates[1]] = 0
marker_internal = marker_internal_labels > 0
#Creation of the external Marker
external_a = ndimage.binary_dilation(marker_internal, iterations=10) # was 10
external_b = ndimage.binary_dilation(marker_internal, iterations=55) # was 55
marker_external = external_b ^ external_a
#Creation of the Watershed Marker matrix
#marker_watershed = np.zeros((512, 512), dtype=np.int) # origi
marker_watershed = np.zeros((marker_external.shape), dtype=np.int)
marker_watershed += marker_internal * 255
marker_watershed += marker_external * 128
return marker_internal, marker_external, marker_watershed
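# An illustrative sketch (not part of the original module), assuming the legacy
# numpy/scipy/skimage stack this script targets: builds markers for a synthetic
# 512x512 slice with two dark "lung" blobs on a soft-tissue background.
def _demo_generate_markers():
    img = np.full((512, 512), 40, dtype=np.int16)   # soft tissue, ~40 HU
    img[150:350, 100:220] = -800                    # left lung (air-like)
    img[150:350, 290:410] = -800                    # right lung
    internal, external, watershed_marker = generate_markers(img)
    print(internal.sum(), external.sum(), np.unique(watershed_marker))  # [0, 128, 255]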
# Some of the starting Code is taken from ArnavJain, since it's more readable then my own
def generate_markers_3d(image):
#Creation of the internal Marker
marker_internal = image < -400
marker_internal_labels = np.zeros(image.shape).astype(np.int16)
for i in range(marker_internal.shape[0]):
marker_internal[i] = segmentation.clear_border(marker_internal[i])
marker_internal_labels[i] = measure.label(marker_internal[i])
#areas = [r.area for r in measure.regionprops(marker_internal_labels)]
areas = [r.area for i in range(marker_internal.shape[0]) for r in measure.regionprops(marker_internal_labels[i])]
for i in range(marker_internal.shape[0]):
areas = [r.area for r in measure.regionprops(marker_internal_labels[i])]
areas.sort()
if len(areas) > 2:
for region in measure.regionprops(marker_internal_labels[i]):
if region.area < areas[-2]:
for coordinates in region.coords:
marker_internal_labels[i, coordinates[0], coordinates[1]] = 0
marker_internal = marker_internal_labels > 0
#Creation of the external Marker
# 3x3 structuring element with connectivity 1, used by default
struct1 = ndimage.generate_binary_structure(2, 1)
struct1 = struct1[np.newaxis,:,:] # expand by z axis .
external_a = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=10)
external_b = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=55)
marker_external = external_b ^ external_a
#Creation of the Watershed Marker matrix
#marker_watershed = np.zeros((512, 512), dtype=np.int) # origi
marker_watershed = np.zeros((marker_external.shape), dtype=np.int)
marker_watershed += marker_internal * 255
marker_watershed += marker_external * 128
return marker_internal, marker_external, marker_watershed
BINARY_CLOSING_SIZE = 7 #was 7 before final; 5 for disk seems sufficient - for safety let's go with 6 or even 7
def seperate_lungs(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers(image)
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, 1)
sobel_filtered_dy = ndimage.sobel(image, 0)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
#Reducing the image created by the Watershed algorithm to its outline
outline = ndimage.morphological_gradient(watershed, size=(3,3))
outline = outline.astype(bool)
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
#Perform the Black-Hat
outline += ndimage.black_tophat(outline, structure=blackhat_struct)
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
### NOTE if no iterattions, i.e. default 1 we get holes within lungs for the disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512))) ### was -2000
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def rescale_n(n,reduce_factor):
return max( 1, int(round(n / reduce_factor)))
def seperate_lungs_cv2(image): # for increased speed
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers(image)
#image_size = image.shape[0]
reduce_factor = 512 / image.shape[0]
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, 1)
sobel_filtered_dy = ndimage.sobel(image, 0)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
useTestPlot = False
if useTestPlot:
timg = sobel_gradient
plt.imshow(timg, cmap='gray')
plt.show()
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
if useTestPlot:
timg = marker_external
plt.imshow(timg, cmap='gray')
plt.show()
#Reducing the image created by the Watershed algorithm to its outline
#wsize = rescale_n(3,reduce_factor) # THIS IS TOO SMALL, dynamically adjusting the size for the watersehed algorithm
outline = ndimage.morphological_gradient(watershed, size=(3,3)) # original (3,3), (wsize, wsize) is too small to create an outline
outline = outline.astype(bool)
outline_u = outline.astype(np.uint8) #added
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
use_reduce_factor = True
if use_reduce_factor:
blackhat_struct = ndimage.iterate_structure(blackhat_struct, rescale_n(8,reduce_factor)) # dyanmically adjust the number of iterattions; original was 8
else:
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
blackhat_struct_cv2 = blackhat_struct.astype(np.uint8)
#Perform the Black-Hat
#outline += ndimage.black_tophat(outline, structure=blackhat_struct) # original slow
#outline1 = outline + (cv2.morphologyEx(outline_u, cv2.MORPH_BLACKHAT, kernel=blackhat_struct_cv2)).astype(np.bool)
#outline2 = outline + ndimage.black_tophat(outline, structure=blackhat_struct)
#np.array_equal(outline1,outline2) # True
outline += (cv2.morphologyEx(outline_u, cv2.MORPH_BLACKHAT, kernel=blackhat_struct_cv2)).astype(np.bool) # fats
if useTestPlot:
timg = outline
plt.imshow(timg, cmap='gray')
plt.show()
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
if useTestPlot:
timg = lungfilter
plt.imshow(timg, cmap='gray')
plt.show()
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure2 = morphology.disk(2) # used to fill the gaos/holes close to the border (otherwise the large sttructure would create a gap by the edge)
if use_reduce_factor:
structure3 = morphology.disk(rescale_n(BINARY_CLOSING_SIZE,reduce_factor)) # dynanically adjust; better , 5 seems sufficient, we use 7 for safety/just in case
else:
structure3 = morphology.disk(BINARY_CLOSING_SIZE) # dynanically adjust; better , 5 seems sufficient, we use 7 for safety/just in case
##lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, ORIGINAL iterations=3) # was structure=np.ones((5,5))
lungfilter2 = ndimage.morphology.binary_closing(lungfilter, structure=structure2, iterations=3) # ADDED
lungfilter3 = ndimage.morphology.binary_closing(lungfilter, structure=structure3, iterations=3)
lungfilter = np.bitwise_or(lungfilter2, lungfilter3)
### NOTE if no iterattions, i.e. default 1 we get holes within lungs for the disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
#image.shape
#segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512)).astype(np.int16)) # was -2000 someone suggested 30
segmented = np.where(lungfilter == 1, image, -2000*np.ones(image.shape).astype(np.int16)) # was -2000 someone suggested 30
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def seperate_lungs_3d(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers_3d(image)
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, axis=2)
sobel_filtered_dy = ndimage.sobel(image, axis=1)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
#Reducing the image created by the Watershed algorithm to its outline
outline = ndimage.morphological_gradient(watershed, size=(1,3,3))
outline = outline.astype(bool)
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
blackhat_struct = blackhat_struct[np.newaxis,:,:]
#Perform the Black-Hat
outline += ndimage.black_tophat(outline, structure=blackhat_struct) # very long time
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
structure = structure[np.newaxis,:,:]
lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
### NOTE if no iterattions, i.e. default 1 we get holes within lungs for the disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
segmented = np.where(lungfilter == 1, image, -2000*np.ones(marker_internal.shape))
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def get_slice_location(dcm):
return float(dcm[0x0020, 0x1041].value)
def thru_plane_position(dcm):
"""Gets spatial coordinate of image origin whose axis
is perpendicular to image plane.
"""
orientation = tuple((float(o) for o in dcm.ImageOrientationPatient))
position = tuple((float(p) for p in dcm.ImagePositionPatient))
rowvec, colvec = orientation[:3], orientation[3:]
normal_vector = np.cross(rowvec, colvec)
slice_pos = np.dot(position, normal_vector)
return slice_pos
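# A small worked example (not part of the original module): for a pure axial
# orientation the normal is (0, 0, 1), so the result is just the z coordinate.
# The dummy DICOM attributes below are assumptions for illustration.
def _demo_thru_plane_position():
    from types import SimpleNamespace
    dcm = SimpleNamespace(ImageOrientationPatient=[1, 0, 0, 0, 1, 0],
                          ImagePositionPatient=[-200.0, -180.0, -120.0])
    print(thru_plane_position(dcm))  # -120.0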
def resample(image, scan, new_spacing=[1,1,1]):
# Determine current pixel spacing
spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))
spacing = np.array(list(spacing))
#scan[2].SliceThickness
resize_factor = spacing / new_spacing
new_real_shape = image.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize_factor = new_shape / image.shape
new_spacing = spacing / real_resize_factor
image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest') ### early orig modified
return image, new_spacing
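# A small usage sketch (not part of the original module): the dummy slice
# geometry below (2.5 mm slices, 0.7 mm pixels) is an assumption for
# illustration; it resamples onto the 2x2x2 mm grid used throughout this script.
def _demo_resample():
    from types import SimpleNamespace
    scan = [SimpleNamespace(SliceThickness=2.5, PixelSpacing=[0.7, 0.7])]
    vol = np.random.randint(-1000, 400, size=(20, 64, 64)).astype(np.int16)
    out, new_spacing = resample(vol, scan, RESIZE_SPACING)
    print(vol.shape, '->', out.shape, 'new spacing:', new_spacing)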
def segment_all(stage, part=0, processors=1, showSummaryPlot=True): # stage added to simplify the stage1 and stage2 calculations
count = 0
STAGE_DIR = STAGE_DIR_BASE % stage
folders = glob.glob(''.join([STAGE_DIR,'*']))
if len(folders) == 0:
print ("ERROR, check directory, no folders found in: ", STAGE_DIR )
for folder in folders:
count += 1
if count % processors == part: # do this part in this process, otherwise skip
path = folder
slices = load_scan(path)
image_slices = get_3d_data_slices(slices)
#mid = len(image_slices) // 2
#img_sel = mid
useTestPlot = False
if useTestPlot:
print("Shape before segmenting\t", image_slices.shape)
plt.hist(image_slices.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
start = time.time()
resampleImages = True
if resampleImages:
            image_resampled, spacing = resample(image_slices, slices, RESIZE_SPACING) # let's start with this small resolution for working out the system (then perhaps 2, 0.667, 0.667)
print("Shape_before_&_after_resampling\t", image_slices.shape,image_resampled.shape)
if useTestPlot:
plt.imshow(image_slices[image_slices.shape[0]//2], cmap=plt.cm.bone)
plt.show()
plt.imshow(image_resampled[image_resampled.shape[0]//2], cmap=plt.cm.bone)
np.max(image_slices)
np.max(image_resampled)
np.min(image_slices)
np.min(image_resampled)
plt.show()
image_slices = image_resampled
shape = image_slices.shape
l_segmented = np.zeros(shape).astype(np.int16)
l_lungfilter = np.zeros(shape).astype(np.bool)
l_outline = np.zeros(shape).astype(np.bool)
l_watershed = np.zeros(shape).astype(np.int16)
l_sobel_gradient = np.zeros(shape).astype(np.float32)
l_marker_internal = np.zeros(shape).astype(np.bool)
l_marker_external = np.zeros(shape).astype(np.bool)
l_marker_watershed = np.zeros(shape).astype(np.int16)
# start = time.time()
i=0
for i in range(shape[0]):
l_segmented[i], l_lungfilter[i], l_outline[i], l_watershed[i], l_sobel_gradient[i], l_marker_internal[i], l_marker_external[i], l_marker_watershed[i] = seperate_lungs_cv2(image_slices[i])
print("Rescale & Seg time, and path: ", ((time.time() - start)), path )
if useTestPlot:
plt.hist(image_slices.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(l_segmented.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
img_sel_i = shape[0] // 2
# Show some slice in the middle
plt.imshow(image_slices[img_sel_i], cmap=plt.cm.gray)
plt.show()
# Show some slice in the middle
plt.imshow(l_segmented[img_sel_i], cmap='gray')
plt.show()
path_rescaled = path.replace(stage, ''.join([stage, "_", RESOLUTION_STR]), 1)
path_segmented = path.replace(stage, ''.join([stage, "_segmented_", RESOLUTION_STR]), 1)
path_segmented_crop = path.replace(stage, ''.join([stage, "_segmented_", RESOLUTION_STR, "_crop"]), 1)
np.savez_compressed (path_rescaled, image_slices)
np.savez_compressed (path_segmented, l_segmented)
mask = l_lungfilter.astype(np.int8)
regions = measure.regionprops(mask) # this measures the largest region and is a bug when the mask is not the largest region !!!
bb = regions[0].bbox
#print(bb)
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
dx = 0 # could be reduced
            ## have to reduce dx as, for instance, in at least one image the lungs stretch right to the border even without cropping
## namely for '../input/stage1/be57c648eb683a31e8499e278a89c5a0'
            crop_max_ratio_z = 0.6 # 0.8 is too big; make_submit2(45, 1)
crop_max_ratio_y = 0.4
crop_max_ratio_x = 0.6
bxy_min = np.min(bb[1:3])
bxy_max = np.max(bb[4:6])
mask_shape= mask.shape
image_shape = l_segmented.shape
            mask_volume = zlen*ylen*xlen /(mask_shape[0] * mask_shape[1] * mask_shape[2])
            mask_volume_thresh = 0.08 # anything below is too small (maybe just one half of the lung or something very small)
mask_volume_check = mask_volume > mask_volume_thresh
# print ("Mask Volume: ", mask_volume )
### DO NOT allow the mask to touch x & y ---> if it does it is likely a wrong one as for:
## folders[3] , path = '../input/stage1/9ba5fbcccfbc9e08edcfe2258ddf7
maskOK = False
if bxy_min >0 and bxy_max < 512 and mask_volume_check and zlen/mask_shape[0] > crop_max_ratio_z and ylen/mask_shape[1] > crop_max_ratio_y and xlen/mask_shape[2] > crop_max_ratio_x:
## square crop and at least dx elements on both sides on x & y
bxy_min = np.min(bb[1:3])
bxy_max = np.max(bb[4:6])
if bxy_min == 0 or bxy_max == 512:
                    # Mask too big, auto-correct
print("The following mask likely too big, autoreducing by:", dx)
bxy_min = np.max((bxy_min, dx))
bxy_max = np.min ((bxy_max, mask_shape[1] - dx))
image = l_segmented[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
mask = mask[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
#maskOK = True
print ("Shape, cropped, bbox ", mask_shape, mask.shape, bb)
elif bxy_min> 0 and bxy_max < 512 and mask_volume_check and zlen/mask.shape[0] > crop_max_ratio_z:
## cut on z at least
image = l_segmented[bb[0]:bb[3], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
#mask = mask[bb[0]:bb[3], dx: mask_shape[1] - dx, dx: mask_shape[2] - dx]
print("Mask too small, NOT auto-cropping x-y: shape, cropped, bbox, ratios, violume:", mask_shape, image.shape, bb, path, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
else:
image = l_segmented[0:mask_shape[0], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
#mask = mask[0:mask_shape[0], dx: mask_shape[1] - dx, dx: mask_shape[2] - dx]
print("Mask wrong, NOT auto-cropping: shape, cropped, bbox, ratios, volume:", mask_shape, image.shape, bb, path, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
if showSummaryPlot:
img_sel_i = shape[0] // 2
# Show some slice in the middle
useSeparatePlots = False
if useSeparatePlots:
plt.imshow(image_slices[img_sel_i], cmap=plt.cm.gray)
plt.show()
# Show some slice in the middle
plt.imshow(l_segmented[img_sel_i], cmap='gray')
plt.show()
else:
f, ax = plt.subplots(1, 2, figsize=(6,3))
ax[0].imshow(image_slices[img_sel_i],cmap=plt.cm.bone)
ax[1].imshow(l_segmented[img_sel_i],cmap=plt.cm.bone)
plt.show()
# Show some slice in the middle
#plt.imshow(image[image.shape[0] // 2], cmap='gray') # don't show it for simpler review
#plt.show()
np.savez_compressed(path_segmented_crop, image)
#print("Mask count: ", count)
#print ("Shape: ", image.shape)
return part, processors, count
# the following 3 functions to read LUNA files are from: https://www.kaggle.com/arnavkj95/data-science-bowl-2017/candidate-generation-and-luna16-preprocessing/notebook
'''
This funciton reads a '.mhd' file using SimpleITK and return the image array,
origin and spacing of the image.
'''
def load_itk(filename):
# Reads the image using SimpleITK
itkimage = sitk.ReadImage(filename)
# Convert the image to a numpy array first and then shuffle the dimensions to get axis in the order z,y,x
ct_scan = sitk.GetArrayFromImage(itkimage)
# Read the origin of the ct_scan, will be used to convert the coordinates from world to voxel and vice versa.
origin = np.array(list(reversed(itkimage.GetOrigin())))
# Read the spacing along each dimension
spacing = np.array(list(reversed(itkimage.GetSpacing())))
return ct_scan, origin, spacing
'''
This function is used to convert the world coordinates to voxel coordinates using
the origin and spacing of the ct_scan
'''
def world_2_voxel(world_coordinates, origin, spacing):
stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
voxel_coordinates = stretched_voxel_coordinates / spacing
return voxel_coordinates
'''
This function is used to convert the voxel coordinates to world coordinates using
the origin and spacing of the ct_scan.
'''
def voxel_2_world(voxel_coordinates, origin, spacing):
stretched_voxel_coordinates = voxel_coordinates * spacing
world_coordinates = stretched_voxel_coordinates + origin
return world_coordinates
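# A round-trip sanity check (not part of the original module). Note that
# world_2_voxel takes np.absolute(world - origin), so the round trip only holds
# for points beyond the origin on every axis; the coordinates below respect that.
def _demo_coordinate_roundtrip():
    origin = np.array([-300.0, -250.0, -250.0])   # z, y, x [mm]
    spacing = np.array([2.0, 2.0, 2.0])
    world = np.array([-100.0, -50.0, 30.0])
    vox = world_2_voxel(world, origin, spacing)
    print(vox, voxel_2_world(vox, origin, spacing))  # recovers the world point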
def seq(start, stop, step=1):
n = int(round((stop - start)/float(step)))
if n > 1:
return([start + step*i for i in range(n+1)])
else:
return([])
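# A tiny example (not part of the original module): seq spans [-r, r] in fixed
# steps, which is how draw_circles below walks the nodule neighbourhood.
def _demo_seq():
    print(seq(-3.0, 3.0, 2.0))  # [-3.0, -1.0, 1.0, 3.0]
    print(seq(0, 1, 2))         # [] when fewer than two steps fit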
'''
This function is used to create spherical regions in binary masks
at the given locations and radius.
'''
#image = lung_img
#spacing = new_spacing
def draw_circles(image,cands,origin,spacing):
#make empty matrix, which will be filled with the mask
image_mask = np.zeros(image.shape, dtype=np.int16)
#run over all the nodules in the lungs
for ca in cands.values:
#get middel x-,y-, and z-worldcoordinate of the nodule
#radius = np.ceil(ca[4])/2 ## original: replaced the ceil with a very minor increase of 1% ....
        radius = (ca[4])/2 + 0.51 * spacing[0] # increase by circa half the z-spacing (trying to capture a wider region/border for learning, and to address the rough net)
coord_x = ca[1]
coord_y = ca[2]
coord_z = ca[3]
image_coord = np.array((coord_z,coord_y,coord_x))
#determine voxel coordinate given the worldcoordinate
image_coord = world_2_voxel(image_coord,origin,spacing)
#determine the range of the nodule
#noduleRange = seq(-radius, radius, RESIZE_SPACING[0]) # original, uniform spacing
noduleRange_z = seq(-radius, radius, spacing[0])
noduleRange_y = seq(-radius, radius, spacing[1])
noduleRange_x = seq(-radius, radius, spacing[2])
#x = y = z = -2
#create the mask
for x in noduleRange_x:
for y in noduleRange_y:
for z in noduleRange_z:
coords = world_2_voxel(np.array((coord_z+z,coord_y+y,coord_x+x)),origin,spacing)
#if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius: ### original (contrained to a uniofrm RESIZE)
if (np.linalg.norm((image_coord-coords) * spacing)) < radius:
image_mask[int(np.round(coords[0])),int(np.round(coords[1])),int(np.round(coords[2]))] = int(1)
return image_mask
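# A small usage sketch (not part of the original module): rasterises one 8 mm
# nodule into a toy 40x64x64 volume on the 2x2x2 mm grid. The column names
# mirror the LUNA annotations.csv layout and are assumptions for illustration.
def _demo_draw_circles():
    img = np.zeros((40, 64, 64), dtype=np.int16)
    origin = np.array([0.0, 0.0, 0.0])
    spacing = np.array([2.0, 2.0, 2.0])
    cands = pd.DataFrame([["demo", 64.0, 64.0, 40.0, 8.0]],
                         columns=["seriesuid", "coordX", "coordY", "coordZ", "diameter_mm"])
    mask = draw_circles(img, cands, origin, spacing)
    print(mask.sum(), "voxels set around voxel (20, 32, 32)")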
'''
This function takes the path to a '.mhd' file as input and
is used to create the nodule masks and segmented lungs after
rescaling to 1mm size in all directions. It saved them in the .npz
format. It also takes the list of nodule locations in that CT Scan as
input.
'''
def load_scans_masks(luna_subset, useAll, use_unsegmented=True):
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
sids = []
scans = []
masks = []
cnt = 0
skipped = 0
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if use_unsegmented:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
#useAll = True
if (len(cands) > 0 or useAll):
sids.append(seriesuid)
if use_unsegmented:
scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
#scan_z.keys()
scan = scan_z['arr_0']
mask_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
mask = mask_z['arr_0']
scans.append(scan)
masks.append(mask)
cnt += 1
else:
print("Skipping non-nodules entry ", seriesuid)
skipped += 1
print ("Summary: cnt & skipped: ", cnt, skipped)
return scans, masks, sids
def load_scans_masks_or_blanks(luna_subset, useAll, use_unsegmented=True):
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
candidates = pd.read_csv(LUNA_CANDIDATES)
candidates_false = candidates[candidates["class"] == 0] # only select the false candidates
candidates_true = candidates[candidates["class"] == 1] # only select the false candidates
sids = []
scans = []
masks = []
blankids = [] # class/id whether scan is with nodule or without, 0 - with, 1 - without
cnt = 0
skipped = 0
#file=files[7]
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if use_unsegmented:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
ctrue = candidates_true[seriesuid == candidates_true.seriesuid]
cfalse = candidates_false[seriesuid == candidates_false.seriesuid]
#useAll = True
blankid = 1 if (len(cands) == 0 and len(ctrue) == 0 and len(cfalse) > 0) else 0
skip_nodules_entirely = False # was False
use_only_nodules = False # was True
if skip_nodules_entirely and blankid ==0:
## manual switch to generate extra data for the corrupted set
print("Skipping nodules (skip_nodules_entirely) ", seriesuid)
skipped += 1
elif use_only_nodules and (len(cands) == 0):
## manual switch to generate only nodules data due lack of time and repeat etc time pressures
print("Skipping blanks (use_only_nodules) ", seriesuid)
skipped += 1
else: # NORMAL operations
if (len(cands) > 0 or
(blankid >0) or
useAll):
sids.append(seriesuid)
blankids.append(blankid)
if use_unsegmented:
scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
#scan_z.keys()
scan = scan_z['arr_0']
#mask_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
mask_z = np.load(''.join((path_segmented + '_nodule_mask_wblanks' + '.npz')))
mask = mask_z['arr_0']
testPlot = False
if testPlot:
maskcheck_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
maskcheck = maskcheck_z['arr_0']
f, ax = plt.subplots(1, 2, figsize=(10,5))
ax[0].imshow(np.sum(np.abs(maskcheck), axis=0),cmap=plt.cm.gray)
ax[1].imshow(np.sum(np.abs(mask), axis=0),cmap=plt.cm.gray)
#ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
scans.append(scan)
masks.append(mask)
cnt += 1
else:
print("Skipping non-nodules and non-blank entry ", seriesuid)
skipped += 1
print ("Summary: cnt & skipped: ", cnt, skipped)
return scans, masks, sids, blankids
#return scans, masks, sids # not yet, old style
def load_scans_masks_no_nodules(luna_subset, use_unsegmented=True): # load only the ones that do not contain nodules
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
sids = []
scans = []
masks = []
cnt = 0
skipped = 0
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
        path = imagePath[:len(imagePath)-len(".mhd")]  # strip the suffix to get the path stem
if use_unsegmented:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
#useAll = True
if (len(cands)):
print("Skipping entry with nodules ", seriesuid)
skipped += 1
else:
sids.append(seriesuid)
if use_unsegmented:
scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
#scan_z.keys()
scan = scan_z['arr_0']
mask_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
mask = mask_z['arr_0']
scans.append(scan)
masks.append(mask)
cnt += 1
print ("Summary: cnt & skipped: ", cnt, skipped)
return scans, masks, sids
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
def normalize(image):
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image>1] = 1.
image[image<0] = 0.
return image
PIXEL_MEAN = 0.028 ## for LUNA subset 0 and our preprocessing: 0.028 with nodules only, 0.020421744071562546 over all scans (the tutorial used 0.25)
def zero_center(image):
image = image - PIXEL_MEAN
return image
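# Illustrative check (not part of the original pipeline; the values are made up):
# normalize() maps [MIN_BOUND, MAX_BOUND] HU linearly onto [0, 1] with clipping,
# and zero_center() subtracts the precomputed PIXEL_MEAN.
_hu_demo = np.array([-1200.0, -1000.0, -400.0, 0.0, 400.0, 700.0])
_pix_demo = zero_center(normalize(_hu_demo))
print(_pix_demo)  # approx [-0.028, -0.028, 0.4006, 0.6863, 0.972, 0.972]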
def load_scans(path): # function used for testing
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key=lambda x: int(x.InstanceNumber))
return np.stack([s.pixel_array for s in slices])
def get_scans(df,scans_list):
scans=np.stack([load_scans(scan_folder+df.id[i_scan[0]])[i_scan[1]] for i_scan in scans_list])
scans=process_scans(scans)
view_scans(scans)
return(scans)
def process_scans(scans):  # used for testing
scans1=np.zeros((scans.shape[0],1,img_rows,img_cols))
for i in range(scans.shape[0]):
img=scans[i,:,:]
img = 255.0 / np.amax(img) * img
img =img.astype(np.uint8)
img =cv2.resize(img, (img_rows, img_cols))
scans1[i,0,:,:]=img
return (scans1)
only_with_nudels = True
def convert_scans_and_masks(scans, masks, only_with_nudels):
flattened1 = [val for sublist in scans for val in sublist[1:-1]] # skip one element at the beginning and at the end
scans1 = np.stack(flattened1)
flattened1 = [val for sublist in masks for val in sublist[1:-1]] # skip one element at the beginning and at the end
masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
nudels_pix_count = np.sum(masks1, axis = (1,2))
scans1 = scans1[nudels_pix_count>0]
        masks1 = masks1[nudels_pix_count>0]  # 493 -- circa 5% with nodules, the others without
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans = normalize(scans1)
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans = zero_center(scans)
masks = np.copy(masks1)
## if needed do the resize here ....
img_rows = scans.shape[1] ### redefine img_rows/ cols and add resize if needed
img_cols = scans.shape[2]
scans1=np.zeros((scans.shape[0],1,img_rows,img_cols))
for i in range(scans.shape[0]):
img=scans[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
scans1[i,0,:,:]=img
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
#scans = [scans[i]]
#masks = [masks[i]]
def convert_scans_and_masks_xd_ablanks(scans, masks, blankids, only_with_nudels, dim=3):
# reuse scan to reduce memory footprint
dim_orig = dim
    add_blank_spacing_size = dim * 8  #### use 4 for subsets [0-3] and 8 for [4-7]; initial trial (should perhaps be just dim)
#skip = dim // 2 # old
    skip_low = dim // 2  # dim should be odd -- it is recalculated anyway to this end
skip_high = dim -skip_low - 1
    do_not_allow_even_dim = False  ## now we allow even dims as well ...
if do_not_allow_even_dim:
dim = 2 * skip_low + 1
skip_low = dim // 2
skip_high = dim -skip_low - 1
if dim != dim_orig:
            print ("convert_scans_and_masks_x: Dim must be odd; corrected from/to:", dim_orig, dim)
work = [] # 3 layers
#scan = scans[0]
for scan in scans: ##TEMP
tmp = []
#i = 1
#for i in range(1, scan.shape[0]-1, 3): # SKIP EVERY 3
for i in range(skip_low, scan.shape[0]-skip_high):
#img1 = scan[i-1]
#img2 = scan[i]
#img3 = scan[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(scan[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
#flattened1 = [val for sublist in work for val in sublist ] # NO skipping as we have already cut the first and the last layer
#scans1 = np.stack(flattened1)
scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
work = []
    ### ADD artificial mask pixels every add_blank_spacing layers for each blank id ...
    # set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
blanks_per_axis = 4 # skip border
crop = 16
dx = (img_cols - 2 * crop) // (blanks_per_axis + 2)
dy = (img_rows - 2 * crop) // (blanks_per_axis + 2)
for mask in masks:
if (np.sum(mask) < 0):
## we have a blank
            ### ADD artificial mask pixels every add_blank_spacing layers for each blank id ...
            # set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for i in range(skip_low, mask.shape[0]-skip_high, add_blank_spacing_size):
for ix in range(blanks_per_axis):
xpos = crop + (ix+1)*dx + dx //2
for iy in range(blanks_per_axis):
ypos = crop + (iy+1)*dy + dy //2
#print (xpos, ypos)
                        mask[i, ypos, xpos] = -1  # negative pixel to be picked up below and corrected back to none (indexing by i, so each spaced slice gets a marker)
#for k in range(len(blankids)):
# if blankids[k] > 0:
# mask = masks[k]
# ## add the blanls
# for i in range(skip_low, mask.shape[0]-skip_high, add_blank_spacing_size):
# mask[skip_low, 0, 0] = -1 # negative pixel to be picked up below and corrected back to none
use_3d_mask = True ##
if use_3d_mask:
work = [] # 3 layers
#mask = masks[0]
for mask in masks:
tmp = []
#i = 0
for i in range(skip_low, mask.shape[0]-skip_high):
#img1 = mask[i-1]
#img2 = mask[i]
#img3 = mask[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(mask[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
else:
masks1 = np.stack([val for sublist in masks for val in sublist[skip_low:-skip_high]] ) # skip one element at the beginning and at the end
#masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
if use_3d_mask:
            nudels_pix_count = np.sum(masks1[:,skip_low], axis = (1,2))  ## and added for the potential blanks; modified so that the centre mask is the mask
else:
nudels_pix_count = np.sum(masks1, axis = (1,2))
scans1 = scans1[nudels_pix_count != 0]
masks1 = masks1[nudels_pix_count != 0]
#blank_mask_factor = np.sign(nudels_pix_count)[nudels_pix_count != 0]
#sum(blank_mask_factor)
#blank_mask_factor[blank_mask_factor <0] = 0
#mask1_orig = masks1
#np.sum(mask1_orig)
#np.min(masks1)
#masks1 = masks1[nudels_pix_count != 0] * blank_mask_factor # 493 -- circa 5 % with nudeles oters without; 232 if we skip over every 3 layers and use a 3d mask
        masks1[masks1 < 0] = 0  # 493 -- circa 5% with nodules, the others without; 232 if we skip every 3 layers and use a 3d mask
#masks1[nudels_pix_count < 0] = 0 # making empty mask for balancing training set
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans1 = normalize(scans1)
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans1 = zero_center(scans1)
#masks = np.copy(masks1)
## if needed do the resize here .... (img_rows and img_cols are global values defined externally)
#img_rows = scans.shape[1] ### redefine img_rows/ cols and add resize if needed
#img_cols = scans.shape[2]
# scans already are in the tensor mode with 3 rgb elements ....
#scans1 = scans ## no change
#scans1=np.zeros((scans.shape[0],3,img_rows,img_cols))
#for i in range(scans.shape[0]):
# img=scans[i,:,:]
# ###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
# scans1[i,0,:,:]=img
if use_3d_mask:
done = 1 # nothing to do
else:
masks = np.copy(masks1)
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
#scans = [scans[j]]
#masks = [masks[j]]
def convert_scans_and_masks_xd3(scans, masks, only_with_nudels, dim=3, crop=16, blanks_per_axis = 4, add_blank_spacing_size=0, add_blank_layers = 0):
# reuse scan to reduce memory footprint
dim_orig = dim
#add_blank_spacing_size = 0 # dim *4 # dim # was dim ### set to 0 for version_16 #### initial trial (should perhaps be just dim ....), if 0 - do not add ...
#add_blank_layers = 0 # was 4
#skip = dim // 2 # old
    skip_low = dim // 2  # dim should be odd -- it is recalculated anyway to this end
skip_high = dim -skip_low - 1
    do_not_allow_even_dim = False  ## now we allow even dims as well ...
if do_not_allow_even_dim:
dim = 2 * skip_low + 1
skip_low = dim // 2
skip_high = dim -skip_low - 1
if dim != dim_orig:
            print ("convert_scans_and_masks_x: Dim must be odd; corrected from/to:", dim_orig, dim)
work = [] # 3 layers
#scan = scans[0]
for scan in scans: ##TEMP
tmp = []
#i = 1
#for i in range(1, scan.shape[0]-1, 3): # SKIP EVERY 3
for i in range(skip_low, scan.shape[0]-skip_high):
#img1 = scan[i-1]
#img2 = scan[i]
#img3 = scan[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(scan[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
#flattened1 = [val for sublist in work for val in sublist ] # NO skipping as we have already cut the first and the last layer
#scans1 = np.stack(flattened1)
scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
work = []
##blanks_per_axis = 6 # cover all slice
##crop = 44
dxrange = scans[0].shape[-1] - 2 * crop
dyrange = scans[0].shape[-2] - 2 * crop
#dx = (img_cols - 2 * crop) // (blanks_per_axis)
#dy = (img_rows - 2 * crop) // (blanks_per_axis)
#dx = dxrange // (blanks_per_axis+1)
#dy = dyrange // (blanks_per_axis+1)
    ### ADD artificial mask pixels every add_blank_spacing layers for each blank id ...
    # set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
if add_blank_spacing_size > 0:
for mask in masks:
if (np.min(mask) < 0):
## we have a blank
                ### ADD artificial mask pixels every add_blank_spacing layers for each blank id ...
                # set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for i in range(skip_low+(add_blank_spacing_size//2), mask.shape[0]-skip_high, add_blank_spacing_size):
mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
if add_blank_layers > 0:
for mask in masks:
if (np.min(mask) < 0):
dzrange = mask.shape[0]-dim
## we have a blank
                ### ADD artificial mask pixels every add_blank_spacing layers for each blank id ...
                # set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for k in range(add_blank_layers):
i = np.random.randint(0, dzrange) + skip_low
#print ("dz position, random, mask.shape ", i, mask.shape)
mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
#mask = masks[0]
add_random_blanks_in_blanks = False ## NO need for the extra random blank pixels now, 20170327
if add_random_blanks_in_blanks:
for mask in masks:
if (np.min(mask) < 0):
## we have a blank
                ### ADD artificial mask pixels every add_blank_spacing layers for each blank id ...
                # set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
#zlow = skip_low
#zhigh = mask.shape[0]-skip_high
pix_sum = np.sum(mask, axis=(1,2))
idx_blanks = np.min(mask, axis=(1,2)) < 0 ## don't use it - let's vary the position across the space
for iz in range(mask.shape[0]):
if (np.min(mask[iz])) < 0:
for ix in range(blanks_per_axis):
#xpos = crop + (ix)*dx + dx //2
for iy in range(blanks_per_axis):
#ypos = crop + (iy)*dy + dy //2
xpos = crop + np.random.randint(0,dxrange)
ypos = crop + np.random.randint(0,dyrange)
#print (iz, xpos, ypos)
#mask[idx_blanks, ypos, xpos] = -1 # negative pixel to be picked up below and corrected back to none
mask[iz, ypos, xpos] = -1
use_3d_mask = True ##
if use_3d_mask:
work = [] # 3 layers
#mask = masks[0]
for mask in masks:
tmp = []
#i = 0
for i in range(skip_low, mask.shape[0]-skip_high):
#img1 = mask[i-1]
#img2 = mask[i]
#img3 = mask[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(mask[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
else:
masks1 = np.stack([val for sublist in masks for val in sublist[skip_low:-skip_high]] ) # skip one element at the beginning and at the end
#masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
if use_3d_mask:
            #nudels_pix_count = np.sum(np.abs(masks1[:,skip_low]), axis = (1,2))  ## superseded centre-cut variant
            nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2,3))  ## use ANY nonzero mask voxel (changed 2017-03-01 from the centre-cut test); also covers the potential blanks
else:
nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2))
scans1 = scans1[nudels_pix_count != 0]
masks1 = masks1[nudels_pix_count != 0]
#blank_mask_factor = np.sign(nudels_pix_count)[nudels_pix_count != 0]
#sum(blank_mask_factor)
#blank_mask_factor[blank_mask_factor <0] = 0
#mask1_orig = masks1
#np.sum(mask1_orig)
#np.min(masks1)
#masks1 = masks1[nudels_pix_count != 0] * blank_mask_factor # 493 -- circa 5 % with nudeles oters without; 232 if we skip over every 3 layers and use a 3d mask
        #masks1[masks1 < 0] = 0  # NOTE: in the GRID version do NOT zero the negatives here -- that is done in the key version
#masks1[nudels_pix_count < 0] = 0 # making empty mask for balancing training set
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans1 = normalize(scans1)
### after this scans1 becomes float64 ....
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans1 = zero_center(scans1)
#masks = np.copy(masks1)
    scans1 = scans1.astype(np.float32)  # make it float32 (no point carrying float64; Keras operates on float32 and the originals were int)
## if needed do the resize here .... (img_rows and img_cols are global values defined externally)
#img_rows = scans.shape[1] ### redefine img_rows/ cols and add resize if needed
#img_cols = scans.shape[2]
# scans already are in the tensor mode with 3 rgb elements ....
#scans1 = scans ## no change
#scans1=np.zeros((scans.shape[0],3,img_rows,img_cols))
#for i in range(scans.shape[0]):
# img=scans[i,:,:]
# ###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
# scans1[i,0,:,:]=img
if use_3d_mask:
done = 1 # nothing to do
else:
masks = np.copy(masks1)
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
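# Minimal shape check for convert_scans_and_masks_xd3 (hypothetical sizes, not
# from the original script): with dim=3, a scan of Z slices yields Z - dim + 1
# overlapping slabs of shape (dim, H, W), stacked over all scans for both outputs.
_scans_demo = [np.zeros((10, 224, 224), dtype=np.int16)]
_masks_demo = [np.zeros((10, 224, 224), dtype=np.int16)]
_s1, _m1 = convert_scans_and_masks_xd3(_scans_demo, _masks_demo, only_with_nudels=False)
print(_s1.shape, _m1.shape)  # (8, 3, 224, 224) (8, 3, 224, 224)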
def convert_scans_and_masks_3d(scans, masks, only_with_nudels):
# reuse scan to reduce memory footprint
work = [] # 3 layers
#scan = scans[0]
for scan in scans:
tmp = []
#i = 0
#for i in range(1, scan.shape[0]-1, 3): # SKIP EVERY 3
for i in range(1, scan.shape[0]-1):
img1 = scan[i-1]
img2 = scan[i]
img3 = scan[i+1]
rgb = np.stack((img1, img2, img3))
tmp.append(rgb)
work.append(np.array(tmp))
#flattened1 = [val for sublist in work for val in sublist ] # NO skipping as we have already cut the first and the last layer
#scans1 = np.stack(flattened1)
scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
work = []
use_3d_mask = False
if use_3d_mask:
work = [] # 3 layers
#mask = masks[0]
for mask in masks:
tmp = []
#i = 0
for i in range(1, mask.shape[0]-1, 3): # SKIP EVERY 3
img1 = mask[i-1]
img2 = mask[i]
img3 = mask[i+1]
rgb = np.stack((img1, img2, img3))
tmp.append(rgb)
work.append(np.array(tmp))
masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
else:
masks1 = np.stack([val for sublist in masks for val in sublist[1:-1]] ) # skip one element at the beginning and at the end
#masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
if use_3d_mask:
nudels_pix_count = np.sum(masks1, axis = (1,2,3))
else:
nudels_pix_count = np.sum(masks1, axis = (1,2))
scans1 = scans1[nudels_pix_count>0]
        masks1 = masks1[nudels_pix_count>0]  # 493 -- circa 5% with nodules, the others without; 232 if we skip every 3 layers and use a 3d mask
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans1 = normalize(scans1)
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans1 = zero_center(scans1)
#masks = np.copy(masks1)
## if needed do the resize here .... (img_rows and img_cols are global values defined externally)
#img_rows = scans.shape[1] ### redefine img_rows/ cols and add resize if needed
#img_cols = scans.shape[2]
# scans already are in the tensor mode with 3 rgb elements ....
#scans1 = scans ## no change
#scans1=np.zeros((scans.shape[0],3,img_rows,img_cols))
#for i in range(scans.shape[0]):
# img=scans[i,:,:]
# ###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
# scans1[i,0,:,:]=img
if use_3d_mask:
done = 1 # nothing to do
else:
masks = np.copy(masks1)
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
def view_scans(scans):
#%matplotlib inline
for i in range(scans.shape[0]):
print ('scan '+str(i))
plt.imshow(scans[i,0,:,:], cmap=plt.cm.gray)
plt.show()
def view_scans_widget(scans):
#%matplotlib tk
for i in range(scans.shape[0]):
plt.figure(figsize=(7,7))
plt.imshow(scans[i,0,:,:], cmap=plt.cm.gray)
plt.show()
def get_masks(scans,masks_list):
#%matplotlib inline
scans1=scans.copy()
maxv=255
masks=np.zeros(shape=(scans.shape[0],1,img_rows,img_cols))
for i_m in range(len(masks_list)):
for i in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
for j in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
masks[masks_list[i_m][0],0,masks_list[i_m][2]+i,masks_list[i_m][1]+j]=1
for i1 in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]+i1,masks_list[i_m][1]+masks_list[i_m][3]] = maxv
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]+i1,masks_list[i_m][1]-masks_list[i_m][3]] = maxv
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]+masks_list[i_m][3],masks_list[i_m][1]+i1] = maxv
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]-masks_list[i_m][3],masks_list[i_m][1]+i1] = maxv
for i in range(scans.shape[0]):
print ('scan '+str(i))
f, ax = plt.subplots(1, 2,figsize=(10,5))
ax[0].imshow(scans1[i,0,:,:],cmap=plt.cm.gray)
ax[1].imshow(masks[i,0,:,:],cmap=plt.cm.gray)
plt.show()
return(masks)
def augmentation(scans,masks,n):
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=25, # was 25
        width_shift_range=0.3,   # was 0.3, then 0.1; tried 0.01
        height_shift_range=0.3,  # was 0.3, then 0.1; tried 0.01
horizontal_flip=True,
vertical_flip=True,
zoom_range=False)
i=0
scans_g=scans.copy()
for batch in datagen.flow(scans, batch_size=1, seed=1000):
scans_g=np.vstack([scans_g,batch])
i += 1
if i > n:
break
i=0
masks_g=masks.copy()
for batch in datagen.flow(masks, batch_size=1, seed=1000):
masks_g=np.vstack([masks_g,batch])
i += 1
if i > n:
break
return((scans_g,masks_g))
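# Design note (observation): both datagen.flow() calls above use the same
# seed (1000), which is what keeps each augmented mask geometrically aligned
# with its augmented scan. Hypothetical usage:
#   scans_aug, masks_aug = augmentation(scans1, masks1, n=100)
#   # each output holds the originals plus n + 1 augmented batches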
def hu_to_pix (hu):
return (hu - MIN_BOUND) / (MAX_BOUND - MIN_BOUND) - PIXEL_MEAN
def pix_to_hu (pix):
return (pix + PIXEL_MEAN) * (MAX_BOUND - MIN_BOUND) + MIN_BOUND
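# Sanity check (illustrative): hu_to_pix and pix_to_hu are exact inverses by
# construction; hu_to_pix also matches zero_center(normalize(...)) inside the
# [MIN_BOUND, MAX_BOUND] window (normalize additionally clips outside it).
assert abs(pix_to_hu(hu_to_pix(-600.0)) - (-600.0)) < 1e-9
assert abs(hu_to_pix(MIN_BOUND) - (0.0 - PIXEL_MEAN)) < 1e-9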
from scipy import stats
def eliminate_incorrectly_segmented(scans, masks):
skip = dim // 2 # To Change see below ...
sxm = scans * masks
    near_air_thresh = (-900 - MIN_BOUND) / (MAX_BOUND - MIN_BOUND) - PIXEL_MEAN  # version 3; -750 gives one more (for 0_3, d4); -600 gives 15 more than -900
    #near_air_thresh is circa 0.0863 for -840, 0.067 for -867, 0.1148 for -800
cnt = 0
for i in range(sxm.shape[0]):
#sx = sxm[i,skip]
sx = sxm[i]
mx = masks[i]
if np.sum(mx) > 0: # only check non-blanks ...(keep blanks)
sx_max = np.max(sx)
if (sx_max) <= near_air_thresh:
cnt += 1
print ("Entry, count # and max: ", i, cnt, sx_max)
print (stats.describe(sx, axis=None))
#plt.imshow(sx, cmap='gray')
plt.imshow(sx[0,skip], cmap='gray') # selecting the mid entry
plt.show()
s_eliminate = np.max(sxm, axis=(1,2,3,4)) <= near_air_thresh # 3d
s_preserve = np.max(sxm, axis=(1,2,3,4)) > near_air_thresh #3d
s_eliminate_sum = sum(s_eliminate)
s_preserve_sum = sum(s_preserve)
print ("Eliminate, preserve =", s_eliminate_sum, s_preserve_sum)
masks = masks[s_preserve]
scans = scans[s_preserve]
del(sxm)
return scans, masks
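# Caveat (observation, not original commentary): eliminate_incorrectly_segmented
# reads the module-level `dim` for `skip`, and its axis=(1,2,3,4) reductions
# assume the 5-D (N, 1, dim, H, W) layout produced by the slab converters above.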
# the following 3 functions to read LUNA files are from: https://www.kaggle.com/arnavkj95/data-science-bowl-2017/candidate-generation-and-luna16-preprocessing/notebook
'''
This function reads a '.mhd' file using SimpleITK and returns the image array,
origin and spacing of the image.
'''
def load_itk(filename):
# Reads the image using SimpleITK
itkimage = sitk.ReadImage(filename)
# Convert the image to a numpy array first and then shuffle the dimensions to get axis in the order z,y,x
ct_scan = sitk.GetArrayFromImage(itkimage)
# Read the origin of the ct_scan, will be used to convert the coordinates from world to voxel and vice versa.
origin = np.array(list(reversed(itkimage.GetOrigin())))
# Read the spacing along each dimension
spacing = np.array(list(reversed(itkimage.GetSpacing())))
return ct_scan, origin, spacing
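# Hypothetical usage (placeholder path, not a real file from this project):
#   ct_scan, origin, spacing = load_itk("subset0/1.3.6.1.4.1....mhd")
#   print(ct_scan.shape)    # (z, y, x), e.g. (133, 512, 512)
#   print(origin, spacing)  # both in (z, y, x) order, millimetres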
'''
This function is used to convert the world coordinates to voxel coordinates using
the origin and spacing of the ct_scan
'''
def world_2_voxel(world_coordinates, origin, spacing):
stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
voxel_coordinates = stretched_voxel_coordinates / spacing
return voxel_coordinates
'''
This function is used to convert the voxel coordinates to world coordinates using
the origin and spacing of the ct_scan.
'''
def voxel_2_world(voxel_coordinates, origin, spacing):
stretched_voxel_coordinates = voxel_coordinates * spacing
world_coordinates = stretched_voxel_coordinates + origin
return world_coordinates
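# Round-trip check (illustrative values): voxel_2_world undoes world_2_voxel
# only up to the np.absolute() above, so the identity holds for coordinates on
# the positive side of the origin.
_origin = np.array([-100.0, -200.0, -200.0])
_spacing = np.array([2.0, 2.0, 2.0])
_world = np.array([50.0, 30.0, 10.0])
print(voxel_2_world(world_2_voxel(_world, _origin, _spacing), _origin, _spacing))  # [50. 30. 10.]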
def seq(start, stop, step=1):
n = int(round((stop - start)/float(step)))
if n > 1:
return([start + step*i for i in range(n+1)])
else:
return([])
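# Behaviour sketch: seq() is an inclusive floating-point range used by
# draw_circles below; note that it returns [] when the span is at most one step.
print(seq(-2.0, 2.0, 1.0))  # [-2.0, -1.0, 0.0, 1.0, 2.0]
print(seq(-0.4, 0.4, 1.0))  # [] -- n == 1, i.e. a nodule smaller than one step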
'''
This function is used to create spherical regions in binary masks
at the given locations and radius.
'''
def draw_circles(image,cands,origin,spacing):
#make empty matrix, which will be filled with the mask
image_mask = np.zeros(image.shape, dtype=np.int16)
#run over all the nodules in the lungs
for ca in cands.values:
        #get the middle x-, y- and z- world coordinates of the nodule
#radius = np.ceil(ca[4])/2 ## original: replaced the ceil with a very minor increase of 1% ....
        radius = (ca[4])/2 + 0.51 * spacing[0]  # increasing by circa half of the z-direction spacing (trying to capture a wider region/border for learning and to address the rough net)
coord_x = ca[1]
coord_y = ca[2]
coord_z = ca[3]
image_coord = np.array((coord_z,coord_y,coord_x))
        #determine the voxel coordinate given the world coordinate
image_coord = world_2_voxel(image_coord,origin,spacing)
#determine the range of the nodule
#noduleRange = seq(-radius, radius, RESIZE_SPACING[0]) # original, uniform spacing
noduleRange_z = seq(-radius, radius, spacing[0])
noduleRange_y = seq(-radius, radius, spacing[1])
noduleRange_x = seq(-radius, radius, spacing[2])
#x = y = z = -2
#create the mask
for x in noduleRange_x:
for y in noduleRange_y:
for z in noduleRange_z:
coords = world_2_voxel(np.array((coord_z+z,coord_y+y,coord_x+x)),origin,spacing)
                    #if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius:  ### original (constrained to a uniform RESIZE)
if (np.linalg.norm((image_coord-coords) * spacing)) < radius:
image_mask[int(np.round(coords[0])),int(np.round(coords[1])),int(np.round(coords[2]))] = int(1)
return image_mask
'''
This function takes the path to a '.mhd' file as input and
is used to create the nodule masks and segmented lungs after
rescaling to 1mm size in all directions. It saves them in the .npz
format. It also takes the list of nodule locations in that CT scan as
input.
'''
def grid_data(source, grid=32, crop=16, expand=12):
gridsize = grid + 2 * expand
stacksize = source.shape[0]
height = source.shape[3] # should be 224 for our data
width = source.shape[4]
gridheight = (height - 2 * crop) // grid # should be 6 for our data
gridwidth = (width - 2 * crop) // grid
cells = []
for j in range(gridheight):
for i in range (gridwidth):
cell = source[:,:,:, crop+j*grid-expand:crop+(j+1)*grid+expand, crop+i*grid-expand:crop+(i+1)*grid+expand]
cells.append(cell)
cells = np.vstack (cells)
return cells, gridwidth, gridheight
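# Worked shape example (hypothetical sizes): a stack of (N, 1, dim, 224, 224)
# with grid=32, crop=16, expand=12 gives gridheight = gridwidth =
# (224 - 2*16) // 32 = 6, i.e. 36 overlapping 56x56 cells per position,
# stacked to (36 * N, 1, dim, 56, 56).
_src_demo = np.zeros((2, 1, 3, 224, 224), dtype=np.float32)
_cells_demo, _gw_demo, _gh_demo = grid_data(_src_demo, grid=32, crop=16, expand=12)
print(_cells_demo.shape, _gw_demo, _gh_demo)  # (72, 1, 3, 56, 56) 6 6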
def data_from_grid (cells, gridwidth, gridheight, grid=32):
height = cells.shape[3] # should be 224 for our data
width = cells.shape[4]
crop = (width - grid ) // 2 ## for simplicity we are assuming the same crop (and grid) vertically and horizontally
dspacing = gridwidth * gridheight
layers = cells.shape[0] // dspacing
if crop > 0: # do NOT crop with 0 as we get empty cells ...
cells = cells[:,:,:,crop:-crop,crop:-crop]
if crop > 2*grid:
            print ("data_from_grid Warning, unusually large crop (> 2*grid); crop & grid, gridwidth, gridheight: ", (crop, grid, gridwidth, gridheight))
shape = cells.shape
    new_shape_1_dim = shape[0]// (gridwidth * gridheight)  # was // 36 -- improved on 20170306
    new_shape = (gridwidth * gridheight, new_shape_1_dim, ) + tuple([x for x in shape][1:])  # was 36 -- improved on 20170306
cells = np.reshape(cells, new_shape)
cells = np.moveaxis(cells, 0, -3)
shape = cells.shape
new_shape2 = tuple([x for x in shape[0:3]]) + (gridheight, gridwidth,) + tuple([x for x in shape[4:]])
cells = np.reshape(cells, new_shape2)
cells = cells.swapaxes(-2, -3)
shape = cells.shape
combine_shape =tuple([x for x in shape[0:3]]) + (shape[-4]*shape[-3], shape[-2]*shape[-1],)
cells = np.reshape(cells, combine_shape)
return cells
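# Inverse sketch, continuing the hypothetical example above: data_from_grid
# crops each cell from gridsize back to grid and reassembles the mosaic of
# shape (N, 1, dim, gridheight*grid, gridwidth*grid); the outer crop border of
# the original image is not restored.
_back_demo = data_from_grid(_cells_demo, _gw_demo, _gh_demo, grid=32)
print(_back_demo.shape)  # (2, 1, 3, 192, 192)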
def data_from_grid_by_proximity (cells, gridwidth, gridheight, grid=32):
    # disperse the sequential data into layers and then use data_from_grid
dspacing = gridwidth * gridheight
layers = cells.shape[0] // dspacing
shape = cells.shape
    new_shape_1_dim = shape[0]// (gridwidth * gridheight)  # was // 36 -- improved on 20170306
    ### NOTE that we invert the order of shapes below to get the required proximity-type ordering
    new_shape = (new_shape_1_dim, gridwidth * gridheight, ) + tuple([x for x in shape][1:])  # was 36 -- improved on 20170306
# swap ordering of axes
cells = np.reshape(cells, new_shape)
cells = cells.swapaxes(0, 1)
cells = np.reshape(cells, shape)
cells = data_from_grid (cells, gridwidth, gridheight, grid)
return cells
def find_voxels(dim, grid, images3, images3_seg, pmasks3, nodules_threshold=0.999, voxelscountmax = 1000, mid_mask_only = True, find_blanks_also = True, centralcutonly=True):
zsel = dim // 2
sstart = 0
send = images3.shape[0]
if mid_mask_only:
pmav = pmasks3[:,0,dim // 2] # using the mid mask
pmav.shape
else:
pmav = pmasks3[:,0] ### NOTE this variant has NOT been tested fully YET
run_UNNEEDED_code = False
ims = images3[sstart:send,0,zsel] # selecting the zsel cut for nodules calc ...
ims_seg = images3_seg[sstart:send,0,zsel]
ims.shape
#pms = pmasks3[sstart:send,0,0]
pms = pmav[sstart:send]
images3.shape
    thresh = nodules_threshold  # for testing, set it here and skip the loop
    segment = 2  # for compatibility of the naming convention
    # threshold the predicted masks ...
    #for thresh in [0.5, 0.9, 0.9999]:
    #for thresh in [0.5, 0.75, 0.9, 0.95, 0.98, 0.99, 0.999, 0.9999, 0.99999, 0.999999, 0.9999999]:
    for thresh in [nodules_threshold]:  # just this one - keeping the loop for a while
if find_blanks_also:
idx = np.abs(pms) > thresh
else:
idx = pms > thresh
idx.shape
nodls = np.zeros(pms.shape).astype(np.int16)
nodls[idx] = 1
nx = nodls[idx]
nodules_pixels = ims[idx] # flat
nodules_hu = pix_to_hu(nodules_pixels)
part_name = ''.join([str(segment), '_', str(thresh)])
### DO NOT do them here
use_corrected_nodules = True # do it below from 20170311
if not use_corrected_nodules:
df = hu_describe(nodules_hu, uid=uid, part=part_name)
add_projections = False
axis = 1
nodules_projections = []
for axis in range(3):
nodls_projection = np.max(nodls, axis=axis)
naxis_name = ''.join(["naxis_", str(axis),"_", part_name])
if add_projections:
df[naxis_name] = np.sum(nodls_projection)
nodules_projections.append(nodls_projection)
idx.shape
## find the individual nodules ... as per the specified probabilities
labs, labs_num = measure.label(idx, return_num = True, neighbors = 8 , background = 0) # label the nodules in 3d, allow for diagonal connectivity
voxels = []
vmasks = []
if labs_num > 0 and labs.shape[0] >1: # checking for height > 1 is needed as measure.regionprops fails when it is not, for instance for shape (1, 20, 20) we get ValueError: Label and intensity image must have the same shape.
        print("Before measure.regionprops, labs & intensity shapes: ", labs.shape, ims.shape)
        regprop = measure.regionprops(labs, intensity_image=ims)  # problem here on 20170327
voxel_volume = np.product(RESIZE_SPACING)
        areas = [rp.area for rp in regprop]  # raw voxel count (multiplied by voxel_volume below to get mm^3)
volumes = [rp.area * voxel_volume for rp in regprop]
diameters = [2 * (3* volume / (4 * np.pi ))**0.3333 for volume in volumes]
labs_ids = [rp.label for rp in regprop]
#ls = [rp.label for rp in regprop]
max_val = np.max(areas)
max_index = areas.index(max_val)
max_label = regprop[max_index].label
bboxes = [r.bbox for r in regprop]
idl = labs == regprop[max_index].label # 400
nodules_pixels = ims[idl]
nodules_hu = pix_to_hu(nodules_pixels)
if run_UNNEEDED_code:
nodules_hu_reg = []
for rp in regprop:
idl = labs == rp.label
nodules_pixels = ims[idl]
nodules_hu = pix_to_hu(nodules_pixels)
nodules_hu_reg.append(nodules_hu) # NOTE some are out of interest, i.e. are equal all (or near all) to MAX_BOUND (400)
dfn = pd.DataFrame(
{
"area": areas,
"diameter": diameters,
"bbox": bboxes
},
index=labs_ids)
nodules_count = len(dfn) # 524 for file 1 of part 8 ..
max_nodules_count = voxelscountmax
n=0
for n in range(max_nodules_count):
            if n < len(dfn):  # use the nodule data, otherwise empty
bb = dfn.iloc[n]["bbox"]
zmin = bb[0]
zmax = bb[3]
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
xmin = np.max([bb[2] - np.max([(grid - xlen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
                xmax = np.min([xmin + grid, ims.shape[2]])  ## do not go beyond the right side
xmin = xmax - grid
if (xmax - xmin) != grid:
print ("ERROR in calculating the cut-offs ..., xmin, xmax =", xmin, xmax)
ymin = np.max([bb[1] - np.max([(grid - ylen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
                ymax = np.min([ymin + grid, ims.shape[1]])  ## do not go beyond the right side
ymin = ymax - grid
if (ymax - ymin) != grid:
print ("ERROR in calculating the cut-offs ..., ymin, ymax =", ymin, ymax)
zmin_sel = zmin
zmax_sel = zmax
if centralcutonly: #include only one voxel representation
zmin_sel = zmin + zlen // 2
zmax_sel = zmin_sel + 1
iz=zmin_sel # for testing
for iz in range(zmin_sel,zmax_sel):
voxel = images3[iz,:,:, ymin:ymax, xmin:xmax]
vmask = pmasks3[iz,:,:, ymin:ymax, xmin:xmax]
voxels.append(voxel)
vmasks.append(vmask)
testPlot = False
if testPlot:
print ('scan '+str(iz))
f, ax = plt.subplots(1, 8, figsize=(24,3))
ax[0].imshow(nodls[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[1].imshow(ims[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[2].imshow(images3_amp[iz,0, dim//2, ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[3].imshow(voxel[0,dim//2],cmap=plt.cm.gray)
ax[4].imshow(voxel[0,dim],cmap=plt.cm.gray)
ax[5].imshow(voxel[0,dim+1],cmap=plt.cm.gray)
ax[6].imshow(voxel[0,dim+2],cmap=plt.cm.gray)
ax[7].imshow(voxel[0,dim+3],cmap=plt.cm.gray)
if len(voxels) > 0:
voxel_stack = np.stack(voxels)
vmask_stack = np.stack(vmasks)
else:
print_warning = False
if print_warning:
print("WARNING, find_voxels, not single voxel found even though expected")
voxel_stack = []
vmask_stack = []
if testPlot:
print ('voxels count ', len(voxel_stack))
for ii in range(0,len(voxel_stack),len(voxel_stack)//10):
f, ax = plt.subplots(1, 2, figsize=(6,3))
ax[0].imshow(voxel_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
ax[1].imshow(vmask_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
return voxel_stack, vmask_stack
def measure_voxels(labs, ims):
#print("Befpre measure.regionprops, labs & intensity shapes: ", labs.shape, ims.shape)
    regprop = measure.regionprops(labs, intensity_image=ims)  # problem here on 20170327
    voxel_volume = np.product(RESIZE_SPACING)
    areas = [rp.area for rp in regprop]  # raw voxel count (multiplied by voxel_volume below to get mm^3)
volumes = [rp.area * voxel_volume for rp in regprop]
diameters = [2 * (3* volume / (4 * np.pi ))**0.3333 for volume in volumes]
labs_ids = [rp.label for rp in regprop]
#ls = [rp.label for rp in regprop]
max_val = np.max(areas)
max_index = areas.index(max_val)
max_label = regprop[max_index].label
bboxes = [r.bbox for r in regprop]
#max_ls = ls[max_index]
idl = labs == regprop[max_index].label # 400
nodules_pixels = ims[idl]
nodules_hu = pix_to_hu(nodules_pixels)
run_UNNEEDED_code = False
if run_UNNEEDED_code:
nodules_hu_reg = []
for rp in regprop:
idl = labs == rp.label
nodules_pixels = ims[idl]
nodules_hu = pix_to_hu(nodules_pixels)
nodules_hu_reg.append(nodules_hu) # NOTE some are out of interest, i.e. are equal all (or near all) to MAX_BOUND (400)
dfn = pd.DataFrame(
{
#"zcenter": zcenters,
#"ycenter": ycenters,
#"xcenter": xcenters,
"area": areas,
"diameter": diameters,
#"irreg_vol": irreg_vol,
#"irreg_shape": irreg_shape,
#"nodules_hu": nodules_hu_reg,
"bbox": bboxes
},
index=labs_ids)
return dfn
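# Usage sketch (hypothetical inputs): measure_voxels expects a labelled volume
# plus the matching intensity volume; "area" is the raw voxel count and the
# diameter is the equivalent-sphere diameter d = 2 * (3*V / (4*pi))**(1/3).
#   labs, labs_num = measure.label(idx, return_num=True, neighbors=8, background=0)
#   dfn = measure_voxels(labs, ims)
#   print(dfn[["area", "diameter"]].head())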
def find_voxels_and_blanks(dim, grid, images3, images3_seg, pmasks3, nodules_threshold=0.999, voxelscountmax = 1000, find_blanks_also = True, centralcutonly=True, diamin=2, diamax=10):
if np.sum(pmasks3) > 0:
centralcutonly = False # override centralcut for True nodule masks
zsel = dim // 2 if centralcutonly else range(0,dim)
pmav = pmasks3[:,0,zsel]
ims = images3[:,0,zsel] # selecting the zsel cut for nodules calc ...
ims_seg = images3_seg[:,0,zsel]
sstart = 0
send = images3.shape[0]
pms = pmav[sstart:send]
run_UNNEEDED_code = False
    thresh = nodules_threshold  # for testing, set it here and skip the loop
    segment = 2  # for compatibility of the naming convention
    for thresh in [nodules_threshold]:  # just this one - keeping the loop for a while
if find_blanks_also:
idx = np.abs(pms) > thresh
else:
idx = pms > thresh
idx.shape
nodls = np.zeros(pms.shape).astype(np.int16)
nodls[idx] = 1
nx = nodls[idx]
        volume = np.sum(nodls)  # a check calculation; counted as a count within hu_describe
nodules_pixels = ims[idx] # flat
nodules_hu = pix_to_hu(nodules_pixels)
part_name = ''.join([str(segment), '_', str(thresh)])
### DO NOT do them here
use_corrected_nodules = True # do it below from 20170311
if not use_corrected_nodules:
df = hu_describe(nodules_hu, uid=uid, part=part_name)
add_projections = False
if add_projections:
nodules_projections = []
for axis in range(3):
#sxm_projection = np.max(sxm, axis = axis)
nodls_projection = np.max(nodls, axis=axis)
naxis_name = ''.join(["naxis_", str(axis),"_", part_name])
if add_projections:
df[naxis_name] = np.sum(nodls_projection)
nodules_projections.append(nodls_projection)
voxels = []
vmasks = []
if not centralcutonly:
for k in range(idx.shape[0]):
if np.sum(idx[k]) > 0:
## find the nodules and take a cut
labs, labs_num = measure.label(idx[k], return_num = True, neighbors = 8 , background = 0) # label the nodules in 3d, allow for diagonal connectivity
dfn = measure_voxels(labs, ims[k])
nodules_count_0 = len(dfn)
## CUT out anything that is outside of the specified diam range
                    dfn = dfn[(dfn["diameter"] >= diamin) & ((dfn["diameter"] < diamax))]  # cut out anything outside the [diamin, diamax) mm range (for 2x2x2 spacing, 3 mm is roughly 7 voxels)
nodules_count = len(dfn) # 524 for file 1 of part 8 ..
max_nodules_count = voxelscountmax
n=0
for n in range(max_nodules_count):
                        if n < len(dfn):  # use the nodule data, otherwise empty
bb = dfn.iloc[n]["bbox"]
zmin = bb[0]
zmax = bb[3]
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
xmin = np.max([bb[2] - np.max([(grid - xlen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
                            xmax = np.min([xmin + grid, ims.shape[-1]])  ## do not go beyond the right side
xmin = xmax - grid
if (xmax - xmin) != grid:
print ("ERROR in calculating the cut-offs ..., xmin, xmax =", xmin, xmax)
ymin = np.max([bb[1] - np.max([(grid - ylen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
                            ymax = np.min([ymin + grid, ims.shape[-2]])  ## do not go beyond the right side
ymin = ymax - grid
if (ymax - ymin) != grid:
print ("ERROR in calculating the cut-offs ..., ymin, ymax =", ymin, ymax)
# here simply takje the entire voxel we have
#images3.shape
voxel = images3[k,:,:, ymin:ymax, xmin:xmax]
vmask = pmasks3[k,:,:, ymin:ymax, xmin:xmax]
voxels.append(voxel)
vmasks.append(vmask)
#voxel.shape
else:# essentially taking the central cuts of the blanks
## find the individual nodules ... as per the specified probabilities
labs, labs_num = measure.label(idx, return_num = True, neighbors = 8 , background = 0) # label the nodules in 3d, allow for diagonal connectivity
if labs_num > 0 and labs.shape[0] >1: # checking for height > 1 is needed as measure.regionprops fails when it is not, for instance for shape (1, 20, 20) we get ValueError: Label and intensity image must have the same shape.
#labs_num_to_store = 5
dfn = measure_voxels(labs, ims)
nodules_count = len(dfn) # 524 for file 1 of part 8 ..
max_nodules_count = voxelscountmax
n=0
for n in range(max_nodules_count):
                if n < len(dfn):  # use the nodule data, otherwise empty
bb = dfn.iloc[n]["bbox"]
zmin = bb[0]
zmax = bb[3]
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
xmin = np.max([bb[2] - np.max([(grid - xlen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
                    xmax = np.min([xmin + grid, ims.shape[-1]])  ## do not go beyond the right side
xmin = xmax - grid
if (xmax - xmin) != grid:
print ("ERROR in calculating the cut-offs ..., xmin, xmax =", xmin, xmax)
ymin = np.max([bb[1] - np.max([(grid - ylen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
                    ymax = np.min([ymin + grid, ims.shape[-2]])  ## do not go beyond the right side
ymin = ymax - grid
if (ymax - ymin) != grid:
print ("ERROR in calculating the cut-offs ..., ymin, ymax =", ymin, ymax)
zmin_sel = zmin
zmax_sel = zmax
if centralcutonly: #include only one voxel representation
zmin_sel = zmin + zlen // 2
zmax_sel = zmin_sel + 1
iz=zmin_sel # for testing
for iz in range(zmin_sel,zmax_sel):
voxel = images3[iz,:,:, ymin:ymax, xmin:xmax]
vmask = pmasks3[iz,:,:, ymin:ymax, xmin:xmax]
voxels.append(voxel)
vmasks.append(vmask)
testPlot = False
if testPlot:
print ('scan '+str(iz))
f, ax = plt.subplots(1, 8, figsize=(24,3))
ax[0].imshow(nodls[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[1].imshow(ims[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[2].imshow(images3_amp[iz,0, dim//2, ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[3].imshow(voxel[0,dim//2],cmap=plt.cm.gray)
ax[4].imshow(voxel[0,dim],cmap=plt.cm.gray)
ax[5].imshow(voxel[0,dim+1],cmap=plt.cm.gray)
ax[6].imshow(voxel[0,dim+2],cmap=plt.cm.gray)
ax[7].imshow(voxel[0,dim+3],cmap=plt.cm.gray)
if len(voxels) > 0:
voxel_stack = np.stack(voxels)
vmask_stack = np.stack(vmasks)
else:
print_warning = False
if print_warning:
print("WARNING, find_voxels, not single voxel found even though expected")
voxel_stack = []
vmask_stack = []
#print("Nodules, voxels_aggregated: ", len(dfn), len(voxel_stack))
#np.savez_compressed(path_voxels_variant, voxel_stack)
testPlot = False
if testPlot:
print ('voxels count ', len(voxel_stack))
for ii in range(0,len(voxel_stack),len(voxel_stack)//10):
#plt.imshow(voxel_stack[ii,0,dim // 2], cmap=plt.cm.gray)
#plt.show()
f, ax = plt.subplots(1, 2, figsize=(6,3))
ax[0].imshow(voxel_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
ax[1].imshow(vmask_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
return voxel_stack, vmask_stack
def shuffle_scans_masks(scans, masks, seed):
    np.random.seed(seed)
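    # The rest of this function is missing from the source dump; a minimal
    # completion, assuming scans and masks are numpy arrays to be jointly
    # permuted along axis 0 under the given seed:
    index_shuf = np.random.permutation(len(scans))
    return scans[index_shuf], masks[index_shuf]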
import numpy
from xgboost import XGBClassifier
import scipy.io
import pandas as pd
from CALC_FEAT import feat_ext
from gtfparse import read_gtf
import sys
class preprocess:
def multiclass_problem(self,RNA_types,dfex):
classes=['protein_coding','Housekeeping','sncRNA','lncRNA']
new_dfex=pd.DataFrame(columns=dfex.columns)
new_dfex=new_dfex.append(dfex.loc[dfex['transcript_type'].isin(RNA_types)],ignore_index=True)
HK=['tRNA','rRNA']
new_dfex=new_dfex.replace({'transcript_type':HK},{'transcript_type':'Housekeeping'},regex=True)
LRNA=['lincRNA','antisense_RNA','antisense','sense_intronic','sense_overlapping']
new_dfex=new_dfex.replace({'transcript_type':LRNA},{'transcript_type':'lncRNA'},regex=True)
dc=dfex.index[dfex['transcript_type']=='pre_miRNA']
if(len(dc)>0):
SRNA=['snRNA','snoRNA','pre_miRNA']
else:
SRNA=['snRNA','snoRNA','miRNA']
new_dfex=new_dfex.replace({'transcript_type':SRNA},{'transcript_type':'sncRNA'},regex=True)
return classes,new_dfex
def remove_nans(self,test_feat):
check_nan=numpy.argwhere(numpy.isnan(test_feat))
for i in range(0,len(check_nan)):
test_feat[check_nan[i,0],check_nan[i,1]]=0
        check_inf=numpy.argwhere(numpy.isinf(test_feat))
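        # The remainder of this method is missing from the source dump; a
        # minimal completion mirroring the NaN loop above, assuming infinite
        # entries are likewise zeroed before returning:
        for i in range(0, len(check_inf)):
            test_feat[check_inf[i, 0], check_inf[i, 1]] = 0
        return test_feat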
import numpy as np
from numba import njit, prange
@njit()
def calc_coherence(data, semb_win, mode='semb'):
    """Calculate coherency over 2-D array data
Inputs:
data - 2D numpy array, of dimensions [channels, samples]
semb_win - window size, in samples
mode - type of coherency measure
semb - Windowed semblance (result between 0 and 1)
stack - mean over channels
sembstack - semblance multiplied by absolute value of stack (result > 0)
Output:
1D numpy array of sample-by-sample coherency
"""
nchan, nt = data.shape
semblance_res = np.zeros(shape=(nt,), dtype=np.float32)
sum_of_sqr = np.zeros(shape=(nt,), dtype=np.float32)
stack_res = np.zeros(shape=(nt,), dtype=np.float32)
half_win = int(np.floor(semb_win/2))
sum_of_sqr[:] = 0.0
    stack_res = np.sum(data, axis=0)
import numpy as np
from numpy.linalg import norm
import tensorflow as tf
from sklearn.cluster import KMeans
import random
class KMeansTF26:
def __init__(self, n_clusters, max_iter=100, random_state=123):
self.n_clusters = n_clusters
self.max_iter = max_iter
self.random_state = random_state
def initializ_centroids(self, X):
return tf.gather(X,
indices=np.random.randint(len(X), size=self.n_clusters))
def compute_centroids(self, X, labels):
centroids = []
for k in range(self.n_clusters):
centroids.append(tf.reduce_mean(X[labels == k], axis=0))
return tf.stack(centroids)
def compute_distance(self, X, centroids):
        return [tf.reduce_sum(tf.square(tf.subtract(X, cent)), 1) for cent in centroids]  # squared Euclidean distance of each sample to the centroid
def find_closest_cluster(self, distances):
return tf.argmin(distances, axis=0)
def fit(self, X):
X = tf.constant(X)
self.centroids = self.initializ_centroids(X)
for i in range(self.max_iter):
old_centroids = self.centroids
distance = self.compute_distance(X, old_centroids)
self.labels = self.find_closest_cluster(distance)
self.centroids = self.compute_centroids(X, self.labels)
# print(old_centroids)
# print()
# print(self.centroids)
# print(old_centroids - self.centroids)
if tf.reduce_sum(tf.abs(old_centroids - self.centroids)) < self.n_clusters :
break
return self
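# Hypothetical usage sketch (random data, not from the original script):
#   X = np.random.rand(200, 2).astype(np.float32)
#   km = KMeansTF26(n_clusters=3).fit(X)
#   print(km.centroids.shape)  # (3, 2)
# Design note: the stopping rule above is loose -- iteration ends once the
# summed absolute centroid movement drops below n_clusters, not a small epsilon.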
class Kmeans:
'''Implementing Kmeans algorithm.'''
def __init__(self, n_clusters, max_iter=100, random_state=123):
self.n_clusters = n_clusters
self.max_iter = max_iter
self.random_state = random_state
def initializ_centroids(self, X):
        rng = np.random.RandomState(self.random_state)  # use the seeded generator so random_state actually takes effect
        random_idx = rng.permutation(X.shape[0])
centroids = X[random_idx[:self.n_clusters]]
return centroids
def compute_centroids(self, X, labels):
centroids = np.zeros((self.n_clusters, X.shape[1]))
for k in range(self.n_clusters):
centroids[k, :] = np.mean(X[labels == k, :], axis=0)
return centroids
def compute_distance(self, X, centroids):
distance = np.zeros((X.shape[0], self.n_clusters))
for k in range(self.n_clusters):
row_norm = norm(X - centroids[k, :], axis=1)
distance[:, k] = np.square(row_norm)
return distance
def find_closest_cluster(self, distance):
return np.argmin(distance, axis=1)
def compute_sse(self, X, labels, centroids):
distance = np.zeros(X.shape[0])
for k in range(self.n_clusters):
distance[labels == k] = norm(X[labels == k] - centroids[k], axis=1)
return np.sum( | np.square(distance) | numpy.square |
import os
from os import path
import numpy as np
import pytest
import shutil
import autoarray as aa
from autoarray import exc
test_data_dir = path.join(
"{}".format(path.dirname(path.realpath(__file__))), "files", "mask"
)
class TestMask:
def test__manual(self):
mask = aa.Mask2D.manual(
mask=[[False, False], [True, True]], pixel_scales=1.0, sub_size=1
)
assert type(mask) == aa.Mask2D
assert (mask == np.array([[False, False], [True, True]])).all()
assert mask.pixel_scales == (1.0, 1.0)
assert mask.origin == (0.0, 0.0)
assert mask.sub_size == 1
assert (mask.extent == np.array([-1.0, 1.0, -1.0, 1.0])).all()
mask = aa.Mask2D.manual(
mask=[[False, False], [True, True]],
pixel_scales=(2.0, 3.0),
sub_size=2,
origin=(0.0, 1.0),
)
assert type(mask) == aa.Mask2D
assert (mask == np.array([[False, False], [True, True]])).all()
assert mask.pixel_scales == (2.0, 3.0)
assert mask.origin == (0.0, 1.0)
assert mask.sub_size == 2
mask = aa.Mask2D.manual(
mask=[[False, False], [True, True], [True, False], [False, True]],
pixel_scales=1.0,
sub_size=2,
)
assert type(mask) == aa.Mask2D
assert (
mask
== np.array([[False, False], [True, True], [True, False], [False, True]])
).all()
assert mask.pixel_scales == (1.0, 1.0)
assert mask.origin == (0.0, 0.0)
assert mask.sub_size == 2
def test__mask__invert_is_true_inverts_the_mask(self):
mask = aa.Mask2D.manual(
mask=[[False, False, True], [True, True, False]],
pixel_scales=1.0,
invert=True,
)
assert type(mask) == aa.Mask2D
assert (mask == np.array([[True, True, False], [False, False, True]])).all()
def test__mask__input_is_1d_mask__no_shape_native__raises_exception(self):
with pytest.raises(exc.MaskException):
aa.Mask2D.manual(mask=[False, False, True], pixel_scales=1.0)
with pytest.raises(exc.MaskException):
aa.Mask2D.manual(mask=[False, False, True], pixel_scales=False)
with pytest.raises(exc.MaskException):
aa.Mask2D.manual(mask=[False, False, True], pixel_scales=1.0, sub_size=1)
with pytest.raises(exc.MaskException):
aa.Mask2D.manual(mask=[False, False, True], pixel_scales=False, sub_size=1)
def test__is_all_true(self):
mask = aa.Mask2D.manual(mask=[[False, False], [False, False]], pixel_scales=1.0)
assert mask.is_all_true is False
mask = aa.Mask2D.manual(mask=[[False, False]], pixel_scales=1.0)
assert mask.is_all_true is False
mask = aa.Mask2D.manual(mask=[[False, True], [False, False]], pixel_scales=1.0)
assert mask.is_all_true is False
mask = aa.Mask2D.manual(mask=[[True, True], [True, True]], pixel_scales=1.0)
assert mask.is_all_true is True
def test__is_all_false(self):
mask = aa.Mask2D.manual(mask=[[False, False], [False, False]], pixel_scales=1.0)
assert mask.is_all_false is True
mask = aa.Mask2D.manual(mask=[[False, False]], pixel_scales=1.0)
assert mask.is_all_false is True
mask = aa.Mask2D.manual(mask=[[False, True], [False, False]], pixel_scales=1.0)
assert mask.is_all_false is False
mask = aa.Mask2D.manual(mask=[[True, True], [False, False]], pixel_scales=1.0)
assert mask.is_all_false is False
class TestClassMethods:
def test__mask_all_unmasked__5x5__input__all_are_false(self):
mask = aa.Mask2D.unmasked(shape_native=(5, 5), pixel_scales=1.0, invert=False)
assert mask.shape == (5, 5)
assert (
mask
== np.array(
[
[False, False, False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
]
)
).all()
mask = aa.Mask2D.unmasked(
shape_native=(3, 3), pixel_scales=(1.5, 1.5), invert=False, sub_size=2
)
assert mask.shape == (3, 3)
assert (
mask
== np.array(
[[False, False, False], [False, False, False], [False, False, False]]
)
).all()
assert mask.sub_size == 2
assert mask.pixel_scales == (1.5, 1.5)
assert mask.origin == (0.0, 0.0)
assert mask.mask_centre == (0.0, 0.0)
mask = aa.Mask2D.unmasked(
shape_native=(3, 3),
pixel_scales=(2.0, 2.5),
invert=True,
sub_size=4,
origin=(1.0, 2.0),
)
assert mask.shape == (3, 3)
assert (
mask
== np.array([[True, True, True], [True, True, True], [True, True, True]])
).all()
assert mask.sub_size == 4
assert mask.pixel_scales == (2.0, 2.5)
assert mask.origin == (1.0, 2.0)
def test__mask_circular__compare_to_array_util(self):
mask_via_util = aa.util.mask_2d.mask_2d_circular_from(
shape_native=(5, 4), pixel_scales=(2.7, 2.7), radius=3.5, centre=(0.0, 0.0)
)
mask = aa.Mask2D.circular(
shape_native=(5, 4),
pixel_scales=(2.7, 2.7),
sub_size=1,
radius=3.5,
centre=(0.0, 0.0),
)
assert (mask == mask_via_util).all()
assert mask.origin == (0.0, 0.0)
assert mask.mask_centre == pytest.approx((0.0, 0.0), 1.0e-8)
def test__mask_circular__inverted__compare_to_array_util(self):
mask_via_util = aa.util.mask_2d.mask_2d_circular_from(
shape_native=(5, 4), pixel_scales=(2.7, 2.7), radius=3.5, centre=(0.0, 0.0)
)
mask = aa.Mask2D.circular(
shape_native=(5, 4),
pixel_scales=(2.7, 2.7),
sub_size=1,
radius=3.5,
centre=(0.0, 0.0),
invert=True,
)
assert (mask == np.invert(mask_via_util)).all()
assert mask.origin == (0.0, 0.0)
assert mask.mask_centre == (0.0, 0.0)
def test__mask_annulus__compare_to_array_util(self):
mask_via_util = aa.util.mask_2d.mask_2d_circular_annular_from(
shape_native=(5, 4),
pixel_scales=(2.7, 2.7),
inner_radius=0.8,
outer_radius=3.5,
centre=(0.0, 0.0),
)
mask = aa.Mask2D.circular_annular(
shape_native=(5, 4),
pixel_scales=(2.7, 2.7),
sub_size=1,
inner_radius=0.8,
outer_radius=3.5,
centre=(0.0, 0.0),
)
assert (mask == mask_via_util).all()
assert mask.origin == (0.0, 0.0)
assert mask.mask_centre == pytest.approx((0.0, 0.0), 1.0e-8)
def test__mask_annulus_inverted__compare_to_array_util(self):
mask_via_util = aa.util.mask_2d.mask_2d_circular_annular_from(
shape_native=(5, 4),
pixel_scales=(2.7, 2.7),
inner_radius=0.8,
outer_radius=3.5,
centre=(0.0, 0.0),
)
mask = aa.Mask2D.circular_annular(
shape_native=(5, 4),
pixel_scales=(2.7, 2.7),
sub_size=1,
inner_radius=0.8,
outer_radius=3.5,
centre=(0.0, 0.0),
invert=True,
)
assert (mask == np.invert(mask_via_util)).all()
assert mask.origin == (0.0, 0.0)
assert mask.mask_centre == (0.0, 0.0)
def test__mask_anti_annulus__compare_to_array_util(self):
mask_via_util = aa.util.mask_2d.mask_2d_circular_anti_annular_from(
shape_native=(9, 9),
pixel_scales=(1.2, 1.2),
inner_radius=0.8,
outer_radius=2.2,
outer_radius_2_scaled=3.0,
centre=(0.0, 0.0),
)
mask = aa.Mask2D.circular_anti_annular(
shape_native=(9, 9),
pixel_scales=(1.2, 1.2),
sub_size=1,
inner_radius=0.8,
outer_radius=2.2,
outer_radius_2=3.0,
centre=(0.0, 0.0),
)
assert (mask == mask_via_util).all()
assert mask.origin == (0.0, 0.0)
assert mask.mask_centre == (0.0, 0.0)
def test__mask_anti_annulus_inverted__compare_to_array_util(self):
mask_via_util = aa.util.mask_2d.mask_2d_circular_anti_annular_from(
shape_native=(9, 9),
pixel_scales=(1.2, 1.2),
inner_radius=0.8,
outer_radius=2.2,
outer_radius_2_scaled=3.0,
centre=(0.0, 0.0),
)
mask = aa.Mask2D.circular_anti_annular(
shape_native=(9, 9),
pixel_scales=(1.2, 1.2),
sub_size=1,
inner_radius=0.8,
outer_radius=2.2,
outer_radius_2=3.0,
centre=(0.0, 0.0),
invert=True,
)
assert (mask == np.invert(mask_via_util)).all()
assert mask.origin == (0.0, 0.0)
assert mask.mask_centre == (0.0, 0.0)
def test__mask_elliptical__compare_to_array_util(self):
mask_via_util = aa.util.mask_2d.mask_2d_elliptical_from(
shape_native=(8, 5),
pixel_scales=(2.7, 2.7),
major_axis_radius=5.7,
axis_ratio=0.4,
angle=40.0,
centre=(0.0, 0.0),
)
mask = aa.Mask2D.elliptical(
shape_native=(8, 5),
pixel_scales=(2.7, 2.7),
sub_size=1,
major_axis_radius=5.7,
axis_ratio=0.4,
angle=40.0,
centre=(0.0, 0.0),
)
assert (mask == mask_via_util).all()
assert mask.origin == (0.0, 0.0)
assert mask.mask_centre == pytest.approx((0.0, 0.0), 1.0e-8)
def test__mask_elliptical_inverted__compare_to_array_util(self):
mask_via_util = aa.util.mask_2d.mask_2d_elliptical_from(
shape_native=(8, 5),
pixel_scales=(2.7, 2.7),
major_axis_radius=5.7,
axis_ratio=0.4,
angle=40.0,
centre=(0.0, 0.0),
)
mask = aa.Mask2D.elliptical(
shape_native=(8, 5),
pixel_scales=(2.7, 2.7),
sub_size=1,
major_axis_radius=5.7,
axis_ratio=0.4,
angle=40.0,
centre=(0.0, 0.0),
invert=True,
)
assert (mask == np.invert(mask_via_util)).all()
assert mask.origin == (0.0, 0.0)
assert mask.mask_centre == (0.0, 0.0)
def test__mask_elliptical_annular__compare_to_array_util(self):
mask_via_util = aa.util.mask_2d.mask_2d_elliptical_annular_from(
shape_native=(8, 5),
pixel_scales=(2.7, 2.7),
inner_major_axis_radius=2.1,
inner_axis_ratio=0.6,
inner_phi=20.0,
outer_major_axis_radius=5.7,
outer_axis_ratio=0.4,
outer_phi=40.0,
centre=(0.0, 0.0),
)
mask = aa.Mask2D.elliptical_annular(
shape_native=(8, 5),
pixel_scales=(2.7, 2.7),
sub_size=1,
inner_major_axis_radius=2.1,
inner_axis_ratio=0.6,
inner_phi=20.0,
outer_major_axis_radius=5.7,
outer_axis_ratio=0.4,
outer_phi=40.0,
centre=(0.0, 0.0),
)
assert (mask == mask_via_util).all()
assert mask.origin == (0.0, 0.0)
assert mask.mask_centre == pytest.approx((0.0, 0.0), 1.0e-8)
def test__mask_elliptical_annular_inverted__compare_to_array_util(self):
mask_via_util = aa.util.mask_2d.mask_2d_elliptical_annular_from(
shape_native=(8, 5),
pixel_scales=(2.7, 2.7),
inner_major_axis_radius=2.1,
inner_axis_ratio=0.6,
inner_phi=20.0,
outer_major_axis_radius=5.7,
outer_axis_ratio=0.4,
outer_phi=40.0,
centre=(0.0, 0.0),
)
mask = aa.Mask2D.elliptical_annular(
shape_native=(8, 5),
pixel_scales=(2.7, 2.7),
sub_size=1,
inner_major_axis_radius=2.1,
inner_axis_ratio=0.6,
inner_phi=20.0,
outer_major_axis_radius=5.7,
outer_axis_ratio=0.4,
outer_phi=40.0,
centre=(0.0, 0.0),
invert=True,
)
assert (mask == np.invert(mask_via_util)).all()
assert mask.origin == (0.0, 0.0)
assert mask.mask_centre == (0.0, 0.0)
def test__from_pixel_coordinates__mask_with_or_without_buffer__false_at_buffed_coordinates(
self,
):
mask = aa.Mask2D.from_pixel_coordinates(
shape_native=(5, 5), pixel_coordinates=[[2, 2]], pixel_scales=1.0, buffer=0
)
assert (
mask
== np.array(
[
[True, True, True, True, True],
[True, True, True, True, True],
[True, True, False, True, True],
[True, True, True, True, True],
[True, True, True, True, True],
]
)
).all()
mask = aa.Mask2D.from_pixel_coordinates(
shape_native=(5, 5), pixel_coordinates=[[2, 2]], pixel_scales=1.0, buffer=1
)
assert (
mask
== np.array(
[
[True, True, True, True, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, True, True, True, True],
]
)
).all()
mask = aa.Mask2D.from_pixel_coordinates(
shape_native=(7, 7),
pixel_coordinates=[[2, 2], [5, 5]],
pixel_scales=1.0,
buffer=1,
)
assert (
mask
== np.array(
[
[True, True, True, True, True, True, True],
[True, False, False, False, True, True, True],
[True, False, False, False, True, True, True],
[True, False, False, False, True, True, True],
[True, True, True, True, False, False, False],
[True, True, True, True, False, False, False],
[True, True, True, True, False, False, False],
]
)
).all()
class TestToFromFits:
def test__load_and_output_mask_to_fits(self):
mask = aa.Mask2D.from_fits(
file_path=path.join(test_data_dir, "3x3_ones.fits"),
hdu=0,
sub_size=1,
pixel_scales=(1.0, 1.0),
)
output_data_dir = path.join(
"{}".format(path.dirname(path.realpath(__file__))),
"files",
"array",
"output_test",
)
if path.exists(output_data_dir):
shutil.rmtree(output_data_dir)
os.makedirs(output_data_dir)
mask.output_to_fits(file_path=path.join(output_data_dir, "mask.fits"))
mask = aa.Mask2D.from_fits(
file_path=path.join(output_data_dir, "mask.fits"),
hdu=0,
sub_size=1,
pixel_scales=(1.0, 1.0),
origin=(2.0, 2.0),
)
assert (mask == np.ones((3, 3))).all()
assert mask.pixel_scales == (1.0, 1.0)
assert mask.origin == (2.0, 2.0)
def test__load_from_fits_with_resized_mask_shape(self):
mask = aa.Mask2D.from_fits(
file_path=path.join(test_data_dir, "3x3_ones.fits"),
hdu=0,
sub_size=1,
pixel_scales=(1.0, 1.0),
resized_mask_shape=(1, 1),
)
assert mask.shape_native == (1, 1)
mask = aa.Mask2D.from_fits(
file_path=path.join(test_data_dir, "3x3_ones.fits"),
hdu=0,
sub_size=1,
pixel_scales=(1.0, 1.0),
resized_mask_shape=(5, 5),
)
assert mask.shape_native == (5, 5)
class TestSubQuantities:
def test__sub_shape_is_shape_times_sub_size(self):
mask = aa.Mask2D.unmasked(shape_native=(5, 5), pixel_scales=1.0, sub_size=1)
assert mask.sub_shape_native == (5, 5)
mask = aa.Mask2D.unmasked(shape_native=(5, 5), pixel_scales=1.0, sub_size=2)
assert mask.sub_shape_native == (10, 10)
mask = aa.Mask2D.unmasked(shape_native=(10, 5), pixel_scales=1.0, sub_size=3)
assert mask.sub_shape_native == (30, 15)
class TestNewMasksFromMask:
def test__sub_mask__is_mask_at_sub_grid_resolution(self):
mask = aa.Mask2D.manual(
mask=[[False, True], [False, False]], pixel_scales=1.0, sub_size=2
)
assert (
mask.sub_mask
== np.array(
[
[False, False, True, True],
[False, False, True, True],
[False, False, False, False],
[False, False, False, False],
]
)
).all()
mask = aa.Mask2D.manual(
mask=[[False, False, True], [False, True, False]],
pixel_scales=1.0,
sub_size=2,
)
assert (
mask.sub_mask
== np.array(
[
[False, False, False, False, True, True],
[False, False, False, False, True, True],
[False, False, True, True, False, False],
[False, False, True, True, False, False],
]
)
).all()
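        # Each native pixel expands into a sub_size x sub_size block, so the (2, 3) mask with
        # sub_size=2 above yields the asserted (4, 6) sub_mask with entries replicated blockwise.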
def test__resized_mask__pad__compare_to_manual_mask(self):
mask = aa.Mask2D.unmasked(shape_native=(5, 5), pixel_scales=1.0)
mask[2, 2] = True
mask_resized = mask.resized_mask_from(new_shape=(7, 7))
mask_resized_manual = | np.full(fill_value=False, shape=(7, 7)) | numpy.full |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: bio_time_series
# language: python
# name: bio_time_series
# ---
# %%
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# %config InlineBackend.print_figure_kwargs = {'bbox_inches': None}
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import time
import pandas as pd
from tqdm.notebook import tqdm
from bioslds.arma import Arma
from bioslds.dataset import RandomArmaDataset
from bioslds.plotting import FigureManager, show_latent
from bioslds.cluster_quality import unordered_accuracy_score
from bioslds.batch import hyper_score_ar
from bioslds.regressors import (
BioWTARegressor,
CrosscorrelationRegressor,
CepstralRegressor,
)
from draft_helpers import (
paper_style,
calculate_ar_identification_progress,
make_multi_trajectory_plot,
make_accuracy_plot,
predict_plain_score,
make_accuracy_comparison_diagram,
get_accuracy_metrics,
calculate_smooth_weight_errors,
)
fig_path = os.path.join("..", "figs", "draft")
# %% [markdown]
# # Run BioWTA, autocorrelation, and cepstral oracle algorithms on signals based on pairs of AR(3) processes
# %% [markdown]
# ## Define the problem and the parameters for the learning algorithms
# %% [markdown]
# Using best parameters obtained from hyperoptimization runs.
# %%
n_signals = 100
n_samples = 200_000
orders = [(3, 0), (3, 0)]
dwell_times = 100
min_dwell = 50
max_pole_radius = 0.95
normalize = True
fix_scale = None
seed = 153
n_models = 2
n_features = 3
rate_nsm = 0.005028
streak_nsm = 9.527731
rate_cepstral = 0.071844
order_cepstral = 2
metric = unordered_accuracy_score
good_score = 0.85
threshold_steps = 10_000
dataset = RandomArmaDataset(
n_signals,
n_samples,
orders,
dwell_times=dwell_times,
min_dwell=min_dwell,
fix_scale=fix_scale,
normalize=normalize,
rng=seed,
arma_kws={"max_pole_radius": max_pole_radius},
)
# %% [markdown]
# ## Run BioWTA with all combinations of enhancements
# %%
biowta_configurations = {
(1, 1, 0): {
"rate": 0.001992,
"trans_mat": 1 - 1 / 7.794633,
"temperature": 1.036228,
"error_timescale": 1.000000,
},
(0, 0, 1): {
"rate": 0.004718,
"trans_mat": 1 - 1 / 2.000000,
"temperature": 0.000000,
"error_timescale": 4.216198,
},
(1, 1, 1): {
"rate": 0.004130,
"trans_mat": 1 - 1 / 5.769690,
"temperature": 0.808615,
"error_timescale": 1.470822,
},
(0, 1, 1): {
"rate": 0.004826,
"trans_mat": 1 - 1 / 2.154856,
"temperature": 0.000000,
"error_timescale": 4.566321,
},
(1, 0, 1): {
"rate": 0.006080,
"trans_mat": 1 - 1 / 2.000000,
"temperature": 0.117712,
"error_timescale": 4.438448,
},
(0, 1, 0): {
"rate": 0.001476,
"trans_mat": 1 - 1 / 2.984215,
"temperature": 0.000000,
"error_timescale": 1.000000,
},
(0, 0, 0): {
"rate": 0.001199,
"trans_mat": 1 - 1 / 2.000000,
"temperature": 0.000000,
"error_timescale": 1.000000,
},
(1, 0, 0): {
"rate": 0.005084,
"trans_mat": 1 - 1 / 2.000000,
"temperature": 0.011821,
"error_timescale": 1.000000,
},
}
biowta_configurations_human = {
(0, 0, 0): "plain",
(0, 0, 1): "avg_error",
(0, 1, 0): "persistent",
(1, 0, 0): "soft",
(0, 1, 1): "persistent+avg_error",
(1, 1, 0): "soft+persistent",
(1, 0, 1): "soft+avg_error",
(1, 1, 1): "full",
}
# %%
result_biowta_mods = {}
for key in tqdm(biowta_configurations, desc="biowta cfg"):
result_biowta_mods[key] = hyper_score_ar(
BioWTARegressor,
dataset,
metric,
n_models=n_models,
n_features=n_features,
progress=tqdm,
monitor=["r", "weights_", "prediction_"],
**biowta_configurations[key],
)
crt_scores = result_biowta_mods[key][1].trial_scores
crt_median = np.median(crt_scores)
crt_quantile = np.quantile(crt_scores, 0.05)
crt_good = np.mean(crt_scores > good_score)
print(
f"{''.join(str(_) for _ in key)}: median={crt_median:.4f}, "
f"5%={crt_quantile:.4f}, "
f"fraction>{int(100 * good_score)}%={crt_good:.4f}"
)
# %%
for key in tqdm(biowta_configurations, desc="biowta cfg, reconstruction progress"):
calculate_ar_identification_progress(result_biowta_mods[key][1].history, dataset)
# %% [markdown]
# Find some "good" indices in the dataset: one that obtains an accuracy score close to a chosen threshold for "good-enough" (which we set to 85%); and one that has a similar score but also has small reconstruction error for the weights.
# %%
result_biowta_chosen = result_biowta_mods[1, 1, 0]
crt_mask = (result_biowta_chosen[1].trial_scores > 0.98 * good_score) & (
result_biowta_chosen[1].trial_scores < 1.02 * good_score
)
crt_idxs = crt_mask.nonzero()[0]
crt_errors_norm = np.asarray(
[ | np.mean(_.weight_errors_normalized_[-1]) | numpy.mean |
'''
Created with love by Sigmoid
@Author - <NAME> - <EMAIL>
'''
import numpy as np
import pandas as pd
import random
import sys
from random import randrange
from .SMOTE import SMOTE
from sklearn.mixture import GaussianMixture
from .erorrs import NotBinaryData, NoSuchColumn
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
class SCUT:
def __init__(self,k: "int > 0" = 5, seed: float = 42, binary_columns : list = None) -> None:
'''
Setting up the algorithm
:param k: int, k>0, default = 5
            Number of neighbours which will be considered when looking for similar data points
        :param seed: int, default = 42
            Seed for the random number generators
:param binary_columns: list, default = None
The list of columns that should have binary values after balancing.
'''
self.__k = k
if binary_columns is None:
self.__binarize = False
self.__binary_columns = None
else:
self.__binarize = True
self.__binary_columns = binary_columns
self.__seed = seed
np.random.seed(self.__seed)
random.seed(self.__seed)
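    # Usage sketch: SCUT(k=5).balance(df, target='label') balances every class toward the mean
    # class size; judging from the imports, SMOTE presumably handles the oversampling side.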
def __to_binary(self) -> None:
'''
        If :param binary_columns: was provided, the intermediate values in those binary columns will be rounded.
'''
for column_name in self.__binary_columns:
serie = self.synthetic_df[column_name].values
threshold = (self.df[column_name].max() + self.df[column_name].min()) / 2
for i in range(len(serie)):
if serie[i] >= threshold:
serie[i] = self.df[column_name].max()
else:
serie[i] = self.df[column_name].min()
self.synthetic_df[column_name] = serie
def __infinity_check(self, matrix : 'np.array') -> 'np.array':
'''
This function replaces the infinity and -infinity values with the minimal and maximal float python values.
:param matrix: 'np.array'
            The numpy array that was generated by the algorithm.
:return: 'np.array'
The numpy array with the infinity replaced values.
'''
matrix[matrix == -np.inf] = sys.float_info.min
matrix[matrix == np.inf] = sys.float_info.max
return matrix
def balance(self, df : pd.DataFrame, target : str):
'''
        Balance the classes of the data frame
        :param df: pandas DataFrame
            Data Frame on which the algorithm is applied
        :param target: string
            The name of the target column whose value we have to predict
'''
        if target not in df.columns:
            raise NoSuchColumn(f"{target} isn't a column of the passed data frame")
        # get unique values from the target column
        unique = df[target].unique()
        self.target = target
self.df = df.copy()
#training columns
self.X_columns = [column for column in self.df.columns if column != target]
class_samples = []
for clas in unique:
class_samples.append(self.df[self.df[self.target]==clas][self.X_columns].values)
classes_nr_samples = []
for clas in unique:
classes_nr_samples.append(len(self.df[self.df[self.target]==clas]))
#getting mean number of samples of all classes
mean = | np.mean(classes_nr_samples) | numpy.mean |
import jax.numpy as jnp
from jax import grad, vmap, hessian
from jax.config import config
config.update("jax_enable_x64", True)
# numpy
import numpy as onp
from numpy import random
import argparse
import logging
import datetime
from time import time
import os
# solving -grad(a*grad u) + alpha u^m = f
def get_parser():
parser = argparse.ArgumentParser(description='NonLinElliptic equation GP solver')
parser.add_argument("--freq_a", type=float, default = 1.0)
parser.add_argument("--alpha", type=float, default = 1.0)
parser.add_argument("--m", type = int, default = 3)
parser.add_argument("--dim", type = int, default = 2)
parser.add_argument("--kernel", type=str, default="Matern_7half", choices=["gaussian","inv_quadratics","Matern_3half","Matern_5half","Matern_7half","Matern_9half","Matern_11half"])
parser.add_argument("--sigma-scale", type = float, default = 0.25)
# sigma = args.sigma-scale*sqrt(dim)
parser.add_argument("--nugget", type = float, default = 1e-10)
parser.add_argument("--GNsteps", type = int, default = 6)
parser.add_argument("--logroot", type=str, default='./logs/')
parser.add_argument("--randomseed", type=int, default=1)
parser.add_argument("--num_exp", type=int, default=1)
args = parser.parse_args()
return args
def get_GNkernel_train(x,y,wx0,wx1,wxg,wy0,wy1,wyg,d,sigma):
# wx0 * delta_x + wxg * nabla delta_x + wx1 * Delta delta_x
return wx0*wy0*kappa(x,y,d,sigma) + wx0*wy1*Delta_y_kappa(x,y,d,sigma) + wy0*wx1*Delta_x_kappa(x,y,d,sigma) + wx1*wy1*Delta_x_Delta_y_kappa(x,y,d,sigma) + wx0*D_wy_kappa(x,y,d,sigma,wyg) + wy0*D_wx_kappa(x,y,d,sigma,wxg) + wx1*Delta_x_D_wy_kappa(x,y,d,sigma,wyg) + wy1*D_wx_Delta_y_kappa(x,y,d,sigma,wxg) + D_wx_D_wy_kappa(x,y,d,sigma,wxg,wyg)
def get_GNkernel_train_boundary(x,y,wy0,wy1,wyg,d,sigma):
return wy0*kappa(x,y,d,sigma) + wy1*Delta_y_kappa(x,y,d,sigma) + D_wy_kappa(x,y,d,sigma,wyg)
def get_GNkernel_val_predict(x,y,wy0,wy1,wyg,d,sigma):
return wy0*kappa(x,y,d,sigma) + wy1*Delta_y_kappa(x,y,d,sigma) + D_wy_kappa(x,y,d,sigma,wyg)
def get_GNkernel_val_predict_Delta(x,y,wy0,wy1,wyg,d,sigma):
return wy0*Delta_x_kappa(x,y,d,sigma) + wy1*Delta_x_Delta_y_kappa(x,y,d,sigma) + Delta_x_D_wy_kappa(x,y,d,sigma,wyg)
def assembly_Theta(X_domain, X_boundary, w0, w1, wg, sigma):
# X_domain, dim: N_domain*d;
# w0 col vec: coefs of Diracs, dim: N_domain;
# w1 coefs of Laplacians, dim: N_domain
N_domain,d = onp.shape(X_domain)
N_boundary,_ = onp.shape(X_boundary)
Theta = onp.zeros((N_domain+N_boundary,N_domain+N_boundary))
XdXd0 = onp.reshape(onp.tile(X_domain,(1,N_domain)),(-1,d))
XdXd1 = onp.tile(X_domain,(N_domain,1))
XbXd0 = onp.reshape(onp.tile(X_boundary,(1,N_domain)),(-1,d))
XbXd1 = onp.tile(X_domain,(N_boundary,1))
XbXb0 = onp.reshape(onp.tile(X_boundary,(1,N_boundary)),(-1,d))
XbXb1 = onp.tile(X_boundary,(N_boundary,1))
arr_wx0 = onp.reshape(onp.tile(w0,(1,N_domain)),(-1,1))
arr_wx1 = onp.reshape(onp.tile(w1,(1,N_domain)),(-1,1))
arr_wxg = onp.reshape(onp.tile(wg,(1,N_domain)),(-1,d))
arr_wy0 = onp.tile(w0,(N_domain,1))
arr_wy1 = onp.tile(w1,(N_domain,1))
arr_wyg = onp.tile(wg,(N_domain,1))
arr_wy0_bd = onp.tile(w0,(N_boundary,1))
arr_wy1_bd = onp.tile(w1,(N_boundary,1))
arr_wyg_bd = onp.tile(wg,(N_boundary,1))
val = vmap(lambda x,y,wx0,wx1,wxg,wy0,wy1,wyg: get_GNkernel_train(x,y,wx0,wx1,wxg,wy0,wy1,wyg,d,sigma))(XdXd0,XdXd1,arr_wx0,arr_wx1,arr_wxg,arr_wy0,arr_wy1,arr_wyg)
Theta[:N_domain,:N_domain] = onp.reshape(val, (N_domain,N_domain))
val = vmap(lambda x,y,wy0,wy1,wyg: get_GNkernel_train_boundary(x,y,wy0,wy1,wyg,d,sigma))(XbXd0,XbXd1,arr_wy0_bd,arr_wy1_bd,arr_wyg_bd)
Theta[N_domain:,:N_domain] = onp.reshape(val, (N_boundary,N_domain))
Theta[:N_domain,N_domain:] = onp.transpose(onp.reshape(val, (N_boundary,N_domain)))
val = vmap(lambda x,y: kappa(x,y,d,sigma))(XbXb0, XbXb1)
Theta[N_domain:,N_domain:] = onp.reshape(val, (N_boundary, N_boundary))
return Theta
def assembly_Theta_value_predict(X_infer, X_domain, X_boundary, w0, w1, wg, sigma):
N_infer, d = onp.shape(X_infer)
N_domain, _ = onp.shape(X_domain)
N_boundary, _ = onp.shape(X_boundary)
Theta = onp.zeros((2*N_infer,N_domain+N_boundary))
XiXd0 = onp.reshape(onp.tile(X_infer,(1,N_domain)),(-1,d))
XiXd1 = onp.tile(X_domain,(N_infer,1))
XiXb0 = onp.reshape(onp.tile(X_infer,(1,N_boundary)),(-1,d))
XiXb1 = onp.tile(X_boundary,(N_infer,1))
arr_wy0 = onp.tile(w0,(N_infer,1))
arr_wy1 = onp.tile(w1,(N_infer,1))
arr_wyg = onp.tile(wg,(N_infer,1))
val = vmap(lambda x,y,wy0,wy1,wyg: get_GNkernel_val_predict(x,y,wy0,wy1,wyg,d,sigma))(XiXd0,XiXd1,arr_wy0,arr_wy1,arr_wyg)
Theta[:N_infer,:N_domain] = onp.reshape(val, (N_infer,N_domain))
val = vmap(lambda x,y: kappa(x,y,d,sigma))(XiXb0, XiXb1)
Theta[:N_infer,N_domain:] = onp.reshape(val, (N_infer,N_boundary))
val = vmap(lambda x,y,wy0,wy1,wyg: get_GNkernel_val_predict_Delta(x,y,wy0,wy1,wyg,d,sigma))(XiXd0,XiXd1,arr_wy0,arr_wy1,arr_wyg)
Theta[N_infer:,:N_domain] = onp.reshape(val, (N_infer,N_domain))
val = vmap(lambda x,y: Delta_x_kappa(x,y,d,sigma))(XiXb0, XiXb1)
Theta[N_infer:,N_domain:] = onp.reshape(val, (N_infer,N_boundary))
return Theta
def GPsolver(X_domain, X_boundary, X_test, sigma, nugget, sol_init, GN_step = 4):
N_domain, d = onp.shape(X_domain)
sol = sol_init
rhs_f = vmap(f)(X_domain)[:,onp.newaxis]
bdy_g = vmap(g)(X_boundary)[:,onp.newaxis]
wg = -vmap(grad_a)(X_domain) #size?
w1 = -vmap(a)(X_domain)[:,onp.newaxis]
time_begin = time()
for i in range(GN_step):
w0 = alpha*m*(sol**(m-1))
Theta_train = assembly_Theta(X_domain, X_boundary, w0, w1, wg, sigma)
Theta_test = assembly_Theta_value_predict(X_domain, X_domain, X_boundary, w0, w1, wg, sigma)
rhs = rhs_f + alpha*(m-1)*(sol**m)
rhs = onp.concatenate((rhs, bdy_g), axis = 0)
sol = Theta_test[:N_domain,:] @ (onp.linalg.solve(Theta_train + nugget*onp.diag(onp.diag(Theta_train)),rhs))
total_mins = (time() - time_begin) / 60
logging.info(f'[Timer] GP iteration {i+1}/{GN_step}, finished in {total_mins:.2f} minutes')
Theta_test = assembly_Theta_value_predict(X_test, X_domain, X_boundary, w0, w1, wg, sigma)
result_test = Theta_test @ (onp.linalg.solve(Theta_train + nugget*onp.diag(onp.diag(Theta_train)),rhs))
N_infer, d = onp.shape(X_test)
sol_test = result_test[:N_infer]
Delta_sol_test = result_test[N_infer:]
return sol, sol_test, Delta_sol_test
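# Each Gauss-Newton sweep above linearises the alpha*u^m term around the current iterate
# (w0 = alpha*m*u^(m-1)), reassembles the kernel matrix for the linearised operator, and
# solves the resulting system with a nugget-regularised diagonal.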
# def sample_points(N_domain, N_boundary, d, choice = 'random'):
# X_domain = onp.zeros((N_domain,d))
# X_boundary = onp.zeros((N_boundary,d))
# X_domain = onp.random.randn(N_domain,d) # N_domain*d
# X_domain /= onp.linalg.norm(X_domain, axis=1)[:,onp.newaxis] # the divisor is of N_domain*1
# random_radii = onp.random.rand(N_domain,1) ** (1/d)
# X_domain *= random_radii
# X_boundary = onp.random.randn(N_boundary,d)
# X_boundary /= onp.linalg.norm(X_boundary, axis=1)[:,onp.newaxis]
# return X_domain, X_boundary
def sample_points(N_domain, N_boundary, d, choice = 'random'):
x1l = 0.0
x1r = 1.0
x2l = 0.0
x2r = 1.0
#(x,y) in [x1l,x1r]*[x2l,x2r] default = [0,1]*[0,1]
# interior nodes
X_domain = onp.concatenate((random.uniform(x1l, x1r, (N_domain, 1)), random.uniform(x2l, x2r, (N_domain, 1))), axis = 1)
N_boundary_per_bd = int(N_boundary/4)
X_boundary = onp.zeros((N_boundary_per_bd*4, 2))
# bottom face
X_boundary[0:N_boundary_per_bd, 0] = random.uniform(x1l, x1r, N_boundary_per_bd)
X_boundary[0:N_boundary_per_bd, 1] = x2l
# right face
X_boundary[N_boundary_per_bd:2*N_boundary_per_bd, 0] = x1r
X_boundary[N_boundary_per_bd:2*N_boundary_per_bd, 1] = random.uniform(x2l, x2r, N_boundary_per_bd)
# top face
X_boundary[2*N_boundary_per_bd:3*N_boundary_per_bd, 0] = random.uniform(x1l, x1r, N_boundary_per_bd)
X_boundary[2*N_boundary_per_bd:3*N_boundary_per_bd, 1] = x2r
# left face
X_boundary[3*N_boundary_per_bd:4*N_boundary_per_bd, 1] = random.uniform(x2l, x2r, N_boundary_per_bd)
X_boundary[3*N_boundary_per_bd:4*N_boundary_per_bd, 0] = x1l
return X_domain, X_boundary
def logger(args, level = 'INFO'):
log_root = args.logroot + 'NonVarLinElliptic_rate'
log_name = 'dim' + str(args.dim) + '_kernel' + str(args.kernel)
logdir = os.path.join(log_root, log_name)
os.makedirs(logdir, exist_ok=True)
log_para = 's' + str(args.sigma_scale) + str(args.nugget).replace(".","") + '_fa' + str(args.freq_a) + '_cos' + '_nexp' + str(args.num_exp)
date = str(datetime.datetime.now())
log_base = date[date.find("-"):date.rfind(".")].replace("-", "").replace(":", "").replace(" ", "_")
filename = log_para + '_' + log_base + '.log'
logging.basicConfig(level=logging.__dict__[level],
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler(logdir+'/'+filename),
logging.StreamHandler()]
)
return logdir+'/'+filename
def set_random_seeds(args):
random_seed = args.randomseed
| random.seed(random_seed) | numpy.random.seed |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import sympy
import cirq
import pytest
import qsimcirq
class NoiseTrigger(cirq.SingleQubitGate):
"""A no-op gate with no _unitary_ method defined.
Appending this gate to a circuit will force it to use qtrajectory, but the
new circuit will otherwise behave identically to the original.
"""
# def _mixture_(self):
# return ((1.0, np.asarray([1, 0, 0, 1])),)
def _kraus_(self):
return ( | np.asarray([1, 0, 0, 1]) | numpy.asarray |
# OpenPharmacophore
from openpharmacophore._private_tools.exceptions import InvalidFeatureError, InvalidFileFormat
from openpharmacophore.io import (from_pharmer, from_moe, from_ligandscout,
to_ligandscout, to_moe, to_pharmagist, to_pharmer)
from openpharmacophore import PharmacophoricPoint
from openpharmacophore.algorithms.discretize import discretize
from openpharmacophore.pharmacophore.pharmacophoric_point import distance_bewteen_pharmacophoric_points
from openpharmacophore.pharmacophore.color_palettes import get_color_from_palette_for_feature
# Third party
import networkx as nx
import nglview as nv
import numpy as np
import pyunitwizard as puw
from rdkit import Geometry, RDLogger
from rdkit.Chem import ChemicalFeatures
from rdkit.Chem.Pharm3D import Pharmacophore as rdkitPharmacophore
RDLogger.DisableLog('rdApp.*') # Disable rdkit warnings
# Standard library
import copy
import itertools
class Pharmacophore():
""" Native object for pharmacophores.
Openpharmacophore native class to store pharmacophoric models. A pharmacophore can be constructed
from a list of elements or from a file.
Parameters
----------
elements : list openpharmacophore.PharmacophoricPoint
List of pharmacophoric elements
Attributes
----------
elements : list openpharmacophore.PharmacophoricPoint
List of pharmacophoric elements
n_elements : int
Number of pharmacophoric elements
"""
    def __init__(self, elements=None):
        # use None as default to avoid sharing one mutable list across instances
        if elements is None:
            elements = []
        self.elements = elements
        self.n_elements = len(elements)
@classmethod
def from_file(cls, file_name, **kwargs):
"""
Class method to load a pharmacophore from a file.
Parameters
---------
file_name : str
Name of the file containing the pharmacophore
"""
fextension = file_name.split(".")[-1]
if fextension == "json":
points, _ , _ = from_pharmer(file_name, False)
elif fextension == "ph4":
points = from_moe(file_name)
elif fextension == "pml":
points = from_ligandscout(file_name)
else:
raise InvalidFileFormat(f"Invalid file format, \"{file_name}\" is not a supported file format")
return cls(elements=points)
def add_to_NGLView(self, view, palette='openpharmacophore'):
"""Add the pharmacophore representation to a view (NGLWidget) from NGLView.
Each pharmacophoric element is added to the NGLWidget as a new component.
Parameters
----------
view : nglview.NGLWidget
View as NGLView widget where the representation of the pharmacophore is going to be
added.
palette : str or dict
Color palette name or dictionary. (Default: 'openpharmacophore')
Note
----
Nothing is returned. The `view` object is modified in place.
"""
first_element_index = len(view._ngl_component_ids)
for ii, element in enumerate(self.elements):
# Add Spheres
center = puw.get_value(element.center, to_unit="angstroms").tolist()
radius = puw.get_value(element.radius, to_unit="angstroms")
feature_color = get_color_from_palette_for_feature(element.feature_name, color_palette=palette)
label = f"{element.feature_name}_{ii}"
view.shape.add_sphere(center, feature_color, radius, label)
# Add vectors
if element.has_direction:
label = f"{element.feature_name}_vector"
if element.feature_name == "hb acceptor":
end_arrow = puw.get_value(element.center - 2 * radius * puw.quantity(element.direction, "angstroms"), to_unit='angstroms').tolist()
view.shape.add_arrow(end_arrow, center, feature_color, 0.2, label)
else:
end_arrow = puw.get_value(element.center + 2 * radius * puw.quantity(element.direction, "angstroms"), to_unit='angstroms').tolist()
view.shape.add_arrow(center, end_arrow, feature_color, 0.2, label)
# Add opacity to spheres
last_element_index = len(view._ngl_component_ids)
for jj in range(first_element_index, last_element_index):
view.update_representation(component=jj, opacity=0.8)
def show(self, palette='openpharmacophore'):
""" Show the pharmacophore model.
Parameters
----------
palette : str or dict.
Color palette name or dictionary. (Default: 'openpharmacophore')
Returns
-------
nglview.NGLWidget
An nglview.NGLWidget is returned with the 'view' of the pharmacophoric model and the
molecular system used to elucidate it.
"""
view = nv.NGLWidget()
self.add_to_NGLView(view, palette=palette)
return view
def add_element(self, pharmacophoric_element):
"""Add a new element to the pharmacophore.
Parameters
----------
pharmacophoric_element : openpharmacophore.PharmacophricPoint
The pharmacophoric point that will be added.
Note
------
The pharmacophoric element given as input argument is added to the pharmacophore
as a new entry of the list `elements`.
"""
self.elements.append(pharmacophoric_element)
self.n_elements +=1
def remove_elements(self, element_indices):
""" Remove elements from the pharmacophore.
Parameters
----------
element_indices : int or list of int
Indices of the elements to be removed. Can be a list of integers if multiple elements will be
removed or a single integer to remove one element.
Note
-----
The pharmacophoric element given as input argument is removed from the pharmacophore.
"""
if isinstance(element_indices, int):
self.elements.pop(element_indices)
self.n_elements -=1
elif isinstance(element_indices, list):
new_elements = [element for i, element in enumerate(self.elements) if i not in element_indices]
self.elements = new_elements
self.n_elements = len(self.elements)
def remove_feature(self, feat_type):
""" Remove an especific feature type from the pharmacophore elements list
Parameters
----------
feat_type : str
Name or type of the feature to be removed.
Note
-----
The pharmacophoric elements of the feature type given as input argument
are removed from the pharmacophore.
"""
feats = PharmacophoricPoint.get_valid_features()
if feat_type not in feats:
raise InvalidFeatureError(f"Cannot remove feature. \"{feat_type}\" is not a valid feature type")
temp_elements = [element for element in self.elements if element.feature_name != feat_type]
if len(temp_elements) == self.n_elements: # No element was removed
raise InvalidFeatureError(f"Cannot remove feature. The pharmacophore does not contain any {feat_type}")
self.elements = temp_elements
self.n_elements = len(self.elements)
def _reset(self):
"""Private method to reset all attributes to default values.
Note
----
Nothing is returned. All attributes are set to default values.
"""
self.elements.clear()
self.n_elements = 0
self.extractor = None
self.molecular_system = None
def to_ligandscout(self, file_name):
"""Method to export the pharmacophore to the ligandscout compatible format.
Parameters
----------
file_name : str
Name of file to be written with the ligandscout format of the pharmacophore.
Note
----
Nothing is returned. A new file is written.
"""
return to_ligandscout(self, file_name=file_name)
def to_pharmer(self, file_name):
"""Method to export the pharmacophore to the pharmer compatible format.
Parameters
----------
file_name : str
Name of file to be written with the pharmer format of the pharmacophore.
Note
----
Nothing is returned. A new file is written.
"""
return to_pharmer(self, file_name=file_name)
def to_pharmagist(self, file_name):
"""Method to export the pharmacophore to the pharmagist compatible format.
Parameters
----------
file_name : str
Name of file to be written with the pharmagist format of the pharmacophore.
Note
----
Nothing is returned. A new file is written.
"""
return to_pharmagist(self, file_name=file_name)
def to_moe(self, file_name):
"""Method to export the pharmacophore to the MOE compatible format.
Parameters
----------
file_name: str
Name of file to be written with the MOE format of the pharmacophore.
Note
----
Nothing is returned. A new file is written.
"""
return to_moe(self, file_name=file_name)
def to_rdkit(self):
""" Returns an rdkit pharmacophore with the elements from the original pharmacophore.
        rdkit pharmacophores do not store the elements' radii, so they are returned as well.
Returns
-------
rdkit_pharmacophore : rdkit.Chem.Pharm3D.Pharmacophore
The rdkit pharmacophore.
radii : list of float
List with the radius in angstroms of each pharmacophoric point.
"""
rdkit_element_name = { # dictionary to map openpharmacophore feature names to rdkit feature names
"aromatic ring": "Aromatic",
"hydrophobicity": "Hydrophobe",
"hb acceptor": "Acceptor",
"hb donor": "Donor",
"positive charge": "PosIonizable",
"negative charge": "NegIonizable",
}
points = []
radii = []
for element in self.elements:
feat_name = rdkit_element_name[element.feature_name]
center = puw.get_value(element.center, to_unit="angstroms")
center = Geometry.Point3D(center[0], center[1], center[2])
points.append(ChemicalFeatures.FreeChemicalFeature(
feat_name,
center
))
radius = puw.get_value(element.radius, to_unit="angstroms")
radii.append(radius)
rdkit_pharmacophore = rdkitPharmacophore.Pharmacophore(points)
return rdkit_pharmacophore, radii
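    # Usage sketch: rdkit_ph, radii = pharmacophore.to_rdkit(); the radii (in angstroms) come back
    # as a separate list because the rdkit features only carry positions.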
def to_nx_graph(self, dmin=2.0, dmax=13.0, bin_size=1.0):
""" Obtain a networkx graph representation of the pharmacophore.
The pharmacophore graph is a graph whose nodes are pharmacophoric features and
its edges are the euclidean distance between those features. The distance is
discretized into bins so more molecules can match the pharmacophore.
Parameters
----------
dmin : float
The minimun distance in angstroms from which two pharmacophoric points are considered different.
dmax : flaot
The maximum distance in angstroms between pharmacohoric points.
bin_size : float
The size of the bins that will be used to bin the distances.
Returns
-------
pharmacophore_graph : networkx.Graph
The pharmacophore graph
"""
pharmacophore_graph = nx.Graph()
bins = | np.arange(dmin, dmax, bin_size) | numpy.arange |
import numpy as np
def get_angle_acc(acc):
th_acc = np.arctan2(-acc[0], np.sqrt(acc[1] * acc[1] + acc[2] * acc[2]))
ps_acc = np.arctan2(acc[1], acc[2])
y = np.array([th_acc, ps_acc])
return y
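# Accelerometer-only attitude: th_acc is the pitch implied by the gravity vector and ps_acc the
# roll; e.g. get_angle_acc(np.array([0.0, 0.0, 9.8])) gives approximately [0, 0] for a level sensor.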
def get_Kalamgain(P, c, r):
CPC = np.dot(c, np.dot(P, c)) + r
return np.dot(P, np.dot(c, np.linalg.inv(CPC)))
def get_preEstimation2(x, gyro, Ts, Tri):
Q = | np.array([[0, Tri[1, 0], -Tri[1, 1]], [1, Tri[1, 1] * Tri[0, 2], Tri[1, 0] * Tri[0, 2]]]) | numpy.array |
import torch
import torch.nn as nn
import numpy as np
from .actnorm import ActNorm
from .invertible_conv import InvertibleConvolution
from .coupling import CouplingLayer
# device
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
class Flow(nn.Module):
def __init__(self, channels, coupling, device, coupling_bias, nn_init_last_zeros=False):
super(Flow, self).__init__()
self.actnorm = ActNorm(channels, device)
self.coupling = CouplingLayer(channels, coupling, coupling_bias, device, nn_init_last_zeros)
self.invconv = InvertibleConvolution(channels, device)
self.to(device)
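    # One flow step composes actnorm -> invertible 1x1 convolution -> coupling in the forward
    # direction; the reverse branch below undoes them in the opposite order.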
def forward(self, x, logdet=None, reverse=False):
if not reverse:
x, logdet, actnormloss = self.actnorm(x, logdet=logdet, reverse=reverse)
assert not np.isnan(x.mean().item()), "nan after actnorm in forward"
assert not np.isinf(x.mean().item()), "inf after actnorm in forward"
assert not np.isnan(logdet.sum().item()), "nan in log after actnorm in forward"
assert not np.isinf(logdet.sum().item()), "inf in log after actnorm in forward"
x, logdet = self.invconv(x, logdet=logdet, reverse=reverse)
assert not np.isnan(x.mean().item()), "nan after invconv in forward"
assert not np.isinf(x.mean().item()), "inf after invconv in forward"
assert not np.isnan(logdet.sum().item()), "nan in log after invconv"
assert not np.isinf(logdet.sum().item()), "inf in log after invconv"
x, logdet = self.coupling(x, logdet=logdet, reverse=reverse)
assert not np.isnan(x.mean().item()), "nan after coupling in forward"
assert not np.isinf(x.mean().item()), "inf after coupling in forward"
assert not np.isnan(logdet.sum().item()), "nan in log after coupling"
assert not np.isinf(logdet.sum().item()), "inf in log after coupling"
return x, logdet, actnormloss
if reverse:
x = self.coupling(x, reverse=reverse)
assert not np.isnan(x.mean().item()), "nan after coupling in reverse"
assert not np.isinf(x.mean().item()), "inf after coupling in reverse"
x = self.invconv(x, reverse=reverse)
assert not np.isnan(x.mean().item()), "nan after invconv in reverse"
assert not np.isinf(x.mean().item()), "inf after invconv in reverse"
x = self.actnorm(x, reverse=reverse)
assert not np.isnan(x.mean().item()), "nan after actnorm in reverse"
assert not np.isinf(x.mean().item()), "inf after actnorm in reverse"
return x
if __name__ == "__main__":
size = (16, 4, 32, 32)
flow = Flow(channels=4, coupling="affine", device=device, nn_init_last_zeros=False)
opt = torch.optim.Adam(flow.parameters(), lr=0.01)
for i in range(5000):
opt.zero_grad()
x = torch.tensor( | np.random.normal(1, 1, size) | numpy.random.normal |
from subprocess import call
import os, time
import shutil
import io
import base64
from IPython.display import HTML
import numpy as np
from PIL import ImageDraw, Image, ImageFont
from tempfile import NamedTemporaryFile
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import animation
import matplotlib
import math
import copy
import itertools
import tensorflow as tf
import subprocess
FLAGS = tf.app.flags.FLAGS
import cv2
#from pylab import *
import pylab
from matplotlib.patches import Wedge
from scipy.ndimage.filters import gaussian_filter
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredDrawingArea
from matplotlib.patches import FancyArrowPatch
def images2video_highqual(frame_rate,
name="temp_name", dir_name="temp_dir"):
# make dir if not exists
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
pwd = os.getcwd()
os.chdir(dir_name)
print("converting to video")
video_name = name+'.mp4'
cmd = "ffmpeg -y -f image2 -r " + str(frame_rate) + " -pattern_type glob -i '*.png' -crf 5 -preset veryslow " + \
"-threads 16 -vcodec libx264 -pix_fmt yuv420p " + video_name
call(cmd, shell=True)
call("rm *.png", shell=True)
os.chdir(pwd)
return os.path.join(dir_name, video_name)
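# Usage sketch: images2video_highqual(30, name="run1", dir_name="frames") expects PNG frames to be
# present in frames/ already (it globs '*.png'), encodes them at 30 fps and returns the mp4 path.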
def images2video(images, frame_rate,
name="temp_name", dir_name="temp_dir", highquality=True):
images = np.uint8(images)
shape = images.shape
assert (len(shape) == 4)
assert (shape[3] == 3 or shape[3] == 1)
# make dir if not exists
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
pwd = os.getcwd()
os.chdir(dir_name)
# write out images
print("writing images")
for i in range(shape[0]):
j = Image.fromarray(images[i, :, :, :])
j.save("%05d.jpeg" % i, "jpeg", quality=93)
print("converting to video")
video_name = name+'.mp4'
quality_str = '16' if highquality else '28'
cmd = "ffmpeg -y -f image2 -r " + str(frame_rate) + " -pattern_type glob -i '*.jpeg' -crf "+quality_str+" -preset veryfast " + \
"-threads 16 -vcodec libx264 -pix_fmt yuv420p " + video_name
call(cmd, shell=True)
call("rm *.jpeg", shell=True)
os.chdir(pwd)
return os.path.join(dir_name, video_name)
def play_video(path):
video = io.open(path, 'r+b').read()
encoded = base64.b64encode(video)
return HTML(data='''<video alt="test" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii')))
def visualize_images(images, frame_rate,
name="temp_name", dir_name="temp_dir",delete_temp=True):
path = images2video(images, frame_rate, name, dir_name)
out = play_video(path)
if delete_temp:
assert not("*" in dir_name)
shutil.rmtree(dir_name)
return out
def write_text_on_image(image, string,
lines=[],
fontsize=30,
lines_color=[]):
shape = image.shape
assert (len(shape) == 3)
assert (shape[-1] == 3 or shape[-1] == 1)
image = np.uint8(image)
j = Image.fromarray(image)
draw = ImageDraw.Draw(j)
# font = ImageFont.load_default().font
#font = ImageFont.truetype("/usr/share/fonts/truetype/inconsolata/Inconsolata.otf", fontsize)
font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", fontsize)
if isinstance(string, list):
for s in string:
draw.text(s[0], s[1], s[2], font=font)
else:
draw.text((0, 0), string, (255, 0, 0), font=font)
for line in lines:
draw.line(line, fill=128, width=1)
for line in lines_color:
draw.line(line[0], fill=line[1], width=1)
return np.array(j)
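# write_text_on_image accepts either a plain string (drawn in red at the top-left) or a list of
# (xy, text, color) tuples; `lines` and `lines_color` draw overlay segments given as [x0, y0, x1, y1].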
def egomotion2animation(ego):
# ego is a egomotion matrix, with nframes * previous frames * 3
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
line = ax.plot([], [], '.', zs=[])
line = line[0]
def get_range(ego, axis):
data = ego[:, :, axis]
data = np.reshape(data, [-1])
return [np.min(data), np.max(data)]
ax.axis(get_range(ego, 0) + get_range(ego, 1))
zrange = get_range(ego, 2)
ax.set_zlim(zrange[0], zrange[1])
# initialization function: plot the background of each frame
def init():
line.set_data([], [])
return line,
# animation function. This is called sequentially
def animate(i):
line.set_data(ego[i, :, 0], ego[i, :, 1])
line.set_3d_properties(ego[i, :, 2])
return line,
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=ego.shape[0], blit=True)
plt.close(anim._fig)
return anim
def animation2HTML(anim, frame_rate):
print("animaiton to video...")
if not hasattr(anim, '_encoded_video'):
with NamedTemporaryFile(suffix='.mp4') as f:
anim.save(f.name, fps=frame_rate,
extra_args=['-vcodec', 'libx264',
'-pix_fmt', 'yuv420p',
'-crf', '28',
'-preset', 'veryfast'])
video = io.open(f.name, 'r+b').read()
encoded = base64.b64encode(video)
return HTML(data='''<video alt="test" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii')))
def visualize_egomotion(ego, frame_rate):
anim = egomotion2animation(ego)
return animation2HTML(anim, frame_rate)
def vis_reader(tout, frame_rate, j=0):
decoded, isvalid, ego, name, isstop = tout
images = decoded[j, :, :, :, :]
images_txt = np.zeros_like(images)
this_stop = isstop[j]
this_valid = isvalid[j]
for i in range(images.shape[0]):
stop_str = {1: "STOP",
0: "GO",
-1: "UNKNOWN"}[this_stop[i]]
valid_str = {0: "Egomotion=Invalid",
1: "Egomotion=Valid"}[this_valid[i]]
showing_str = stop_str + "\n" + valid_str
# showing_str = stop_str
images_txt[i, :, :, :] = write_text_on_image(images[i, :, :, :], showing_str)
print("showing visualization for video %s" % name[0])
return visualize_images(images_txt, frame_rate)
def move_to_line(move, h, w, multiplier = 10):
m = copy.deepcopy(move)
m[1] *= multiplier
m = [m[1] * math.sin(m[0]), m[1]*math.cos(m[0])]
return [w / 2, h, w/2+m[0], h-m[1]]
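# move_to_line maps a (course, speed) pair to an arrow anchored at the bottom centre of the image:
# x grows with sin(course) and y shrinks with cos(course), with speed scaled by `multiplier`.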
def draw_bar_on_image(image, bar_left_top, fraction, fill=(0,0,0,128), height=20, length=120):
image = np.uint8(image)
j = Image.fromarray(image)
draw = ImageDraw.Draw(j)
l = bar_left_top
draw.rectangle([l, (l[0]+int(length*fraction), l[1]+height)], fill=fill)
return np.array(j)
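# draw_bar_on_image paints a horizontal progress bar at bar_left_top whose filled width is
# `fraction` of `length` pixels.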
def vis_reader_stop_go(tout, prediction,frame_rate, j=0, save_visualize = False, dir_name="temp", provider="nexar_large_speed"):
#out_of_date, won't do stop go any more
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
turn = turn[j, :, :]
locs = locs[j, :, :]
images = decoded[j, :, :, :, :]
images_txt = np.zeros_like(images)
stop = isstop[j, :]
speed = speed[j, :, :]
for i in range(images.shape[0]):
showing_str = "STOP" if prediction[i] == 1 else "GO!"
showing_str += "\n" + str(np.linalg.norm(speed[i, :]))
showing_str += "\n" + "GT: STOP" if stop[i] == 1 else "\nGT: GO!"
images_txt[i, :, :, :] = write_text_on_image(images[i, :, :, :],
showing_str)
print("showing visualization for video %s" % name[0])
#vis_speed(speed, frame_rate)
if save_visualize:
_, short_name = os.path.split(name[j])
short_name = short_name.split(".")[0]
return visualize_images(images_txt, frame_rate,
name=short_name,
dir_name=dir_name,
delete_temp=False)
else:
return visualize_images(images_txt, frame_rate)
def vis_discrete(tout, predict, frame_rate,
j=0, save_visualize=False, dir_name="temp"):
import data_providers.nexar_large_speed as provider
int2str = provider.MyDataset.turn_int2str
# city_data and only_seg are mutually exclusive, actually one flag is enough
if FLAGS.city_data == 1:
decoded = tout[0]
speed = tout[1]
name = tout[2]
isstop = tout[5]
turn = tout[6]
locs = tout[7]
elif FLAGS.only_seg == 1:
decoded = tout[0]
speed = tout[1]
name = tout[2]
isstop = tout[6]
turn = tout[7]
locs = tout[8]
else:
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
images = copy.deepcopy(decoded[j, :, :, :, :])
_, hi, wi, _ = images.shape
locs = locs[j, :, :]
turn = turn[j, :, :]
for i in range(images.shape[0]):
# the ground truth course and speed
showing_str = "speed: %.1f m/s \ncourse: %.2f degree/s" % \
(locs[i, 1], locs[i, 0]/math.pi*180)
for k in range(4):
showing_str += "\n"+int2str[k]
gtline = move_to_line(locs[i,:], hi, wi)
FontHeight=18
FontWidth =8
for k in range(4):
images[i, :, :, :] = draw_bar_on_image(images[i,:,:,:],
(FontWidth*14, FontHeight*(2+k)),
fraction = turn[i, k],
fill=(255, 0, 0, 128),
height=FontHeight * 2 // 3,
length=FontWidth * 4)
images[i, :, :, :] = draw_bar_on_image(images[i, :, :, :],
(FontWidth * 20, FontHeight * (2 + k)),
fraction=predict[i, k],
fill=(0, 255, 0, 128),
height=FontHeight * 2 // 3,
length=FontWidth * 4)
images[i, :, :, :] = write_text_on_image(images[i, :, :, :],
showing_str,
[gtline],
fontsize=15)
print("showing visualization for video %s" % name[j])
if save_visualize:
_, short_name = os.path.split(name[j])
short_name = short_name.split(".")[0]
for i in range(10):
this_name = short_name + "_" + str(i)
if not os.path.isfile(os.path.join(dir_name,this_name+'.mp4')):
break
return visualize_images(images, frame_rate,
name=this_name,
dir_name=dir_name,
delete_temp=False)
else:
return visualize_images(images, frame_rate)
def vis_discrete_simplified(tout, predict, frame_rate,
j=0, save_visualize=False, dir_name="temp"):
import data_providers.nexar_large_speed as provider
int2str = provider.MyDataset.turn_int2str
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
images = copy.deepcopy(decoded[j, :, :, :, :])
_, hi, wi, _ = images.shape
locs = locs[j, :, :]
turn = turn[j, :, :]
for i in range(images.shape[0]):
# the ground truth course and speed
showing_str = ""
for k in range(4):
showing_str += int2str[k] + "\n"
FontHeight = 18
FontWidth = 8
for k in range(4):
images[i, :, :, :] = draw_bar_on_image(images[i, :, :, :],
(FontWidth * 14, FontHeight * k),
fraction=turn[i, k],
fill=(255, 0, 0, 128),
height=FontHeight * 2 // 3,
length=FontWidth * 4)
images[i, :, :, :] = draw_bar_on_image(images[i, :, :, :],
(FontWidth * 20, FontHeight * k),
fraction=predict[i, k],
fill=(0, 255, 0, 128),
height=FontHeight * 2 // 3,
length=FontWidth * 4)
images[i, :, :, :] = write_text_on_image(images[i, :, :, :],
showing_str,
fontsize=15)
print("showing visualization for video %s" % name[j])
if save_visualize:
_, short_name = os.path.split(name[j])
short_name = short_name.split(".")[0]
for i in range(10):
this_name = short_name + "_" + str(i)
if not os.path.isfile(os.path.join(dir_name, this_name + '.mp4')):
break
return visualize_images(images, frame_rate,
name=this_name,
dir_name=dir_name,
delete_temp=False)
else:
return visualize_images(images, frame_rate)
def generate_meshlist(arange1, arange2):
return np.dstack(np.meshgrid(arange1, arange2, indexing='ij')).reshape((-1,2))
def draw_sector(image,
predict,
car_stop_model,
course_delta = 0.5 / 180 * math.pi,
speed_delta=0.3,
pdf_multiplier=255,
speed_multiplier = 5,
h=360, w=640,
max_speed=30,
uniform_speed=False,
consistent_vis=(False, 1e-3, 1e2),
has_alpha_channel=False):
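    # Renders the predicted (course, speed) density as a green sector anchored at the image's
    # bottom centre: sample the pdf on a polar (course, speed) grid, then for each pixel inside
    # the max-speed radius look up its bin and add the scaled density to the green channel.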
course_samples = np.arange(-math.pi / 2-course_delta,
math.pi / 2+course_delta,
course_delta)
speed_samples = np.arange(0, max_speed+speed_delta, speed_delta)
total_pdf = car_stop_model.continous_pdf([predict],
generate_meshlist(course_samples, speed_samples),
"multi_querys")
total_pdf = np.reshape(total_pdf, (len(course_samples), len(speed_samples)))
if uniform_speed:
total_pdf = total_pdf / np.sum(total_pdf, axis=1, keepdims=True)
speed_scaled = max_speed * speed_multiplier
# potential xy positions to be filled
xy = generate_meshlist(np.arange(w / 2 - speed_scaled, w / 2 + speed_scaled),
np.arange(h - speed_scaled, h))
# filter out invalid speed
v=np.stack((xy[:,0]-w/2, h-xy[:,1]), axis=1)
speed_norm = np.sqrt(v[:,0]**2 + v[:,1]**2) *(1.0/speed_multiplier)
valid_speed = np.less(speed_norm, max_speed)
xy = xy[valid_speed, :]
speed_norm=speed_norm[valid_speed]
v=v[valid_speed]
course_norm = np.arctan(1.0*v[:, 0] / v[:, 1])
# search the course and speed
icourse = np.searchsorted(course_samples, course_norm)
ispeed = np.searchsorted(speed_samples, speed_norm)
green_portion = 1
total = total_pdf[icourse, ispeed]
if consistent_vis[0] == False:
total_max = np.amax(total)
total = total / total_max * 255*green_portion
else:
# consistent visualization between methods
MIN = consistent_vis[1]
MAX = consistent_vis[2]
total = np.maximum(MIN, total)
total = np.minimum(MAX, total)
#total = np.log(total) # map to log(MIN) to log(MAX)
#total = (total -np.log(MIN)) / (np.log(MAX) - np.log(MIN)) * 255
total = (total - MIN) / (MAX - MIN)
total = np.sqrt(total)
total = total * 255
# assign to image
image[xy[:, 1], xy[:, 0], :] *= (1-green_portion)
image[xy[:, 1], xy[:, 0], 1] += total
if has_alpha_channel:
image[xy[:, 1], xy[:, 0], 3] = 255
return image
def vis_continuous(tout, predict, frame_rate, car_stop_model,
j=0, save_visualize=False, dir_name="temp", return_first=False, **kwargs):
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
images = copy.deepcopy(decoded[j, :, :, :, :])
images = images.astype('float64')
_, hi, wi, _ = images.shape
locs = locs[j, :, :]
for i in range(images.shape[0]):
# the ground truth course and speed
showing_str = "speed: %.1f m/s \ncourse: %.2f degree/s" % \
(locs[i, 1], locs[i, 0] / math.pi * 180)
gtline = move_to_line(locs[i, :], hi, wi, 10)
images[i, :, :, :] = draw_sector(images[i, :, :, :],
predict[i:(i+1), :],
car_stop_model,
course_delta=0.3 / 180 * math.pi,
speed_delta=0.3,
pdf_multiplier=255*10,
speed_multiplier=wi/30/3,
h=hi, w=wi,
consistent_vis=(True, 1e-5, 0.3))
        # get the MAP prediction (renamed from `map` to avoid shadowing the builtin)
        map_pred = car_stop_model.continous_MAP([predict[i:(i+1), :]])
        mapline = move_to_line(map_pred.ravel(), hi, wi, 10)
        # draw the shorter line last so it is not overwritten
        lines_v = [(gtline, (255, 0, 0)), (mapline, (0, 0, 255))]
        if locs[i, 1] < map_pred.ravel()[1]:
            lines_v = [lines_v[1], lines_v[0]]
images[i, :, :, :] = write_text_on_image(images[i, :, :, :],
showing_str,
lines_color=lines_v,
fontsize=15)
print("showing visualization for video %s" % name[j])
if return_first:
return images[0, :, :, :].astype(np.uint8)
if save_visualize:
_, short_name = os.path.split(name[j])
short_name = short_name.split(".")[0]
return visualize_images(images, frame_rate,
name=short_name,
dir_name=dir_name,
delete_temp=False)
else:
return visualize_images(images, frame_rate)
def vis_continuous_simplified(tout, predict, frame_rate, car_stop_model,
j=0, save_visualize=False, dir_name="temp", vis_radius=10):
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
images = copy.deepcopy(decoded[j, :, :, :, :])
images = images.astype('float64')
_, hi, wi, _ = images.shape
locs = locs[j, :, :]
locs = copy.deepcopy(locs)
for i in range(images.shape[0]):
        # fix the ground-truth speed to a constant so the drawn line has a fixed length
        locs[i, 1] = 10.0
        # get the MAP prediction (renamed from `map` to avoid shadowing the builtin)
        map_pred = car_stop_model.continous_MAP([predict[i:(i+1), :]])
        map_pred = map_pred.ravel()
        map_pred[1] = 10.0
        mapline = move_to_line(map_pred, hi, wi, 10)
        # get the second-best prediction
        map2 = car_stop_model.continous_MAP([predict[i:(i + 1), :]], return_second_best=True)
        map2 = map2.ravel()
        map2[1] = 10.0
        mapline2 = move_to_line(map2, hi, wi, 10)
        # per-line labels; disabled below in favour of a single string
        showing_str = [
            [(0, 0), "driver's angular speed: %.2f degree/s" % (locs[i, 0] / math.pi * 180), (255, 0, 0)],
            [(0, 20), "predicted angular speed: %.2f degree/s" % (map_pred[0] / math.pi * 180), (0, 0, 255)]]
        showing_str = "speed: %.1f m/s \ncourse: %.2f degree/s" % \
                      (locs[i, 1], locs[i, 0] / math.pi * 180)
gtline = move_to_line(locs[i, :], hi, wi, 10)
if FLAGS.is_MKZ_dataset:
# might be problematic since we enable the normalization
higher_bound = 0.3
else:
higher_bound = 3.0
images[i, :, :, :] = draw_sector(images[i, :, :, :],
predict[i:(i+1), :],
car_stop_model,
course_delta=0.1 / 180 * math.pi,
speed_delta=0.1,
pdf_multiplier=255*10,
speed_multiplier=int(wi/30/3),
h=hi, w=wi,
uniform_speed=True,
consistent_vis=(True, 1e-5, higher_bound))
        # the MAP-line swap below is disabled, since often the drawn line is not the MAP line
        '''
        # draw the shorter line last so it is not overwritten
        lines_v = [(gtline, (255, 0, 0)), (mapline, (0, 0, 255))]
        if locs[i, 1] < map_pred[1]:
            lines_v = [lines_v[1], lines_v[0]]
        '''
        lines_v = [(gtline, (255, 0, 0)), (mapline, (0, 0, 255)), (mapline2, (0, 255, 0))]
images[i, :, :, :] = write_text_on_image(images[i, :, :, :],
showing_str,
lines_color=lines_v,
fontsize=24)
print("showing visualization for video %s" % name[j])
if save_visualize:
_, short_name = os.path.split(name[j])
short_name = short_name.split(".")[0]
return visualize_images(images, frame_rate,
name=short_name,
dir_name=dir_name,
delete_temp=False)
else:
return visualize_images(images, frame_rate)
# some visualization functions for the speed
def visLoc(locs, label="NotSet"):
axis = lambda i: [loc[i] for loc in locs]
import matplotlib.ticker as ticker
fig, ax = plt.subplots()
#plt.grid(True)
ax.plot(axis(0), axis(1), 'g^', ms=2)
ylim = ax.get_ylim()
xlim = ax.get_xlim()
    ax.set_xlim(min(xlim[0], ylim[0]), max(xlim[1], ylim[1]))
    ax.set_ylim(min(xlim[0], ylim[0]), max(xlim[1], ylim[1]))
plt.title("Moving paths from " + label)
plt.xlabel("West -- East")
plt.ylabel("South -- North")
plt.show()
def integral(speed, time0):
out = np.zeros_like(speed)
l = speed.shape[0]
for i in range(l):
s = speed[i, :]
if i > 0:
out[i, :] = out[i - 1, :] + s * time0
return out
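# Illustrative example: a constant 2-d velocity of (1, 0) integrated over
# three samples taken 0.1 s apart accumulates displacement row by row:
#   integral(np.array([[1., 0.]] * 3), 0.1)
#   -> array([[0. , 0. ], [0.1, 0. ], [0.2, 0. ]])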
def vis_speed(speed, hz):
visLoc(integral(speed, 1.0 / hz), "speed and course")
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
# x has shape: #instances * #classes
maxes = np.max(x, axis=1)
e_x = np.exp(x - maxes[:, None])
sums = np.sum(e_x, axis=1)
return e_x / sums[:, None]
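# Example: rows are independent instances and each output row sums to 1:
#   softmax(np.array([[0., 0.], [1., 3.]]))
#   -> array([[0.5, 0.5], [0.11920292, 0.88079708]])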
def read_video_file(video_path, HEIGHT, WIDTH):
# take a video's path and return its decoded contents
cmnd = ['ffmpeg',
'-i', video_path,
'-f', 'image2pipe',
'-loglevel', 'panic',
'-pix_fmt', 'rgb24',
'-vcodec', 'rawvideo', '-']
pipe = subprocess.Popen(cmnd, stdout=subprocess.PIPE, bufsize=10 ** 7)
pout, perr = pipe.communicate()
    # np.fromstring is deprecated for binary data; frombuffer + copy keeps
    # the array writable. Each rgb24 pixel is 3 bytes, so the buffer must
    # divide evenly by HEIGHT * WIDTH * 3 for the reshape below.
    image_buff = np.frombuffer(pout, dtype=np.uint8).copy()
    if image_buff.size % (HEIGHT * WIDTH * 3):
        print("Height and Width are potentially not correct")
        return None
    image_buff = image_buff.reshape((-1, HEIGHT, WIDTH, 3))
return image_buff
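# Hypothetical usage (assumes ffmpeg is on the PATH and the file exists;
# the path and frame size below are illustrative only):
#   frames = read_video_file('/tmp/example.mp4', 216, 384)
#   if frames is not None:
#       print(frames.shape)  # (num_frames, 216, 384, 3), dtype uint8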
def vis_discrete_colormap_antialias(tout, predict, frame_rate, j=0, save_visualize=False, dir_name="temp", string_type='image'):
if FLAGS.only_seg:
decoded = tout[0]
speed = tout[1]
name = tout[2]
isstop = tout[6]
turn = tout[7]
locs = tout[8]
else:
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
images = copy.deepcopy(decoded[j, :, :, :, :])
_, hi, wi, _ = images.shape
turn = turn[j, :, :]
def get_color(prob):
cm = pylab.get_cmap('viridis') # inferno
color = cm(prob) # color will now be an RGBA tuple
r = color[0] * 255
g = color[1] * 255
b = color[2] * 255
return r, g, b
def clamp(x):
x = float(x)
return max(0, min(x, 1))
def add_to_ada(ada, pos_x, pos_y, radius, angle_s, angle_e, ring_width, color_code, edge_color, alpha_value):
ada.drawing_area.add_artist(
Wedge((pos_x, pos_y), radius, angle_s, angle_e, width=ring_width # , color=color_code#'#DAF7A6'
, alpha=alpha_value, antialiased=True, ec=edge_color, fc=color_code))
def draw_cake(ada, pos_x, pos_y, radius, angle_s, angle_diff, ring_width, color_code, edge_color, alpha_value,
share, shift=45):
angle_s = angle_s + shift
for i in range(share):
            angle_end = angle_s + (i + 1) * angle_diff / share
add_to_ada(ada, pos_x, pos_y, radius,
angle_s + i * (angle_diff) / share, angle_end,
ring_width, color_code=color_code, edge_color=edge_color, alpha_value=alpha_value[i])
def draw_pile_cake(ada, pos_x, pos_y, radius, angle_s, angle_diff, ring_width, color_code, edge_color, alpha_value,
share, x_frac, y_frac, split, fontsize=24, shift=45):
# draw the black one
draw_cake(ada, pos_x=pos_x, pos_y=pos_y, radius=radius, angle_s=angle_s, angle_diff=360, ring_width=None,
color_code='k', edge_color=None, alpha_value=[0.6], share=1)
# draw the green one
draw_cake(ada, pos_x=pos_x, pos_y=pos_y, radius=radius, angle_s=angle_s, angle_diff=360, ring_width=ring_width,
color_code=color_code, edge_color='#FFFFFF', alpha_value=alpha_value, share=4)
# draw the white edge
draw_cake(ada, pos_x=pos_x, pos_y=pos_y, radius=radius, angle_s=angle_s, angle_diff=360, ring_width=ring_width,
color_code='none', edge_color='#FFFFFF', alpha_value=[1, 1, 1, 1], share=4)
ada.da.add_artist(
ax.annotate(split, xy=(x_frac, y_frac), xycoords="axes fraction", fontsize=fontsize, color='w'))
def draw_cake_type(ada, string_type, action_mean, predict_mean):
if string_type == 'video':
draw_pile_cake(ada, pos_x=210, pos_y=70, radius=60, angle_s=0, angle_diff=360, ring_width=30,
color_code='#00FF00', edge_color=None, alpha_value=predict_mean, share=1,
x_frac=0.513, y_frac=0.895, split='P')
draw_pile_cake(ada, pos_x=80, pos_y=70, radius=60, angle_s=0, angle_diff=360, ring_width=30,
color_code='#00FF00', edge_color=None, alpha_value=action_mean, share=1,
x_frac=0.185, y_frac=0.895, split='G')
elif string_type == 'image':
draw_pile_cake(ada, pos_x=240, pos_y=70, radius=70, angle_s=0, angle_diff=360, ring_width=40,
color_code='#00FF00', edge_color=None, alpha_value=predict_mean, share=1,
x_frac=0.580, y_frac=0.89, split='P', fontsize=32)
draw_pile_cake(ada, pos_x=80, pos_y=70, radius=70, angle_s=0, angle_diff=360, ring_width=40,
color_code='#00FF00', edge_color=None, alpha_value=action_mean, share=1,
x_frac=0.18, y_frac=0.89, split='G', fontsize=32)
_, short_name = os.path.split(name[j])
short_name = short_name.split(".")[0]
for i in range(images.shape[0]):
action_mean = [clamp(turn[i, 0]+0.05), clamp(turn[i, 2]+0.05),
clamp(turn[i, 1]+0.1), clamp(turn[i, 3]+0.05)]
predict_mean = [clamp(predict[i, 0]+0.05), clamp(predict[i, 2]+0.05),
clamp(predict[i, 1]+0.05), clamp(predict[i, 3]+0.05)]
fig = plt.figure(figsize=(16, 12))
ax_original = plt.gca()
ax_original.set_axis_off()
ax_original.get_xaxis().set_visible(False)
ax_original.get_yaxis().set_visible(False)
plt.imshow(images[i, :, :, :])
plt.axis('off')
ax = fig.add_subplot(121, projection='polar')
ax_2 = fig.add_subplot(122, projection='polar')
ada = AnchoredDrawingArea(200, 100, 0, 0, loc=2, pad=0., frameon=False)
draw_cake_type(ada, string_type, action_mean, predict_mean)
ax.add_artist(ada)
ax.set_axis_off()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax_2.set_axis_off()
ax_2.get_xaxis().set_visible(False)
ax_2.get_yaxis().set_visible(False)
if not os.path.exists(os.path.join(dir_name,'viz')):
os.mkdir(os.path.join(dir_name,'viz'))
if not os.path.exists(os.path.join(dir_name,'viz', short_name+string_type)):
os.mkdir(os.path.join(dir_name, 'viz', short_name+string_type))
        fig.savefig(os.path.join(dir_name, 'viz', short_name+string_type, '{0:04}.png'.format(i)),
                    bbox_inches='tight', pad_inches=-0.04, transparent=True, dpi=100)
print(short_name,' ', i, 'Done!')
plt.show()
plt.close()
    images2video_highqual(frame_rate=3,
                          name=short_name, dir_name=os.path.join(dir_name, 'viz', short_name+string_type))
def vis_continuous_colormap_antialias(tout, predict, frame_rate, car_stop_model,
j=0, save_visualize=False, dir_name="temp", vis_radius=10):
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
images = copy.deepcopy(decoded[j, :, :, :, :])
#images = images.astype('float64')
_, hi, wi, _ = images.shape
locs = locs[j, :, :]
def plot_greens(bin_ends, values, image_width, image_height, radius, driver_action):
# bins are: [0, bin_ends[0]], [bin_ends[0], bin_ends[1]] ...
# and the corresponding values to display are: values[0], values[1]
# the final results are added to ada
ada = AnchoredDrawingArea(radius * 2, radius, 0, 0, loc=10, pad=0., frameon=False)
def add_ada_custom(angle_s, angle_e, value, color):
add_to_ada(ada, radius, -(image_height / 2 - radius / 2), radius, angle_s, angle_e, None, color, value)
def add_to_ada(ada, pos_x, pos_y, radius, angle_s, angle_e, ring_width, color_code, alpha_value):
ada.drawing_area.add_artist(
Wedge((pos_x, pos_y), radius, angle_s, angle_e, width=ring_width, fc=color_code # '#DAF7A6'
,ec = 'none', alpha=alpha_value, antialiased=True))
bin_ends = 180 - np.array(bin_ends)
bin_ends = bin_ends[::-1]
values = np.array(values)
values = np.squeeze(values)
values = values[::-1]
# add a black background
add_ada_custom(0, 180, 0.8, "#000000")
color_shading = "#00FF00"
for i in range(len(values)):
#print(bin_ends.shape, '____all____bin_____')
#print(values.shape, '___all_____values____')
if i < 5:
print(bin_ends[i], bin_ends[i + 1], values[i], '________________________')
add_ada_custom(bin_ends[i], bin_ends[i + 1], values[i], color_shading)
white_border = 1
border_color = '#FFFFFF'
add_to_ada(ada, radius, -(image_height / 2 - radius / 2), radius + white_border, 0, 180, white_border,
border_color, 1)
tick_len = 20
tick_color = '#FFFFFF'
tick_width = 1.0 / 2
for i in range(len(bin_ends)):
add_to_ada(ada, radius, -(image_height / 2 - radius / 2), radius + white_border,
bin_ends[i] - tick_width / 2, bin_ends[i] + tick_width / 2, tick_len, tick_color, 10)
driver_action = driver_action / 180.0 * math.pi
start = np.array([radius, -(image_height / 2 - radius / 2) - 2])
delta = np.array([radius * math.cos(driver_action), radius * math.sin(driver_action)]) * 0.8
color_driver = "#0000FF"
ada.drawing_area.add_artist(FancyArrowPatch(start, start + delta, linewidth=2, color=color_driver))
return ada
_, short_name = os.path.split(name[j])
short_name = short_name.split(".")[0]
for i in range(images.shape[0]):
        # fix the ground-truth speed to a constant so the drawn arrow has a fixed length
        locs[i, 1] = 10.0
        fig = plt.figure(figsize=(16, 12))
course_bin, speed_bin = car_stop_model.get_bins()
course_bin = [-math.pi/2] + course_bin + [math.pi/2]
course_bin = np.array(course_bin)*180/math.pi + 90
ax_original = plt.gca()
ax_original.set_axis_off()
ax_original.get_xaxis().set_visible(False)
ax_original.get_yaxis().set_visible(False)
plt.imshow(images[i, :, :, :])
plt.axis('off')
course = softmax(predict[i:(i + 1), 0:FLAGS.discretize_n_bins])
course = course/np.max(course)
print(course_bin, course, '!'*10)
ada2 = plot_greens(course_bin, course, 1280, 501, 200, -locs[i, 0]*180/math.pi+90)
ax_original.add_artist(ada2)
plt.show()
if not os.path.exists(os.path.join(dir_name,'viz')):
os.mkdir(os.path.join(dir_name,'viz'))
if not os.path.exists(os.path.join(dir_name,'viz', short_name)):
os.mkdir(os.path.join(dir_name, 'viz', short_name))
fig.savefig(os.path.join(dir_name, 'viz', short_name, '{0:04}.png'.format(i)),
                    bbox_inches='tight', pad_inches=-0.04, transparent=True, dpi=100)
plt.close()
print(short_name)
print("showing visualization for video %s" % name[j])
def vis_continuous_interpolated(tout, predict, frame_rate, car_stop_model,
j=0, save_visualize=False, dir_name="temp", vis_radius=10, need_softmax=True, return_first=False):
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
images = copy.deepcopy(decoded[j, :, :, :, :])
_, hi, wi, _ = images.shape
locs = locs[j, :, :]
def gen_mask(bin_ends, values, radius, height, width):
# convert bin_ends to bin centers
new_ends = []
for i in range(len(bin_ends) - 1):
new_ends.append((bin_ends[i] + bin_ends[i + 1]) / 2)
# RGBA
out = np.zeros((height, width, 4), dtype=np.uint8)
xy = np.dstack(np.meshgrid(np.arange(width / 2 - radius, width / 2 + radius),
np.arange(height - radius, height),
indexing='ij')).reshape((-1, 2))
# filter out invalid speed
v = np.stack((xy[:, 0] - width / 2, height - xy[:, 1]), axis=1)
speed_norm = np.sqrt(v[:, 0] ** 2 + v[:, 1] ** 2)
valid_speed = np.less(speed_norm, radius)
xy = xy[valid_speed, :]
speed_norm = speed_norm[valid_speed]
v = v[valid_speed]
course_norm = np.arccos(1.0 * v[:, 0] / speed_norm)
        course_norm = np.degrees(course_norm)
import torch
import numpy as np
from torch.utils.data import Dataset
import os, glob
import re
import cv2
import math
from random import shuffle
import torch.nn.functional as F
from torchvision import transforms
from tqdm import tqdm
from PIL import Image
import scipy.io as io
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
from mpl_toolkits.mplot3d import Axes3D
import time
import open3d as o3d
from queue import Queue
class Standardize(object):
""" Standardizes a 'PIL Image' such that each channel
gets zero mean and unit variance. """
def __call__(self, img):
return (img - img.mean(dim=(1,2), keepdim=True)) \
/ torch.clamp(img.std(dim=(1,2), keepdim=True), min=1e-8)
def __repr__(self):
return self.__class__.__name__ + '()'
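# Hypothetical usage in a torchvision pipeline (the image path is
# illustrative only):
#   preprocess = transforms.Compose([transforms.ToTensor(), Standardize()])
#   tensor = preprocess(Image.open('frame.png'))  # per-channel zero mean, unit variance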
def rotate(xyz):
def dotproduct(v1, v2):
return sum((a * b) for a, b in zip(v1, v2))
def length(v):
return math.sqrt(dotproduct(v, v))
def angle(v1, v2):
num = dotproduct(v1, v2)
den = (length(v1) * length(v2))
if den == 0:
print('den = 0')
print(length(v1))
print(length(v2))
print(num)
ratio = num/den
ratio = np.minimum(1, ratio)
ratio = np.maximum(-1, ratio)
return math.acos(ratio)
p1 = np.float32(xyz[1, :])
p2 = np.float32(xyz[6, :])
v1 = np.subtract(p2, p1)
mod_v1 = np.sqrt(np.sum(v1 ** 2))
x = np.float32([1., 0., 0.])
y = np.float32([0., 1., 0.])
z = np.float32([0., 0., 1.])
theta = math.acos(np.sum(v1 * z) / (mod_v1 * 1)) * 360 / (2 * math.pi)
# M = cv2.getAffineTransform()
p = np.cross(v1, z)
# if sum(p)==0:
# p = np.cross(v1,y)
p[2] = 0.
# ang = -np.minimum(np.abs(angle(p, x)), 2 * math.pi - np.abs(angle(p, x)))
ang = angle(x, p)
if p[1] < 0:
ang = -ang
M = [[np.cos(ang), np.sin(ang), 0.],
[-np.sin(ang), np.cos(ang), 0.], [0., 0., 1.]]
M = np.reshape(M, [3, 3])
xyz = np.transpose(xyz)
    xyz_ = np.matmul(M, xyz)
import numpy as np
import trimesh
import pyrender
from pyrender.constants import RenderFlags
from pyrender.light import DirectionalLight
from pyrender.node import Node
import cv2
from copy import deepcopy
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
def get_mesh(verts, faces):
vert_colors = np.tile([128, 128, 128], (verts.shape[0], 1))
mesh = trimesh.Trimesh(
vertices=verts,
faces=faces,
vertex_colors=vert_colors,
process=False
)
return mesh
def get_cube(ps, s=0.15):
diffs = np.array([
[-1, -1, -1, -1, 1, 1, 1, 1],
[-1, -1, 1, 1, -1, -1, 1, 1],
[-1, 1, -1, 1, -1, 1, -1, 1]
], dtype=np.float32) * s
ps = ps.reshape(-1, 3)
result = []
for p in ps:
result.append((diffs + p.reshape(3, -1)).T)
result = np.concatenate(result)
return result
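# Illustrative example: a single point expands to the 8 corners of an
# axis-aligned cube with half-side s, so
#   get_cube(np.zeros(3), s=0.15).shape == (8, 3)
# and N input points yield an (8 * N, 3) array.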
def get_bbox(points):
left = np.min(points[:, 0])
right = np.max(points[:, 0])
top = np.min(points[:, 1])
bottom = np.max(points[:, 1])
h = bottom - top
w = right - left
if h > w:
cx = (left + right) / 2
left = cx - h / 2
right = left + h
else:
cy = (bottom + top) / 2
top = cy - w / 2
bottom = top + w
return left, top, right, bottom
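# Illustrative example: the box is grown along its shorter side to make
# it square, e.g. points spanning x in [0, 4] and y in [0, 2] give
#   get_bbox(np.array([[0., 0.], [4., 2.]])) == (0.0, -1.0, 4.0, 3.0)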
class Pyrenderer:
def __init__(self, is_shading=True, d_light=3., scale=None):
self.is_shading = is_shading
self.light = (.3, .3, .3) if self.is_shading else (1., 1., 1.)
self.scene = pyrender.Scene(bg_color=[255, 255, 255], ambient_light=self.light)
self.size = None
self.viewer = None
self.T = None
self.K_no_scale = None
self.K = None
self.camera = None
self.camera_node = None
self.d_light = d_light
self.light_nodes = None
self.scale = scale
    def add_raymond_light(self, s=1, d=0.25, T=np.eye(4)):
import os
import numpy as np
import matplotlib.pyplot as plt
import pretty_midi as pm
import mir_eval
import peamt.features.utils as utils
from peamt.features.rhythm import rhythm_histogram, rhythm_dispersion
from peamt.features.utils import get_time, str_to_bar_beat
import warnings
warnings.filterwarnings("ignore")
result_folder = "validate_rhythm_feature_plots_update"
utils.create_folder(result_folder)
MIDI_path = "app/static/data/all_midi_cut"
cut_points_path = "app/static/data/cut_points"
all_midi_path = "app/static/data/A-MAPS_1.2_with_pedal"
systems = ["kelz", "lisu", "google", "cheng"]
fs = 100
N_features = 8
sfo, sfd, stdmean, stdmin, stdmax, drmean, drmin, drmax = range(N_features)
N_computes = 5  # strict-quantize, quantize, and three noise levels, each compared against the original
noise_level = [0.1, 0.2, 0.3]
strict_quantize, quantize, noisy1, noisy2, noisy3 = range(N_computes)
all_MIDI = [elt for elt in os.listdir(MIDI_path) if not elt.startswith('.')]
N_outputs = len(all_MIDI) * len(systems)
# get cut points
cut_points_dict = dict()
for filename in os.listdir(cut_points_path):
musicname = filename[:-4]
cut_points_dict[musicname] = np.genfromtxt(os.path.join(cut_points_path, filename), dtype='str')
def plot_hist(x1, x2, x3, x4, x5, title, limits, filename, n_bins=50):
plt.figure(figsize=(6.4, 8.2))
plt.subplot(511)
plt.hist(x1, bins=n_bins, range=limits)
plt.ylabel("strict quantize/original")
plt.title(title)
plt.subplot(512)
plt.hist(x2, bins=n_bins, range=limits)
plt.ylabel("quantize/original")
plt.subplot(513)
plt.hist(x3, bins=n_bins, range=limits)
plt.ylabel("noisy({:.1f})/original".format(noise_level[0]))
plt.subplot(514)
plt.hist(x4, bins=n_bins, range=limits)
plt.ylabel("noisy({:.1f})/original".format(noise_level[1]))
plt.subplot(515)
plt.hist(x5, bins=n_bins, range=limits)
plt.ylabel("noisy({:.1f})/original".format(noise_level[2]))
plt.savefig(filename)
# plt.show()
def add_noise(intervals,noise_level):
    return intervals + np.random.uniform(-noise_level, noise_level, size=[intervals.shape[0], 1])
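# Example: each interval row receives a single random offset drawn from
# [-noise_level, noise_level]; the size-(n, 1) draw broadcasts across the
# onset and offset columns, so each note is shifted rigidly, e.g.
#   add_noise(np.array([[0.0, 1.0]]), 0.1)  # -> e.g. array([[0.03, 1.03]])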
def print_line(values, feature_name, feature_index):
    print(feature_name + "\t| {:.3f} \t {:.3f} \t| {:.3f} \t {:.3f} \t| {:.3f} \t {:.3f} \t| {:.3f} \t {:.3f} \t| {:.3f} \t {:.3f}".format(
        np.mean(values[feature_index, strict_quantize]), np.std(values[feature_index, strict_quantize]),
        np.mean(values[feature_index, quantize]), np.std(values[feature_index, quantize]),
        np.mean(values[feature_index, noisy1]), np.std(values[feature_index, noisy1]),
        np.mean(values[feature_index, noisy2]), np.std(values[feature_index, noisy2]),
        np.mean(values[feature_index, noisy3]), np.std(values[feature_index, noisy3])))
"""
Deproject 2-d circular annular spectra to 3-d object properties.
This module implements the "onion-skin" approach popular in X-ray
analysis of galaxy clusters and groups to estimate the three-dimensional
temperature, metallicity, and density distributions of an optically-thin
plasma from the observed (projected) two-dimensional data, arranged in
concentric circular annuli.
:Copyright: Smithsonian Astrophysical Observatory (2009, 2019)
:Author: <NAME> (<EMAIL>), <NAME> (<EMAIL>)
"""
from collections import defaultdict, OrderedDict
import copy
import logging
from math import pi
import re
import numpy
from astropy.table import Table
from astropy import units as u
from astropy.cosmology import Planck15
from sherpa.plot import plotter
from sherpa.astro import ui
from sherpa.astro.io import read_pha
from sherpa.models.parameter import CompositeParameter
from . import specstack
from . import simplegraph
from . import fieldstore
__all__ = ("Deproject", "deproject_from_xflt")
_sherpa_logger = logging.getLogger('sherpa')
arcsec_per_rad = (u.radian / u.arcsec).to(1)
class Deproject(specstack.SpecStack):
"""Support deprojecting a set of spectra (2-d concentric circular annuli).
Parameters
----------
radii : AstroPy Quantity representing an angle on the sky
The edges of each annulus, which must be circular, concentric,
in ascending order, and >= 0. If there are n annuli then there are n+1
radii, since the start and end of the sequence must be given.
The units are expected to be arcsec, arcminute, or degree.
theta : AstroPy Quantity (scalar or array) representing an angle
The "fill factor" of each annulus, given by the azimuthal coverage
of the shell in degrees. The value can be a scalar, so the same
value is used for all annuli, or a sequence with a length
matching the number of annuli. Since the annulus assumes circular
symmetry there is no need to define the starting point of the
measurement, for cases when the value is less than 360 degrees.
angdist : None or AstroPy.Quantity, optional
The angular-diameter distance to the source. If not given then
it is calculated using the source redshift along with the
`cosmology` attribute.
cosmology : None or astropy.cosmology object, optional
The cosmology used to convert redshift to an angular-diameter
distance. This is used when `angdist` is None. If `cosmology`
is None then the `astropy.cosmology.Planck15` Cosmology
object is used.
Examples
--------
The following highly-simplified example fits a deprojected model
to data from three annuli - ann1.pi, ann2.pi, and ann3.pi - and
also calculates errors on the parameters using the confidence
method::
>>> dep = Deproject([0, 10, 40, 100] * u.arcsec)
>>> dep.load_pha('ann1.pi', 0)
>>> dep.load_pha('ann2.pi', 1)
>>> dep.load_pha('ann3.pi', 2)
>>> dep.subtract()
>>> dep.notice(0.5, 7.0)
>>> dep.set_source('xsphabs * xsapec')
>>> dep.set_par('xsapec.redshift', 0.23)
>>> dep.thaw('xsapec.abundanc')
>>> dep.set_par('xsphabs.nh', 0.087)
>>> dep.freeze('xsphabs.nh')
>>> dep.fit()
>>> dep.fit_plot('rstat')
>>> errs = dep.conf()
>>> dep.conf_plot('density')
"""
@u.quantity_input(radii='angle', theta='angle', angdist='length')
def __init__(self, radii,
theta=360 * u.deg,
angdist=None,
cosmology=None):
nshell = numpy.size(radii) - 1
if nshell < 1:
raise ValueError('radii parameter must be a sequence ' +
'with at least two values')
dr = radii[1:] - radii[:-1]
if numpy.any(dr <= 0):
raise ValueError('radii parameter must be in increasing order')
# All values must be >= 0
#
if radii[0] < 0:
raise ValueError('radii must be >= 0')
self.radii = radii
self.nshell = nshell
ntheta = numpy.size(theta)
if ntheta == 1:
thetas = numpy.repeat(theta, nshell)
elif ntheta == nshell:
thetas = theta
else:
raise ValueError('theta must be a scalar or ' +
'match the number of annuli')
theta_min = thetas.min()
if theta_min <= (0.0 * u.deg):
raise ValueError('theta must be > 0 degrees')
theta_max = thetas.max()
if theta_max > (360.0 * u.deg):
raise ValueError('theta must be <= 360 degrees')
self._theta = thetas
if angdist is not None:
self._set_angdist(angdist)
else:
self._angdist = None
self._redshift = None
self._fit_results = None
self._covar_results = None
self._conf_results = None
self._cosmology = Planck15 if cosmology is None else cosmology
super().__init__()
def load_pha(self, specfile, annulus):
if annulus < 0 or annulus >= self.nshell:
raise ValueError("Expected 0 <= annulus < " +
"{} but sent {}".format(self.nshell, annulus))
super().load_pha(specfile, annulus)
def _get_redshift(self):
if self._redshift is None:
self._redshift = self.find_parval('redshift')
return self._redshift
def _set_redshift(self, redshift):
self._redshift = redshift
# Perhaps the angular-diameter distance shouldn't be cached if
# not explicitly set. This lets the value be updated if the
# redshift or cosmology object is updated. Alternatively, we
# tell users they have to manually set da if these things
# change.
#
def _get_angdist(self):
if self._angdist is None:
da = self.cosmology.angular_diameter_distance(self.redshift)
self._angdist = da
return self._angdist
@u.quantity_input(angdist='length')
def _set_angdist(self, angdist):
if angdist <= 0:
raise ValueError("angdist must be > 0")
self._angdist = angdist
redshift = property(_get_redshift, _set_redshift, None,
"Source redshift")
angdist = property(_get_angdist, _set_angdist, None,
"Angular size distance (an AstroPy quantity)")
@property
def cosmology(self):
"""Return the cosmology object (only used if angdist not set)"""
return self._cosmology
def _calc_vol_norm(self):
r"""Calculate the normalized volumes of the deprojected views.
Sets the `vol_norm` field to a matrix of the normalized volumes
of the cylindrical annuli intersecting with the spherical shell.
The matrix is defined as $volume[i, j] / V_sphere$, where
$i$ represents the shell and $j$ the annulus (with indexes
starting at 0), $V_sphere = 4/3 \pi r_o^3$, and $r_o$ is the
outermost radius of the shells.
"""
# The units for the radii are not important here
r = self.radii.value
theta_rad = self._theta.to_value(u.rad)
cv = numpy.zeros([self.nshell, self.nshell])
v = numpy.zeros([self.nshell, self.nshell])
for a, ra0 in enumerate(r[:-1]):
# Annulus
ra1 = r[a + 1]
ra0sq = ra0**2
ra1sq = ra1**2
for s, rs0 in enumerate(r[:-1]):
# Spherical shell
rs1 = r[s + 1]
if s >= a:
# Volume of cylindrical annulus (ra0,ra1) intersecting
# the sphere (rs1)
#
rs1sq = rs1**2
rterm = (rs1sq - ra0sq)**1.5 - (rs1sq - ra1sq)**1.5
cv[s, a] = 2.0 * theta_rad[a] / 3 * rterm
# Volume of annulus (ra0,ra1) intersecting the spherical
# shell (rs0,rs1)
v[s, a] = cv[s, a]
if s - a > 0:
v[s, a] -= cv[s - 1, a]
self.vol_norm = v / (4. * pi / 3. * r[-1]**3)
def _create_name(self, model_name, annulus):
"""Create the name used for a model component for the given annulus.
Parameters
----------
model_name : str
The Sherpa model name (e.g. 'xsphabs').
annulus : int
The annulus number.
Returns
-------
name : str
The name of the model component.
Notes
-----
At present there is no real need to allow the naming scheme to
be changed (e.g. in a sub-class), but it is useful to help
record where names are created.
"""
# This is perhaps a bit "over engineered"
#
name = '{}_{}'.format(model_name, annulus)
return name
def _create_src_model_components(self):
"""Create the model components for each shell."""
self._reset_model_comps()
# Find the generic components in source model expression
# and set up their names.
#
RE_model = re.compile(r'\b \w+ \b', re.VERBOSE)
for match in RE_model.finditer(self.srcmodel):
model_type = match.group()
store = dict(type=model_type,
start=match.start(),
end=match.end())
self.srcmodel_comps.append(store)
# For each shell create the corresponding model components so they can
# be used later to create composite source models for each dataset
for shell in range(self.nshell):
for srcmodel_comp in self.srcmodel_comps:
model_comp = {}
model_comp['type'] = srcmodel_comp['type']
name = self._create_name(model_comp['type'], shell)
model_comp['name'] = name
model_comp['shell'] = shell
comp = ui.create_model_component(model_comp['type'], name)
model_comp['object'] = comp
self.model_comps.append(model_comp)
def set_source(self, srcmodel='xsphabs*xsapec'):
"""Create a source model for each annulus.
Unlike the standard `set_source` command, this version just
uses the <model name>, not <model name>.<username>, since
the <username> is automatically created for users by appending
the annulus number to <model name>.
Parameters
----------
srcmodel : str, optional
The source model expression applied to each annulus.
See Also
--------
set_bkg_model, set_par
Notes
-----
The data must have been read in for all the data before calling
this method (this matches Sherpa, where you can not call set_source
unless you have already loaded the data to fit).
Examples
--------
The following two calls have the same result: model instances
called 'xsphabs<annulus>' and 'xsapec<annulus>' are created
for each annulus, and the source expression for the annulus
set to their multiplication:
>>> dep.set_source()
>>> dep.set_source('xsphabs * xsapec')
Use the XSPEC vapec model rather than the apec model to
represent the plasma emission:
>>> dep.set_source('xsphabs * xsvapec')
"""
# We can not check that all data has been loaded in (that is,
# if there are multiple data sets per annulis), but we can at
# least ensure that there is a dataset loaded
# for each annulus.
#
seen = set([])
for dataset in self.datasets:
seen.add(dataset['annulus'])
expected = set(range(self.nshell))
diff = sorted(list(expected.difference(seen)))
if len(diff) == 1:
raise ValueError("missing data for annulus {}".format(diff[0]))
elif len(diff) > 0:
raise ValueError("missing data for annuli {}".format(diff))
self.srcmodel = srcmodel
self._calc_vol_norm()
self._create_src_model_components()
# TODO: isn't it better to loop over annuli, out to in, to
# avoid some repeated work?
#
for dataset in self.datasets:
dataid = dataset['id']
annulus = dataset['annulus']
modelexprs = []
for shell in range(annulus, self.nshell):
srcmodel = self.srcmodel
for model_comp in reversed(self.srcmodel_comps):
i0 = model_comp['start']
i1 = model_comp['end']
model_comp_name = self._create_name(model_comp['type'],
shell)
srcmodel = srcmodel[:i0] + model_comp_name + srcmodel[i1:]
f = self.vol_norm[shell, annulus]
modelexprs.append('{} * {}'.format(f, srcmodel))
modelexpr = " + ".join(modelexprs)
print('Setting source model for dataset %d = %s' % (dataid, modelexpr))
ui.set_source(dataid, modelexpr)
def set_bkg_model(self, bkgmodel):
"""Create a background model for each annulus.
The background model is the same between the annuli, except that
a scaling factor is added for each annulus (to allow for
normalization uncertainities). The scaling factors are labelled
'bkg_norm_<obsid>', and at least one of these must be frozen
(otherwise it is likely to be degenerate with the background
normalization, causing difficulties for the optimiser).
Parameters
----------
bkgmodel : model instance
The background model expression applied to each annulus.
Unlike set_source this should be the actual model instance,
and not a string.
See Also
--------
set_source, set_par
Examples
--------
Model the background with a single power-law component:
>>> dep.set_bkg_model(xspowerlaw.bpl)
"""
self.bkgmodel = bkgmodel
# TODO:
# - record the background components in the same way the source
# is done
# - should the background be allowed to have different components
# per annulus?
#
bkg_norm = {}
for obsid in self.obsids:
bkg_norm_name = 'bkg_norm_%d' % obsid
print('Creating model component xsconstant.%s' % bkg_norm_name)
bcomp = ui.create_model_component('xsconstant', bkg_norm_name)
bkg_norm[obsid] = bcomp
for dataset in self.datasets:
print('Setting bkg model for dataset %d to bkg_norm_%d' % (dataset['id'], dataset['obsid']))
ui.set_bkg_model(dataset['id'],
bkg_norm[dataset['obsid']] * bkgmodel)
def get_shells(self):
"""How are the annuli grouped?
An annulus may have multiple data sets associated with it, but
it may also be linked to other annuli due to tied parameters.
The return value is per group, in the ordering needed for
the outside-to-inside onion skin fit, where the keys for
the dictionary are 'annuli' and 'dataids'.
Returns
-------
groups : list of dicts
Each dictionary has the keys 'annuli' and 'dataids', and
lists the annuli and data identifiers that are fit together.
The ordering matches that of the onion-skin approach, so
the outermost group first.
See Also
--------
get_radii, tie_par
Examples
--------
For a 3-annulus deprojection where there are no parameter ties
to combine annului:
>>> dep.get_shells()
[{'annuli': [2], 'dataids': [2]},
{'annuli': [1], 'dataids': [1]},
{'annuli': [0], 'dataids': [0]}]
After tie-ing the abundance parameter for the outer two shells,
there are now two groups of annuli:
>>> dep.tie_par('xsapec.abundanc', 1, 2)
Tying xsapec_2.Abundanc to xsapec_1.Abundanc
>>> dep.get_shells()
[{'annuli': [1, 2], 'dataids': [1, 2]},
{'annuli': [0], 'dataids': [0]}]
"""
# Map from model component name (e.g. 'xsapec_2') to shell
# number.
#
cpt_map = {}
for model_comp in self.model_comps:
mname = model_comp['name']
assert mname not in cpt_map
cpt_map[mname] = model_comp['shell']
# Find the connected shells/annuli.
#
graph = simplegraph.SimpleGraph()
for shell in range(self.nshell):
graph.add_link(shell, shell)
for model_comp in self.model_comps:
shell = model_comp['shell']
for par in model_comp['object'].pars:
if par.link is None:
continue
# For the moment only support tied parameters (i.e.
# they are set equal). It should be possible to just
# iterate through the parts of the composite parameter
# and extract the links (since there could be more than
# one), but do not try this yet.
#
if isinstance(par.link, CompositeParameter):
raise ValueError("Parameter link for " +
"{} is not simple".format(par.fullname))
# If the link is to a "unknown" component then we could
# iterate through all the source expressions to find
# the relevant data sets, and hence annuli, but leave
# that for a later revision since the current assumption
# is that all source components are handled by deproject.
#
linkname = par.link.modelname
try:
lshell = cpt_map[linkname]
except KeyError:
raise RuntimeError("Model component " +
"{} is not ".format(linkname) +
"managed by deproject")
graph.add_link(shell, lshell)
# Rely on the shell numbering to be numeric and in ascending
# order to get the list of shells that must be fit together.
#
fit_groups = sorted([sorted(grp) for grp in graph.get_groups()])
# It is possible for the groups to be invalid here, in that
# the user could have tied together annuli 2 and 4, but not
# 3, which breaks the onion-skin approach.
#
for grp in fit_groups:
# ensure that the membership is n, n+1, ..., m with no
# gaps.
if len(grp) == 1:
continue
grp = numpy.asarray(grp)
dg = grp[1:] - grp[:-1]
if numpy.any(dg != 1):
raise ValueError("Non-consecutive annuli are " +
"tied together: {}".format(grp))
# What datasets are used for each group?
#
out = []
for anns in fit_groups:
# What dataset ids are associated with these annuli
dataids = [x['id'] for x in self.datasets
if x['annulus'] in anns]
out.append({'annuli': anns, 'dataids': dataids})
return list(reversed(out))
def get_radii(self, units='arcsec'):
"""What are the radii of the shells?
Return the inner and outer edge of each annulus, in the given
units. Physical units (e.g. 'kpc') can only be used if a redshift or
angular-diameter distance has been set. This does not apply the
grouping that `get_shells` does.
Parameters
----------
units : str or astropy.units.Unit, optional
The name of the units to use for the returned radii. They must
be an angle - such as 'arcsec' - or a length - such as 'kpc'
or 'Mpc' (case is important).
See Also
--------
get_shells
Returns
-------
rlo, rhi : astropy.units.Quantity, astropy.units.Quantity
The inner and outer radius for each annulus.
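
        Examples
        --------
        Retrieve the inner and outer radii in physical units (a sketch;
        this assumes a redshift or angular-diameter distance has been
        set so the conversion to kpc is defined):

        >>> rlo, rhi = dep.get_radii(units='kpc')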
"""
# Do we know about this unit? Give a slightly-more helpful
# message than the default from the AstroPy parser.
#
try:
unit = u.Unit(units)
except ValueError:
raise ValueError("Invalid unit: expected a value like " +
"'arcsec' or 'kpc'.")
radii = self.radii.copy()
if unit.physical_type == 'angle':
radii = radii.to(unit)
elif unit.physical_type == 'length':
# Treat the angular distance value as having length / radian
rscale = self.angdist / (1 * u.radian)
# This would convert to m
# radii = (radii * rscale).decompose()
radii = (radii * rscale).to(unit)
else:
raise u.UnitConversionError("Must be given an angle or length")
return radii[:-1], radii[1:]
def guess(self):
"""Guess the starting point by fitting the projected data.
        Use a fitting scheme - based on the suggestion in the XSPEC project
        documentation - to estimate the starting position of the fit (the
initial fit parameters). This can be useful since it can reduce
the time taken to fit the deprojected data and help avoid
the deprojection from getting stuck in a local minimum.
See Also
--------
fit
Notes
-----
Each annulus, from outer to inner, is fit individually, ignoring
the contribution from any outer annulus. After the fit, the
model normalisation is corrected for the volume-filling factor of
the annulus. If there are any tied parameters between annuli then
these annuli are combined together (fit simultaneously).
Unlike the Sherpa guess function, this does *not* change the
limits of any parameter.
Possible improvements include:
- re-normalize each spectrum before fitting.
- transfer the model parameters of the inner-most shell in a
group to the next set of shells to fit.
"""
groups = self.get_shells()
ngroups = len(groups)
assert (ngroups > 0) & (ngroups <= self.nshell)
if ngroups != self.nshell:
print("Note: annuli have been tied together")
for group in groups:
annuli = group['annuli']
nannuli = len(annuli)
assert nannuli > 0
dataids = group['dataids']
msg = 'Projected fit to '
if len(annuli) == 1:
msg += 'annulus {} '.format(annuli[0])
else:
msg += 'annuli {} '.format(annuli)
if len(dataids) == 1:
msg += 'dataset: {}'.format(dataids[0])
else:
msg += ' datasets: {}'.format(dataids)
print(msg)
orig_models = [(did, ui.get_source(did)) for did in dataids]
# perhaps this logic should be packaged up
shells = dict([(x['id'], x['annulus']) for x in self.datasets])
try:
# Is there a better way to re-create the "base" model?
#
for did in dataids:
srcmodel = self.srcmodel
shell = shells[did]
for model_comp in reversed(self.srcmodel_comps):
i0 = model_comp['start']
i1 = model_comp['end']
model_comp_name = self._create_name(model_comp['type'],
shell)
srcmodel = srcmodel[:i0] + model_comp_name + srcmodel[i1:]
ui.set_source(did, srcmodel)
# TODO: run renormalize on each dataset before the fit
ui.fit(*dataids)
finally:
for did, smdl in orig_models:
ui.set_source(did, smdl)
# Correct the normalization
#
fs = self.vol_norm.diagonal()
for did in dataids:
shell = shells[did]
f = fs[shell]
for mdl in self.model_comps:
if mdl['shell'] != shell:
continue
for par in mdl['object'].pars:
if par.name != 'norm':
continue
par.val /= f
def _freeze_model_pars(self):
"""Freeze, and return, all thawed parameters in the fit
Returns
-------
pars : list of sherpa.parameter.Parameter instances
The parameters that have been frozen.
See Also
--------
_thaw_model_pars
"""
out = []
for model_comp in self.model_comps:
out.extend([p for p in model_comp['object'].pars
if not p.frozen])
return out
def _thaw_model_pars(self, pars, message=False):
"""Thaw the parameters, with optional screen message.
Parameters
----------
pars : list of sherpa.parameter.Parameter instances
The parameters to thaw. Note that thaw is called whatever the
state of a parameter.
message : bool, optional
If True then a screen message is displayed for each parameter.
See Also
--------
_freeze_model_pars
"""
for par in pars:
if message:
print('Thawing {}'.format(par.fullname))
par.thaw()
def _apply_per_group(self, verb, store):
"""Apply a procedure per onion-skin group (from outer to inner).
For each shell - run from outer to inner - apply the onion-skin
approach (so free up the shell but freeze the contribution from
outer shells) to runfunc - which is given the data ids to use,
and then store the results of getfunc. Once all the shells have
been processed return a structure containing the results.
Parameters
----------
verb : str
This is the first part of the message displayed to users,
per annulus.
store : fieldstore.FieldStore instance
This object will run the function and then parse the output
into the necessary form.
Returns
-------
rvals : astropy.table.Table instance
The data, as a set of columns. The choice of columns is
controlled by the `store` object. Additional columns include
'annulus', 'rlo_ang', 'rhi_ang', 'rlo_phys', 'rhi_phys',
'density', and optionally 'density_lo' and 'density_hi'.
Notes
-----
Any parameter links result in annuli being grouped together
for a fit: that is, the fit will be a simultaneous fit to all
the datasets associated with the set of annuli.
It would be useful to add in extra metadata to the output, and
take advantage of the units support in AstroPy where possible.
"""
groups = self.get_shells()
ngroups = len(groups)
assert (ngroups > 0) & (ngroups <= self.nshell)
if ngroups != self.nshell:
print("Note: annuli have been tied together")
# Find all the thawed parameters so that they can be
# restored at the end of the fit, or in case of an error.
#
thawed = self._freeze_model_pars()
# We need to be able to map from dataset id to annulus, and
# from annulus to model components.
#
annulusmap = {x['id']: x['annulus'] for x in self.datasets}
# Get a list of all the annuli, in ascending order.
all_annuli = sorted(list({x['annulus'] for x in self.datasets}))
componentmap = defaultdict(list)
for mcomp in self.model_comps:
val = (mcomp['object'], mcomp['type'])
componentmap[mcomp['shell']].append(val)
# Store the data as a dictionary of arrays. It would be useful
# if the FieldStore instance could handle this - since it
# "knows" what the columns are going to be - but the current
# implementation calculates these columns when the run method
# is called (i.e. at run time), rather than before the
# onion-peel approach is called. It is possible that a
# re-design would be helpful (such as calculate the parameter
# names at the start, which would have other useful consequences
# once more-complicated model expressions are sorted), and then
# pass that information along, but for now let's see how this
# works.
#
out = OrderedDict()
try:
for group in groups:
annuli = group['annuli']
nannuli = len(annuli)
assert nannuli > 0
dataids = group['dataids']
msg = '{} '.format(verb)
if nannuli == 1:
msg += 'annulus {} '.format(annuli[0])
else:
msg += 'annuli {} '.format(annuli)
if len(dataids) == 1:
msg += ' dataset: {}'.format(dataids[0])
else:
msg += ' datasets: {}'.format(dataids)
print(msg)
res = store.run(annulusmap, componentmap, *dataids)
assert len(res) == nannuli
# Extract out the per-shell results and add to the
# output.
#
for shell in annuli:
if len(out) == 0:
# Note, add in extra fields to the stored fields
#
out['annulus'] = all_annuli
rlo_ang, rhi_ang = self.get_radii(units='arcsec')
out['rlo_ang'] = rlo_ang
out['rhi_ang'] = rhi_ang
rlo_phys, rhi_phys = self.get_radii(units='kpc')
out['rlo_phys'] = rlo_phys
out['rhi_phys'] = rhi_phys
for field in res[shell]:
out[field] = [None] * self.nshell
for field, value in res[shell].items():
assert out[field][shell] is None, shell
out[field][shell] = value
for model_comp in self.model_comps:
# Freeze the current annulus
if model_comp['shell'] in annuli:
print('Freezing {}'.format(model_comp['name']))
ui.freeze(model_comp['object'])
finally:
self._thaw_model_pars(thawed, message=True)
for k, vs in out.items():
assert vs is not None, k
# It's useful to have NumPy arrays for some of the following
# calculations, but do not convert those that already have
# units attached.
#
# Convert from None to numpy.NaN (which is assumed to
            # only occur in an error column (k ends in _lo or _hi)
# but this restriction is not checked
#
if not isinstance(vs, numpy.ndarray):
out[k] = numpy.asarray([numpy.nan if v is None else v
for v in vs])
# Add in the density calculation.
#
# The norm field should be identified at 'set_source' time
# so that it doesn't have to be re-discovered each time.
#
# For now look for .norm values in the returned structure,
# but could do this a number of ways.
#
normpars = [n for n in out.keys() if n.endswith('.norm')]
if len(normpars) == 0:
raise RuntimeError("Unable to find norm parameter!")
elif len(normpars) > 1:
raise RuntimeError("Multiple norm parameters found!")
normpar = normpars[0]
norms = out[normpar]
out['density'] = self._calc_density(norms)
normparlo = '{}_lo'.format(normpar)
normparhi = '{}_hi'.format(normpar)
try:
normlos = out[normparlo]
normhis = out[normparhi]
out['density_lo'] = self._calc_density(norms + normlos) - \
out['density']
out['density_hi'] = self._calc_density(norms + normhis) - \
out['density']
except KeyError:
pass
return Table(out)
def fit(self):
"""Fit the data using the "onion-peeling" method.
Unlike the normal Sherpa fit, this does not fit all the data
simultaneously, but instead fits the outermost annulus first,
then freezes its parameters and fits the annulus inside it,
repeating this until all annuli have been fit. At the end of
the fit all the parameters that were frozen are freed. The
results can also be retrieved with ``get_fit_results``.
Returns
-------
fits : astropy.table.Table instance
This records per-annulus data, such as the inner and outer
radius (`rlo_ang`, `rhi_ang`, `rlo_phys`, `rhi_phys`), the
final fit statistic and change in fit statistic (`statval` and
`dstatval`), the reduced statistic and q value (as `rstat`
and `qval`) if appropriate, and the thawed parameter values
(accessed using <model name>.<par name> syntax, where the
match is case sensitive).
See Also
--------
conf, covar, get_fit_results, guess, fit_plot
Notes
-----
If there are any tied parameters between annuli then these annuli
are combined together (fit simultaneously). The results from the
fits to each annulus can be retrieved after ``fit`` has been called
with the ``get_fit_results`` method.
The results have been separated out per annulus, even if several
annuli were combined in a fit due to tied parameters, and there is
no information in the returned structure to note this.
Examples
--------
Fit the annuli using the onion-peeling approach, and then plot
up the reduced statistic for each dataset:
>>> res = dep.fit()
>>> plt.clf()
>>> rmid = 0.5 * (res['rlo_phys'] + res['rhi_phys'])
>>> plt.plot(rmid, res['rstat'])
Plot the temperature-abundance values per shell, color-coded
by annulus:
>>> plt.clf()
>>> plt.plot(res['xsapec.kT'], res['xsapec.Abundanc'],
... c=res['annulus'])
>>> plt.colorbar()
>>> plt.xlabel('kT')
>>> plt.ylabel('Abundance')
        Plot up the temperature distribution as a function of radius
from the fit::
>>> dep.fit()
>>> dep.fit_plot('xsmekal.kt')
"""
# For now return nothing
self._fit_results = None
self._fit_results = self._apply_per_group('Fitting',
fieldstore.FitStore())
return self._fit_results
def covar(self):
"""Estimate errors using covariance, using the "onion-peeling" method.
It is assumed that ``fit`` has been called. The results can also be
retrieved with ``get_covar_results``.
Returns
-------
errors : astropy.table.Table instance
This records per-annulus data, such as the inner and outer
radius (`rlo_ang`, `rhi_ang`, `rlo_phys`, `rhi_phys`), the
sigma and percent values, and parameter results (accessed
using <model name>.<par name>, <model name>.<par name>_lo,
and <model name>.<par name>_hi syntax, where the match is
case sensitive). The _lo and _hi values are symmetric for
covar, that is the _lo value will be the negative of the
_hi value.
See Also
--------
conf, fit, get_covar_results, covar_plot
Examples
--------
Run a fit and then error analysis, then plot up the abundance
against temperature values including the error bars. Since
the covariance routine returns symmetric error bars, the
<param>_hi values are used in the plot::
>>> dep.fit()
>>> errs = dep.covar()
>>> kt, abund = errs['xsapec.kT'], errs['xsapec.Abundanc']
>>> dkt = errs['xsapec.kT_hi']
>>> dabund = errs['xsapec.Abundanc_hi']
>>> plt.clf()
>>> plt.errorbar(kt, abund, xerr=dkt, yerr=dabund, fmt='.')
        Plot up the temperature distribution as a function of radius,
including the error bars calculated by the covar routine::
>>> dep.fit()
>>> dep.covar()
>>> dep.covar_plot('xsmekal.kt')
"""
self._covar_results = None
self._covar_results = self._apply_per_group('Covariance for',
fieldstore.CovarStore())
return self._covar_results
def conf(self):
"""Estimate errors using confidence, using the "onion-peeling" method.
It is assumed that ``fit`` has been called. The results can also be
retrieved with ``get_conf_results``.
Returns
-------
errors : astropy.table.Table instance
This records per-annulus data, such as the inner and outer
radius (`rlo_ang`, `rhi_ang`, `rlo_phys`, `rhi_phys`), the
sigma and percent values, and parameter results (accessed
using <model name>.<par name>, <model name>.<par name>_lo,
and <model name>.<par name>_hi syntax, where the match is
case sensitive).
See Also
--------
covar, fit, get_conf_results, conf_plot
Examples
--------
Run a fit and then error analysis, then plot up the abundance
against temperature values including the error bars. Note that
the Matplotlib `errorbar` routine requires "positive" error values
whereas the <param>_lo values are negative, hence they are
negated in the creation of ``dkt`` and ``dabund``::
>>> dep.fit()
>>> errs = dep.conf()
>>> kt, abund = errs['xsapec.kT'], errs['xsapec.Abundanc']
>>> ktlo, kthi = errs['xsapec.kT_lo'], errs['xsapec.kT_hi']
>>> ablo, abhi = errs['xsapec.Abundanc_lo'], errs['xsapec.Abundanc_hi']
>>> dkt = np.vstack((-ktlo, kthi))
>>> dabund = np.vstack((-ablo, abhi))
>>> plt.clf()
>>> plt.errorbar(kt, abund, xerr=dkt, yerr=dabund, fmt='.')
        Plot up the temperature distribution as a function of radius,
including the error bars calculated by the conf routine::
>>> dep.fit()
>>> dep.conf()
>>> dep.conf_plot('xsmekal.kt')
"""
self._conf_results = None
self._conf_results = self._apply_per_group('Confidence for',
fieldstore.ConfStore())
return self._conf_results
def get_fit_results(self):
"""What are the fit results, per annulus?
This returns the fit result for each annulus from the last time
that the ``fit`` method was called. It *does not* check to see
if anything has changed since the last ``fit`` call (e.g.
parameters being tied together or untied, or a manual fit
to a shell). Note that ``get_shells`` should be used to find out
if the shells were grouped together.
Returns
-------
fits : astropy.table.Table instance
This records per-annulus data, such as the inner and outer
radius (`rlo_ang`, `rhi_ang`, `rlo_phys`, `rhi_phys`), the
final fit statistic and change in fit statistic (`statval` and
`dstatval`), the reduced statistic and q value (as `rstat`
and `qval`) if appropriate, and the thawed parameter values
(accessed using <model name>.<par name> syntax, where the
match is case sensitive).
See Also
--------
fit, get_conf_results, get_covar_results, get_radii, get_shells,
fit_plot
"""
if self._fit_results is None:
raise ValueError("The fit method has not been called")
return copy.deepcopy(self._fit_results)
def get_covar_results(self):
"""What are the covar results, per annulus?
This returns the fit result for each annulus from the last time
that the ``covar`` method was called. It *does not* check to see
if anything has changed since the last ``covar`` call (e.g.
parameters being tied together or untied, or a manual fit
to a shell). Note that ``get_shells`` should be used to find out
if the shells were grouped together.
Returns
-------
errors : astropy.table.Table instance
This records per-annulus data, such as the inner and outer
radius (`rlo_ang`, `rhi_ang`, `rlo_phys`, `rhi_phys`), the
sigma and percent values, and parameter results (accessed
using <model name>.<par name>, <model name>.<par name>_lo,
and <model name>.<par name>_hi syntax, where the match is
case sensitive).
See Also
--------
fit, get_conf_results, get_fit_results, get_radii, get_shells,
covar_plot
"""
if self._covar_results is None:
raise ValueError("The covar method has not been called")
return copy.deepcopy(self._covar_results)
def get_conf_results(self):
"""What are the conf results, per annulus?
This returns the fit result for each annulus from the last time
that the ``conf`` method was called. It *does not* check to see
if anything has changed since the last ``conf`` call (e.g.
parameters being tied together or untied, or a manual fit
to a shell). Note that ``get_shells`` should be used to find out
if the shells were grouped together (although this can be
reconstructed from the `datasets` field of each `ErrorEstResults`
instance).
Returns
-------
errors : astropy.table.Table instance
This records per-annulus data, such as the inner and outer
radius (`rlo_ang`, `rhi_ang`, `rlo_phys`, `rhi_phys`), the
sigma and percent values, and parameter results (accessed
using <model name>.<par name>, <model name>.<par name>_lo,
and <model name>.<par name>_hi syntax, where the match is
case sensitive).
See Also
--------
fit, get_covar_results, get_fit_results, get_radii, get_shells,
conf_plot
"""
if self._conf_results is None:
raise ValueError("The conf method has not been called")
return copy.deepcopy(self._conf_results)
def _calc_density(self, norms, ne_nh_ratio=1.18):
"""Calculate the electron density for each shell.
This performs the calculation described in `get_density`.
Parameters
----------
norms : sequence of float
The normalization values, in annulus order.
        ne_nh_ratio : float, optional
The n_e to n_h ratio (default 1.18).
Returns
-------
dens : astropy.units.quantity.Quantity instance
The densities calculated for each shell, in units of cm^-3.
"""
if len(norms) != self.nshell:
raise ValueError("norms has wrong length")
        # Manual deconstruction/reconstruction of units
#
DA_cm = self.angdist.to_value(u.cm)
rmax_rad = self.radii[-1].to_value(u.rad)
z = self.redshift
r_sphere = rmax_rad * DA_cm
# volume of sphere enclosing outer shell (cm^3)
#
# volume = 4 * pi / 3 * r_sphere**3
# factor = 4 * pi * DA_cm**2 * 1e14 * (1.0 + z)**2 / volume * ne_nh_ratio
#
# and after manual cancellation of the 4 pi terms
#
vterm = 1.0 / 3 * r_sphere**3
factor = DA_cm**2 * 1e14 * (1.0 + z)**2 / vterm * ne_nh_ratio
return numpy.sqrt(factor * numpy.asarray(norms)) * u.cm**(-3)
def get_density(self):
"""Calculate the electron density for each shell.
        Convert the model normalizations (assumed to match the standard
definition for XSPEC thermal-plasma models) for each shell.
Returns
-------
dens : astropy.units.quantity.Quantity instance
The densities calculated for each shell, in units of cm^-3.
See Also
--------
find_norm
Notes
-----
The electron density is taken to be::
n_e^2 = norm * 4*pi * DA^2 * 1e14 * (1+z)^2 / volume * ne_nh_ratio
where::
norm = model normalization from sherpa fit
DA = angular size distance (cm)
volume = volume (cm^3)
ne_nh_ratio = 1.18
The model components for each volume element (the intersection of the
annular cylinder ``a`` with the spherical shell ``s``) are multiplied
by a volume normalization::
vol_norm[s,a] = volume[s,a] / v_sphere
v_sphere = volume of sphere enclosing outer annulus
With this convention the ``volume`` used in calculating the electron
density is simply ``v_sphere``.
"""
norms = [self.find_norm(s) for s in range(self.nshell)]
return self._calc_density(norms)
def _radial_plot(self, plottitle, xunits, ys, ylabel,
dys=None,
xlog=True, ylog=False,
overplot=False, clearwindow=True):
"""Create a plot of the data versus radius (of the annuli).
Parameters
----------
plottitle : str
The title for the plot
xunits : str or astropy.units.Unit
The X-axis units (a length or angle, such as 'Mpc' or
'arcsec', where the case is important).
ys : sequence of float
The Y values to plot (must be in annuli order).
ylabel : str
The label for the Y axis
dys : None or ndarray, optional
The error bars on the y axis. This can be None or a ndarray
of one or two dimensions (N points or N by 2).
xlog : bool, optional
Should the x axis be drawn with a log scale (default True)?
ylog : bool, optional
Should the y axis be drawn with a log scale (default False)?
overplot : bool, optional
Clear the plot or add to existing plot?
clearwindow : bool, optional
How does this interact with overplot?
"""
rlo, rhi = self.get_radii(units=xunits)
# drop units support immediately as ChIPS doesn't recognize
# this (can support them in matplotlib, but given the
# Sherpa plotting API it isn't clear how well supported it
# would be)
#
rmid = (rlo.value + rhi.value) / 2
dr = rhi.value - rlo.value
# Attempt to handle LaTeX differences between the backends,
# but the support is *very* limited so may not work here.
#
# The aim is to support the matplotlib backend, with minimal
# support for ChIPS.
#
xlabel = _add_unit('Radius', rlo)
if ylabel.find('_') > -1 or ylabel.find('^') > -1:
# Unfortunately the matplotlib version is a "global"
# check, so doesn't check if parts of the term are
# already enclosed in '$'. This is a problem for those
# labels that have AstroPy unit strings, since they
# have already been protected.
#
if not (plotter.name == 'pylab' and ylabel.find('$') > -1):
ylabel = plotter.get_latex_for_string(ylabel)
prefs = plotter.get_data_plot_defaults()
prefs['xerrorbars'] = True
# We handle error bars manually for ChIPS (it has to be done
# for asymmetric Y errors, but there also seems to be issues
# with the X axis errors not being drawn which I do not
# want to investigate too much just right now).
#
manual_errors = plotter.name == 'chips' and dys is not None
prefs['yerrorbars'] = dys is not None
if manual_errors:
prefs['yerrorbars'] = False
prefs['xlog'] = xlog
prefs['ylog'] = ylog
# Access the underlying plot machinery directly, rather than
# use the sherpa.plot.DataPlot object, since Sherpa does not
# support asymmetric errors but plotter.plot does, at least
# for the pylab backend.
#
try:
plotter.begin()
plotter.plot(rmid, ys, dys, dr, plottitle, xlabel, ylabel,
overplot, clearwindow, **prefs)
# For some reason the X error bar isn't being drawn with ChIPS
# so force it.
#
if plotter.name == 'chips':
import pychips
# Assume the current curve is the data we have just plotted
# and we do not want to replot the symbol.
#
crv = pychips.get_curve()
crv.symbol.style = 'none'
crv.err.up = True
crv.err.down = True
crv.err.left = True
crv.err.right = True
ndim = numpy.asarray(dys).ndim
if ndim == 2:
dylo = dys[0]
dyhi = dys[1]
elif ndim == 1:
dylo = dys
dyhi = dys
else:
dylo = None
dyhi = None
errs = [dylo, dyhi, dr / 2, dr / 2]
pychips.add_curve(rmid, ys, errs, crv)
except BaseException as exc:
plotter.exceptions()
raise exc
else:
plotter.end()
def par_plot(self, par, units='kpc',
xlog=True, ylog=False,
overplot=False, clearwindow=True):
"""Plot up the parameter as a function of radius.
This plots up the current parameter values. The ``fit_plot``,
``conf_plot``, and ``covar_plot`` routines display the fit
and error results for these parameters.
Parameters
----------
par : str
The parameter name, specified as <model_type>.<par_name>.
units : str or astropy.units.Unit, optional
The X-axis units (a length or angle, such as 'Mpc' or
'arcsec', where the case is important).
xlog : bool, optional
Should the x axis be drawn with a log scale (default True)?
ylog : bool, optional
Should the y axis be drawn with a log scale (default False)?
overplot : bool, optional
Clear the plot or add to existing plot?
clearwindow : bool, optional
How does this interact with overplot?
See Also
--------
conf_plot, covar_plot, density_plot, fit_plot
Examples
--------
Plot the temperature as a function of radius.
>>> dep.par_plot('xsapec.kt')
Label the radii with units of arcminutes for the abundanc
parameter of the xsapec model:
>>> dep.par_plot('xsapec.abundanc', units='arcmin')
"""
# Assume par is "model_name.par_name" and we do not have to
# worry about case for model_name, but may have to for par_name
#
mname, pname = self._split_parname(par)
pname = pname.lower()
cpts = [cpt['object'] for cpt in self.model_comps
if cpt['type'] == mname]
        # Probably can not get here (thanks to the get_par call below),
        # but just in case
if len(cpts) == 0:
raise ValueError("No matching model {} for par={}".format(mname,
par))
# Assume they are all the same (they better be)
#
yunits = None
for p in cpts[0].pars:
if p.name.lower() != pname:
continue
yunits = p.units
break
# Also should not happen, so report if it does but do not
# error out
if yunits is None:
print("WARNING: unable to find match for parameter {}".format(par))
yunits = ''
ylabel = par
if yunits.strip() != '':
ylabel += " ({})".format(yunits)
pvals = self.get_par(par)
self._radial_plot(par, units, pvals, ylabel,
xlog=xlog, ylog=ylog,
overplot=overplot, clearwindow=clearwindow)
def density_plot(self, units='kpc',
xlog=True, ylog=True,
overplot=False, clearwindow=True):
"""Plot up the electron density as a function of radius.
The density is displayed with units of cm^-3. This plots up the
density calculated using the current normalization parameter
values. The ``fit_plot``, ``conf_plot``, and ``covar_plot``
routines display the fit and error results for these parameters.
Parameters
----------
units : str or astropy.units.Unit, optional
The X-axis units (a length or angle, such as 'Mpc' or
'arcsec', where the case is important).
xlog : bool, optional
Should the x axis be drawn with a log scale (default True)?
ylog : bool, optional
Should the y axis be drawn with a log scale (default False)?
overplot : bool, optional
Clear the plot or add to existing plot?
clearwindow : bool, optional
How does this interact with overplot?
See Also
--------
conf_plot, covar_plot, fit_plot, par_plot
Examples
--------
Plot the density as a function of radius.
>>> dep.density_plot()
Label the radii with units of arcminutes:
>>> dep.density_plot(units='arcmin')
"""
nes = self.get_density().value
# Unfortunately the LaTeX emulation in the two backends is not
# comparable, which limits the fidelity of the label.
#
# ylabel = 'n$_e$ (cm$^{-3}$)'
ylabel = r'n_e\ (\mathrm{cm^{-3}})'
self._radial_plot('density', units, nes, ylabel,
xlog=xlog, ylog=ylog,
overplot=overplot, clearwindow=clearwindow)
def fit_plot(self, field, results=None,
units='kpc',
xlog=True, ylog=False,
overplot=False, clearwindow=True):
"""Plot up the fit results as a function of radius.
This method can be used to plot up the last fit results or
a previously-stored set. To include error bars on the
dependent values use the `conf_plot` or `covar_plot` methods.
Parameters
----------
field : str
The column to plot from the fit results (the match is case
insensitive).
results : None or astropy.table.Table instance
The return value from the ``fit`` or ``get_fit_results``
methods.
units : str or astropy.units.Unit, optional
The X-axis units (a length or angle, such as 'Mpc' or
'arcsec', where the case is important).
xlog : bool, optional
Should the x axis be drawn with a log scale (default True)?
ylog : bool, optional
Should the y axis be drawn with a log scale (default False)?
overplot : bool, optional
Clear the plot or add to existing plot?
clearwindow : bool, optional
How does this interact with overplot?
See Also
--------
fit, get_fit_results, conf_plot, covar_plot, density_plot, par_plot
Examples
--------
Plot the temperature as a function of radius from the last
fit:
>>> dep.fit_plot('xsapec.kt')
Plot the reduced fit statistic from the last fit:
>>> dep.fit_plot('rstat')
Plot the density with the radii labelled in arcminutes and the
density shown on a log scale:
>>> dep.fit_plot('density', units='arcmin', ylog=True)
Overplot the current fit results on those from a previous fit,
where ``fit1`` was returned from the ``fit`` or ``get_fit_results``
methods:
>>> dep.fit_plot('xsapec.abundanc', results=fit1)
>>> dep.fit_plot('xsapec.abundanc', overplot=True)
"""
if results is None:
plotdata = self.get_fit_results()
else:
plotdata = results
try:
ys = plotdata[field]
except KeyError:
flower = field.lower()
names = [n for n in plotdata.keys() if n.lower() == flower]
if len(names) == 0:
raise ValueError("Unrecognized field {}".format(field))
elif len(names) > 1:
raise RuntimeError("Multiple fields match {}".format(field))
field = names[0]
ys = plotdata[field]
ylabel = _add_unit(field, ys)
self._radial_plot(field, units, ys, ylabel,
xlog=xlog, ylog=ylog,
overplot=overplot, clearwindow=clearwindow)
def conf_plot(self, field, results=None,
units='kpc',
xlog=True, ylog=False,
overplot=False, clearwindow=True):
"""Plot up the confidence errors as a function of radius.
This method can be used to plot up the last conf results or
a previously-stored set. Any error bars are shown at the
scale they were calculated (as given by the ``sigma`` and
``percent`` columns of the results).
Parameters
----------
field : str
The column to plot from the fit results (the match is case
insensitive).
results : None or astropy.table.Table instance
The return value from the ``conf`` or ``get_conf_results``
methods.
units : str or astropy.units.Unit, optional
The X-axis units (a length or angle, such as 'Mpc' or
'arcsec', where the case is important).
xlog : bool, optional
Should the x axis be drawn with a log scale (default True)?
ylog : bool, optional
Should the y axis be drawn with a log scale (default False)?
overplot : bool, optional
Clear the plot or add to existing plot?
clearwindow : bool, optional
How does this interact with overplot?
See Also
--------
fit, get_conf_results, fit_plot, covar_plot, density_plot, par_plot
Notes
-----
Error bars are included on the dependent axis if the results
contain columns that match the requested field with suffixes
of '_lo' and '_hi'. These error bars are asymmetric, which is
different to ``covar_plot``.
If a limit is missing (i.e. it is a NaN) then no error bar is
drawn. This can make it look like the error is very small.
Examples
--------
Plot the temperature as a function of radius from the last
fit, including error bars:
>>> dep.conf_plot('xsapec.kt')
Plot the density with the radii labelled in arcminutes and the
density shown on a log scale:
>>> dep.conf_plot('density', units='arcmin', ylog=True)
Overplot the current conf results on those from a previous fit,
where ``conf1`` was returned from the ``conf`` or ``get_conf_results``
methods:
>>> dep.conf_plot('xsapec.abundanc', results=conf1)
>>> dep.conf_plot('xsapec.abundanc', overplot=True)
"""
if results is None:
plotdata = self.get_conf_results()
else:
plotdata = results
try:
ys = plotdata[field]
except KeyError:
flower = field.lower()
names = [n for n in plotdata.keys() if n.lower() == flower]
if len(names) == 0:
raise ValueError("Unrecognized field {}".format(field))
elif len(names) > 1:
raise RuntimeError("Multiple fields match {}".format(field))
field = names[0]
ys = plotdata[field]
try:
flo = '{}_lo'.format(field)
fhi = '{}_hi'.format(field)
            dys = numpy.vstack((-plotdata[flo], plotdata[fhi]))
        except KeyError:
            dys = None

        ylabel = _add_unit(field, ys)
        self._radial_plot(field, units, ys, ylabel,
                          dys=dys,
                          xlog=xlog, ylog=ylog,
                          overplot=overplot, clearwindow=clearwindow)
import numpy as np
from itertools import combinations as comb
def combn(m, n):
return np.array(list(comb(range(m), n)))
def Borda(mat):
np.fill_diagonal(mat, 1)
mat = mat/(mat+mat.T)
np.fill_diagonal(mat, 0)
return np.sum(mat, axis=1)
def BTL(Data, probs=False, max_iter=10**5):
    '''
    Computes the BTL (Bradley-Terry-Luce) model parameters using the
    maximum-likelihood principle.
    This function is adapted from the Matlab version provided by <NAME>
    http://personal.psu.edu/drh20/code/btmatlab
    '''
wm = Data
if probs:
np.fill_diagonal(wm, 1)
wm = wm/(wm+wm.T)
np.fill_diagonal(wm, 0)
n = wm.shape[0]
nmo = n-1
pi = np.ones(nmo, dtype=float)
gm = (wm[:,range(nmo)]).T + wm[range(nmo),:]
wins = np.sum(wm[range(nmo),], axis=1)
gind = gm>0
z = np.zeros((nmo,n))
pisum = z
for _ in range(max_iter):
pius = np.repeat(pi, n).reshape(nmo, -1)
piust = (pius[:,range(nmo)]).T
piust = np.column_stack((piust, np.repeat(1,nmo)))
pisum[gind] = pius[gind]+piust[gind]
z[gind] = gm[gind] / pisum[gind]
newpi = wins / np.sum(z, axis=1)
if np.linalg.norm(newpi - pi, ord=np.inf) <= 1e-6:
newpi = np.append(newpi, 1)
return newpi/sum(newpi)
pi = newpi
raise RuntimeError('did not converge')
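# A quick sanity check for Borda and BTL on a toy win-count matrix
# (hypothetical data; entry [i, j] holds the number of wins of item i
# over item j):
#
#     wins = np.array([[0., 8., 6.],
#                      [2., 0., 5.],
#                      [4., 5., 0.]])
#     Borda(wins.copy())   # per-item Borda scores
#     BTL(wins.copy())     # ML strengths, normalized to sum to one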
'''
AB: numpy array where each row (instance) is \in [-1,1]^d
CD: numpy array where each row (instance) is \in [-1,1]^d
'''
def analogy(AB,CD):
    ''' equivalent analogies a:b::c:d b:a::d:c c:d::a:b d:c::b:a '''
''' equivalent analogies a:b::d:c b:a::c:d c:d::b:a d:c::a:b '''
S = 1 - np.abs(AB-CD)
cond0 = AB*CD < 0
cond1 = (AB==0) & (CD!=0)
cond2 = (AB!=0) & (CD==0)
S[ cond0 | cond1 | cond2 ] = 0
if S.ndim==1:
S = S.reshape(-1, len(S))
return np.mean(S, axis=1)
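# Worked example: with a=[1, 0], b=[0, 0], c=[1, 1], d=[0, 1] we get
# AB = a-b = [1, 0] and CD = c-d = [1, 0], so S = 1 - |AB-CD| = [1, 1]
# and analogy(AB, CD) returns 1.0, a perfect analogical proportion.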
'''
arr_trn: numpy array containing n instances \in [0,1]^d
y_trn: numpy array of length n containing the rank of instances in arr_trn
arr_tst: numpy array containing n instances \in [0,1]^d
k: (integer) the no. of nearest neighbors
agg: (string) aggregation function to be used
'''
def able2rank_arithmetic(arr_trn, y_trn, arr_tst, k, agg):
arr_trn = arr_trn[ np.argsort(y_trn),: ]
nr_trn = arr_trn.shape[0]
nr_tst = arr_tst.shape[0]
nc = arr_trn.shape[1]
cmb_trn = combn(nr_trn, 2)
a_minus_b = arr_trn[ cmb_trn[:,0] ] - arr_trn[ cmb_trn[:,1] ]
cmb_tst = combn(nr_tst, 2)
mat = np.identity(nr_tst)-1
for t in range(cmb_tst.shape[0]):
i, j = cmb_tst[t,:]
c_minus_d = (arr_tst[i,:] - arr_tst[j,:]).reshape(-1, nc)
c_minus_d = np.repeat( c_minus_d, cmb_trn.shape[0], axis=0 )
d_minus_c = -c_minus_d
abcd = analogy(a_minus_b, c_minus_d)
abdc = analogy(a_minus_b, d_minus_c)
'''assuming arr_trn is ranked from top to bottom'''
        merged = np.column_stack((abcd, abdc))
"""Main module."""
# LIBRARIES
import numpy as np # scientific computing lib
from pyfar import Signal # managing audio signals
import sounddevice as sd # sounddevice / hostapi handling
import soundfile as sf # cross-platform file reading/writing
import queue # information exchange between threads
import sys                 # used for printing errors to std stream
import tempfile # create temporary files
import threading # create threads and non-blocking events
import os.path # file writing on harddrive
import time # timing program execution
# DEVICE CLASS
class Device():
"""Wrapper-class for sounddevice."""
def __init__(self, inp=0, out=1):
# initialize parameters
self.input = inp
self.output = out
sd.default.device = (self.input, self.output)
sd.default.samplerate = sd.query_devices(
device=self.input)["default_samplerate"]
def set_device(self, inp, out):
self.input = inp
self.output = out
sd.default.device = (self.input, self.output)
sd.default.samplerate = sd.query_devices(
device=self.input)["default_samplerate"]
def show_io(self):
print("\n\033[1m" + "Input:\n" + "\033[0m",
sd.query_devices(device=self.input))
print("\033[1m" + "Output:\n" + "\033[0m",
sd.query_devices(device=self.output))
def show_max_channels(self):
print('\nMax Channels for Input Device:',
sd.query_devices(device=self.input)['max_input_channels'])
print('Max Channels for Output Device:',
sd.query_devices(device=self.output)['max_output_channels'])
def set_channels(self, ichan, ochan):
sd.default.channels = (ichan, ochan)
def show_all(self):
print(sd.query_devices())
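# Usage sketch for Device (device indices are machine dependent; the
# 0/1 defaults here are placeholders, not guaranteed to exist):
#
#     dev = Device(inp=0, out=1)
#     dev.show_io()
#     dev.set_channels(2, 2)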
# AUDIO IO CLASS
class _AudioIO(object):
"""Abstract Container Class for haiopy-classes"""
def __init__(self,
blocksize=2048,
buffersize=20,
sampling_rate=48000,
dtype='float32',):
# initialize global-parameters
self.blocksize = blocksize
self.buffersize = buffersize
self.sampling_rate = sampling_rate
# provided by sd.Streams
self._VALID_DTYPES = ["int8", "int16", "int32", "float32"]
self.dtype = dtype
@property
def blocksize(self):
"""Get Blocksize"""
return self._blocksize
@blocksize.setter
def blocksize(self, value):
"""Set Blocksize"""
self._blocksize = value
@property
def buffersize(self):
"""Get Buffersize"""
return self._buffersize
@buffersize.setter
def buffersize(self, value):
"""Set Buffersize"""
self._buffersize = value
@property
def sampling_rate(self):
"""Get Sampling_Rate"""
return self._sampling_rate
@sampling_rate.setter
def sampling_rate(self, value):
"""Set Sampling_Rate"""
self._sampling_rate = value
@property
def dtype(self):
"""Get dtype"""
return self._dtype
@dtype.setter
def dtype(self, value):
"""Set dtype"""
if value in self._VALID_DTYPES:
self._dtype = value
else:
raise ValueError('Wrong dtype')
def check_input_sampling_rate(self, sr):
if self.sampling_rate is None or self.sampling_rate != sr:
self.sampling_rate = sr
print('Sampling_Rates adjusted!')
def check_input_dtype(self, dt):
if self.dtype is None or self.dtype == dt:
self.dtype = dt
else:
raise ValueError(
'Dtypes do not Match!', self.dtype, dt)
# RECORD CLASS
class Record(_AudioIO):
"""
Class for duration-based or infinite
recording of WAV or pyfar.Signal-objects with chosen sounddevice.
"""
def __init__(self,
audio_in,
blocksize=2048,
buffersize=20,
device_in=0,
channels_in=2,
sampling_rate=48000,
dtype='float32',):
_AudioIO.__init__(self, blocksize, buffersize, sampling_rate, dtype)
# Initialize valid parameter spaces
self._VALID_TYPES = ["wav", "signal"]
self.audio_in = audio_in
self.device_in = device_in
self.channels_in = channels_in
self.recording = self.previously_recording = False
self.audio_q = queue.Queue()
self.data_array = []
self.check_audio_in()
@property
def device_in(self):
""" Get the Index of the Input Device """
return self._device_in
@device_in.setter
def device_in(self, idx):
""" Set the Index of the Input Device """
if idx in range(len(sd.query_devices())) \
and sd.query_devices(idx)['max_input_channels'] > 0:
self._device_in = int(idx)
else:
raise ValueError('index of input device (device_in) not found')
@property
def channels_in(self):
""" Get number of Input Channels """
return self._channels_in
@channels_in.setter
def channels_in(self, value):
""" Set number of Input Channels """
if value <= sd.query_devices(self._device_in)['max_input_channels']:
self._channels_in = int(value)
else:
            raise ValueError('number of input channels exceeds input device, \
                max input channels:',
                             sd.query_devices(
                                 self._device_in)['max_input_channels'])
@property
def audio_in(self):
""" Get the Type of Recorded Audio """
return self._audio_in
@audio_in.setter
def audio_in(self, value):
""" Set the Type of Recorded Audio """
self._audio_in = value
def create_stream(self, device=None):
self.stream = sd.InputStream(
samplerate=self.sampling_rate, device=self.device_in,
channels=self.channels_in, blocksize=self.blocksize,
callback=self.audio_callback, dtype=self.dtype)
self.stream.start()
def audio_callback(self, indata, frames, time, status):
"""This is called (from a separate thread) for each audio block."""
if self.recording is True:
self.audio_q.put(indata.copy())
self.previously_recording = True
else:
if self.previously_recording:
self.audio_q.put(None)
self.previously_recording = False
def check_audio_in(self):
if self.audio_in == 'signal':
self.type_in = self.audio_in
elif self.audio_in == 'wav':
self.type_in = self.audio_in
self.filename = tempfile.mktemp(prefix='Record_',
suffix='.wav',
dir='')
elif isinstance(self.audio_in, str) \
and self.audio_in.split('.')[-1] == 'wav':
self.type_in = 'wav'
if os.path.isfile(self.audio_in):
raise FileExistsError('File already exists!')
else:
self.filename = self.audio_in
else:
raise TypeError("Incorrect type, needs to be wav or Signal.")
def file_writing_thread(self, *, q, **soundfile_args):
"""Write data from queue to file until *None* is received."""
with sf.SoundFile(**soundfile_args) as file:
while True:
data = q.get()
if data is None:
break
file.write(data)
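    # Writer threads drain `audio_q` until the callback pushes the `None`
    # sentinel (emitted when recording stops). A sketch, assuming a Record
    # instance `rec` recording to WAV:
    #
    #     t = threading.Thread(
    #         target=rec.file_writing_thread,
    #         kwargs=dict(q=rec.audio_q, file=rec.filename, mode='x',
    #                     samplerate=int(rec.sampling_rate),
    #                     channels=rec.channels_in))
    #     t.start()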
def data_writing_thread(self, *, q):
"""Write data from queue to pyfar.Signal until *None* is received."""
while True:
data = q.get()
if data is None:
break
            self.data_array = np.append(self.data_array, np.array(data))
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import sys
import numpy as np
import pyarrow as pa
tensor_type_pairs = [
('i1', pa.int8()),
('i2', pa.int16()),
('i4', pa.int32()),
('i8', pa.int64()),
('u1', pa.uint8()),
('u2', pa.uint16()),
('u4', pa.uint32()),
('u8', pa.uint64()),
('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())
]
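# Each pair maps a NumPy dtype string to the matching Arrow type, so the
# parametrized tests below cover every fixed-width numeric element type.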
@pytest.mark.parametrize('sparse_tensor_type', [
pa.SparseCSRMatrix,
pa.SparseCOOTensor,
])
def test_sparse_tensor_attrs(sparse_tensor_type):
data = np.array([
[0, 1, 0, 0, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
[0, 3, 0, 0, 0],
])
dim_names = ['x', 'y']
sparse_tensor = sparse_tensor_type.from_dense_numpy(data, dim_names)
assert sparse_tensor.ndim == 2
assert sparse_tensor.size == 25
assert sparse_tensor.shape == data.shape
assert sparse_tensor.is_mutable
assert sparse_tensor.dim_name(0) == dim_names[0]
assert sparse_tensor.dim_names == dim_names
assert sparse_tensor.non_zero_length == 4
def test_sparse_tensor_coo_base_object():
data = np.array([[4], [9], [7], [5]])
coords = np.array([[0, 0], [0, 2], [1, 1], [3, 3]])
array = np.array([[4, 0, 9, 0],
[0, 7, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 5]])
sparse_tensor = pa.SparseCOOTensor.from_dense_numpy(array)
n = sys.getrefcount(sparse_tensor)
result_data, result_coords = sparse_tensor.to_numpy()
assert sys.getrefcount(sparse_tensor) == n + 2
sparse_tensor = None
assert np.array_equal(data, result_data)
assert np.array_equal(coords, result_coords)
assert result_coords.flags.f_contiguous # column-major
def test_sparse_tensor_csr_base_object():
data = np.array([[1], [2], [3], [4], [5], [6]])
indptr = np.array([0, 2, 3, 6])
indices = np.array([0, 2, 2, 0, 1, 2])
array = np.array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
sparse_tensor = pa.SparseCSRMatrix.from_dense_numpy(array)
n = sys.getrefcount(sparse_tensor)
result_data, result_indptr, result_indices = sparse_tensor.to_numpy()
assert sys.getrefcount(sparse_tensor) == n + 3
sparse_tensor = None
assert np.array_equal(data, result_data)
assert np.array_equal(indptr, result_indptr)
assert np.array_equal(indices, result_indices)
@pytest.mark.parametrize('sparse_tensor_type', [
pa.SparseCSRMatrix,
pa.SparseCOOTensor,
])
def test_sparse_tensor_equals(sparse_tensor_type):
def eq(a, b):
assert a.equals(b)
assert a == b
assert not (a != b)
def ne(a, b):
assert not a.equals(b)
assert not (a == b)
assert a != b
data = np.random.randn(10, 6)[::, ::2]
sparse_tensor1 = sparse_tensor_type.from_dense_numpy(data)
sparse_tensor2 = sparse_tensor_type.from_dense_numpy(
np.ascontiguousarray(data))
eq(sparse_tensor1, sparse_tensor2)
data = data.copy()
data[9, 0] = 1.0
sparse_tensor2 = sparse_tensor_type.from_dense_numpy(
np.ascontiguousarray(data))
ne(sparse_tensor1, sparse_tensor2)
@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs)
def test_sparse_tensor_coo_from_dense(dtype_str, arrow_type):
dtype = np.dtype(dtype_str)
data = np.array([[4], [9], [7], [5]]).astype(dtype)
coords = np.array([[0, 0], [0, 2], [1, 1], [3, 3]])
array = np.array([[4, 0, 9, 0],
[0, 7, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 5]]).astype(dtype)
tensor = pa.Tensor.from_numpy(array)
# Test from numpy array
sparse_tensor = pa.SparseCOOTensor.from_dense_numpy(array)
repr(sparse_tensor)
assert sparse_tensor.type == arrow_type
result_data, result_coords = sparse_tensor.to_numpy()
assert np.array_equal(data, result_data)
assert np.array_equal(coords, result_coords)
# Test from Tensor
sparse_tensor = pa.SparseCOOTensor.from_tensor(tensor)
repr(sparse_tensor)
assert sparse_tensor.type == arrow_type
result_data, result_coords = sparse_tensor.to_numpy()
assert np.array_equal(data, result_data)
assert np.array_equal(coords, result_coords)
@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs)
def test_sparse_tensor_csr_from_dense(dtype_str, arrow_type):
dtype = np.dtype(dtype_str)
dense_data = np.array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]]).astype(dtype)
data = np.array([[1], [2], [3], [4], [5], [6]])
indptr = np.array([0, 2, 3, 6])
indices = np.array([0, 2, 2, 0, 1, 2])
tensor = pa.Tensor.from_numpy(dense_data)
# Test from numpy array
sparse_tensor = pa.SparseCSRMatrix.from_dense_numpy(dense_data)
repr(sparse_tensor)
result_data, result_indptr, result_indices = sparse_tensor.to_numpy()
assert np.array_equal(data, result_data)
assert np.array_equal(indptr, result_indptr)
assert np.array_equal(indices, result_indices)
# Test from Tensor
sparse_tensor = pa.SparseCSRMatrix.from_tensor(tensor)
repr(sparse_tensor)
assert sparse_tensor.type == arrow_type
result_data, result_indptr, result_indices = sparse_tensor.to_numpy()
assert np.array_equal(data, result_data)
assert np.array_equal(indptr, result_indptr)
assert np.array_equal(indices, result_indices)
@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs)
def test_sparse_tensor_coo_numpy_roundtrip(dtype_str, arrow_type):
    dtype = np.dtype(dtype_str)
# This file is part of pyfesom
#
################################################################################
#
# Original matlab/python code by <NAME>, <NAME> and <NAME>.
#
# Contributers: <NAME>, <NAME>
#
# Modifications:
#
################################################################################
import numpy as np
import math as mt
import matplotlib as mpl
def scalar_r2g(al, be, ga, rlon, rlat):
'''
Converts rotated coordinates to geographical coordinates.
Parameters
----------
al : float
alpha Euler angle
be : float
beta Euler angle
ga : float
gamma Euler angle
rlon : array
1d array of longitudes in rotated coordinates
rlat : array
        1d array of latitudes in rotated coordinates
Returns
-------
lon : array
1d array of longitudes in geographical coordinates
lat : array
1d array of latitudes in geographical coordinates
'''
rad=mt.pi/180
al=al*rad
be=be*rad
ga=ga*rad
rotate_matrix=np.zeros(shape=(3,3))
rotate_matrix[0,0]=np.cos(ga)*np.cos(al)-np.sin(ga)*np.cos(be)*np.sin(al)
rotate_matrix[0,1]=np.cos(ga)*np.sin(al)+np.sin(ga)*np.cos(be)*np.cos(al)
rotate_matrix[0,2]=np.sin(ga)*np.sin(be)
rotate_matrix[1,0]=-np.sin(ga)*np.cos(al)-np.cos(ga)*np.cos(be)*np.sin(al)
rotate_matrix[1,1]=-np.sin(ga)*np.sin(al)+np.cos(ga)*np.cos(be)*np.cos(al)
rotate_matrix[1,2]=np.cos(ga)*np.sin(be)
rotate_matrix[2,0]=np.sin(be)*np.sin(al)
rotate_matrix[2,1]=-np.sin(be)*np.cos(al)
rotate_matrix[2,2]=np.cos(be)
rotate_matrix=np.linalg.pinv(rotate_matrix)
rlat=rlat*rad
rlon=rlon*rad
#Rotated Cartesian coordinates:
xr=np.cos(rlat)*np.cos(rlon)
yr=np.cos(rlat)*np.sin(rlon)
zr=np.sin(rlat)
#Geographical Cartesian coordinates:
xg=rotate_matrix[0,0]*xr + rotate_matrix[0,1]*yr + rotate_matrix[0,2]*zr
yg=rotate_matrix[1,0]*xr + rotate_matrix[1,1]*yr + rotate_matrix[1,2]*zr
zg=rotate_matrix[2,0]*xr + rotate_matrix[2,1]*yr + rotate_matrix[2,2]*zr
#Geographical coordinates:
lat = np.arcsin(zg)
lon= np.arctan2(yg, xg)
    a = np.where((np.abs(xg)+np.abs(yg))==0)
    if a[0].size > 0:
        lon[a] = 0
lat = lat/rad
lon = lon/rad
return (lon,lat)
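# Usage sketch (the Euler angles 50, 15, -90 are the values commonly used
# for rotated FESOM meshes; substitute the angles of your mesh):
#
#     rlon = np.array([0., 10.])
#     rlat = np.array([45., 50.])
#     lon, lat = scalar_r2g(50., 15., -90., rlon, rlat)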
def scalar_g2r(al, be, ga, lon, lat):
'''
Converts geographical coordinates to rotated coordinates.
Parameters
----------
al : float
alpha Euler angle
be : float
beta Euler angle
ga : float
gamma Euler angle
lon : array
1d array of longitudes in geographical coordinates
lat : array
1d array of latitudes in geographical coordinates
Returns
-------
rlon : array
1d array of longitudes in rotated coordinates
rlat : array
1d araay of latitudes in rotated coordinates
'''
rad=mt.pi/180
al=al*rad
be=be*rad
ga=ga*rad
rotate_matrix=np.zeros(shape=(3,3))
rotate_matrix[0,0]=np.cos(ga)*np.cos(al)-np.sin(ga)*np.cos(be)*np.sin(al)
rotate_matrix[0,1]=np.cos(ga)*np.sin(al)+np.sin(ga)*np.cos(be)*np.cos(al);
rotate_matrix[0,2]=np.sin(ga)*np.sin(be)
    rotate_matrix[1,0]=-np.sin(ga)*np.cos(al)-np.cos(ga)*np.cos(be)*np.sin(al)
    rotate_matrix[1,1]=-np.sin(ga)*np.sin(al)+np.cos(ga)*np.cos(be)*np.cos(al)
    rotate_matrix[1,2]=np.cos(ga)*np.sin(be)
    rotate_matrix[2,0]=np.sin(be)*np.sin(al)
    rotate_matrix[2,1]=-np.sin(be)*np.cos(al)
    rotate_matrix[2,2]=np.cos(be)
import argparse
import torch
import os
from dassl.utils import setup_logger, set_random_seed, collect_env_info
from dassl.config import get_cfg_default
from dassl.engine import build_trainer
import numpy as np
import pandas as pd
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from submission.NeurIPS_2.util.support import (
expand_data_dim, normalization_channels,normalization_time, generate_common_chan_test_data, load_Cho2017, load_Physionet, load_BCI_IV,
correct_EEG_data_order, relabel, process_target_data, relabel_target, load_dataset_A, load_dataset_B, modify_data,reformat,
filterBank
)
from train_util import (
setup_cfg,print_args,reset_cfg,convert_to_dict,CustomModelCheckPoint,CustomeCSVLogger,CustomExperimentWriter,generate_excel_report,
generate_model_info_config,trainer_setup,generate_setup
)
from dassl.data.datasets.data_util import EuclideanAlignment
from collections import defaultdict
from numpy.random import RandomState
def generate_pred_MI_label(fold_predict_results, output_dir, predict_folder="predict_folder",
relabel=False):
probs = fold_predict_results[0]["probs"]
preds = fold_predict_results[0]["preds"]
    final_pred = np.zeros(preds.shape)
    final_prob = np.zeros(probs.shape)
for predict_result in fold_predict_results:
current_prob = predict_result["probs"]
current_pred = predict_result["preds"]
final_pred = final_pred + current_pred
final_prob = final_prob + current_prob
pred_output = list()
for trial_idx in range(len(final_pred)):
trial_pred = final_pred[trial_idx]
trial_prob = final_prob[trial_idx]
best_idx = -1
best_pred = -1
best_prob = -1
for idx in range(len(trial_pred)):
pred = trial_pred[idx]
prob = trial_prob[idx]
if pred > best_pred:
best_pred = pred
best_idx = idx
best_prob = prob
elif pred == best_pred:
if prob > best_prob:
best_idx = idx
best_prob = prob
pred_output.append(best_idx)
pred_output = np.array(pred_output)
if relabel:
pred_output = np.array([relabel_target(l) for l in pred_output])
print("update pred output : ",pred_output)
combine_folder = os.path.join(output_dir, predict_folder)
print("save folder : ",combine_folder)
np.savetxt(os.path.join(combine_folder, "pred_MI_label.txt"), pred_output, delimiter=',', fmt="%d")
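# The selection loop above is plurality voting across folds: each trial is
# assigned the class with the most fold-level votes, and the summed
# probability breaks ties between equally voted classes.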
def generate_assemble_result(fold_predict_results, output_dir,
predict_folder="predict_folder", relabel=False):
    # group the fold results by their test fold
group_test_folds = defaultdict(list)
final_fold_result = list()
for fold_result in fold_predict_results:
test_fold = fold_result["test_fold"]
group_test_folds[test_fold].append(fold_result)
for test_fold,test_fold_result in group_test_folds.items():
probs = test_fold_result[0]["probs"]
preds = test_fold_result[0]["preds"]
final_label = test_fold_result[0]["labels"]
        final_pred = np.zeros(preds.shape)
        final_prob = np.zeros(probs.shape)
for predict_result in test_fold_result:
current_prob = predict_result["probs"]
current_pred = predict_result["preds"]
final_pred = final_pred + current_pred
final_prob = final_prob + current_prob
pred_output = list()
for trial_idx in range(len(final_pred)):
trial_pred = final_pred[trial_idx]
trial_prob = final_prob[trial_idx]
best_idx = -1
best_pred = -1
best_prob = -1
for idx in range(len(trial_pred)):
pred = trial_pred[idx]
prob = trial_prob[idx]
if pred > best_pred:
best_pred = pred
best_idx = idx
best_prob = prob
elif pred == best_pred:
if prob > best_prob:
best_idx = idx
best_prob = prob
pred_output.append(best_idx)
pred_output = np.array(pred_output)
if relabel:
pred_output = np.array([relabel_target(l) for l in pred_output])
final_label = np.array([relabel_target(l) for l in final_label])
acc = np.mean(pred_output == final_label)
print("test fold {} has acc {} ".format(test_fold, acc))
# current_test_fold = test_fold_prefix + str(test_fold + 1)
result = {
"test_fold": test_fold,
"test_acc": acc
}
final_fold_result.append(result)
result = pd.DataFrame.from_dict(final_fold_result)
result_output_dir = os.path.join(output_dir, predict_folder)
if not os.path.isdir(result_output_dir):
os.makedirs(result_output_dir)
result_filename = 'ensemble_result.xlsx'
result.to_excel(os.path.join(result_output_dir, result_filename), index=False)
#
from scipy.io import loadmat
def load_test_data_from_file(provide_path,dataset_type):
temp = loadmat(provide_path)
datasets = temp['datasets'][0]
target_dataset = None
list_r_op = None
if len(datasets) == 1:
dataset = datasets[0]
dataset = dataset[0][0]
target_dataset = dataset
else:
for dataset in datasets:
dataset = dataset[0][0]
dataset_name = dataset['dataset_name'][0]
if dataset_name == dataset_type:
target_dataset = dataset
data = target_dataset['data'].astype(np.float32)
    label = np.squeeze(target_dataset['label'])
"""
<NAME>
camera.py
Construct a camera matrix and apply it to project points onto an image plane.
___
/ _ \
| / \ |
| \_/ |
\___/ ___
_|_|_/[_]\__==_
[---------------]
| O /---\ |
| | | |
| \___/ |
[---------------]
[___]
| |\\
| | \\
[ ] \\_
/|_|\ ( \
//| |\\ \ \
// | | \\ \ \
// |_| \\ \_\
// | | \\
//\ | | /\\
// \ | | / \\
// \ | | / \\
// \|_|/ \\
// [_] \\
// H \\
// H \\
// H \\
// H \\
// H \\
// \\
// \\
Lights...camera...Comp Vis!
"""
import sys
import numpy as np
from numpy import sin, cos
def getIntrinsic(f, d, ic, jc):
"""
Get intrinsic camera matrix, K, from the camera's focal length (f), pixel
dimensions (d), and optical axis center (ic, jc)
Convert pixel dimensions to millimeters by dividing by 1,000
Get the adjusted focal length s_f by dividing f by d
Construct and return K
"""
d /= 1000
s = f / d
K = np.asmatrix([[s, 0, ic],
[0, s, jc],
[0, 0, 1]])
return K
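# Worked example: f = 12 (mm) and d = 6 (micron pixels, converted to
# 0.006 mm) give s = 12 / 0.006 = 2000, so a point 1 mm off-axis at a
# depth of 1 m projects about s * 1/1000 = 2 px away from (ic, jc).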
def getExtrinsic(rotVec, transVec):
"""
Get extrinsic camera matrix, R_t, from the rotation and translation vectors
of the camera
Convert rotational vector to radians
Construct the x, y, and z components of the camera's rotation matrix
Multiply the x, y, and z componets to get the camera's rotation matrix, R
Concatenate the transposed rotation matrix and translation matrix
(transposed translation vector) multiplied by the transposed rotation
matrix and -1
Compute the center of the camera and it's axis direction
Return R_t, camera center, and axis direction
"""
rx, ry, rz = (np.pi * rotVec) / 180
Rx = np.asmatrix([[1, 0, 0 ],
[0, cos(rx), -1*sin(rx)],
[0, sin(rx), cos(rx) ]])
Ry = np.asmatrix([[cos(ry), 0, sin(ry)],
[0, 1, 0 ],
[-1*sin(ry), 0, cos(ry)]])
Rz = np.asmatrix([[cos(rz), -1*sin(rz), 0],
                      [sin(rz), cos(rz), 0],
                      [0, 0, 1]])
import csv
import numpy as np
from multiprocessing import Pool
from scipy.stats import kurtosis
from scipy.stats import skew
def uniform(n, seed, min = 0, max = 10000):
return np.random.default_rng(seed).integers(min, max, n)
def normal(n, seed, loc = 0.0, scale = 1.0):
return np.random.default_rng(seed).normal(loc, scale, n)
def gamma(n, seed, shape = 2.0, scale = 2.0):
return np.random.default_rng(seed).gamma(shape, scale, n)
def bimodal(n, seed, min, max, loc = 0.0, scale = 1.0):
g = np.random.default_rng(seed)
scale2 = g.uniform(0.5, 1.0)
loc2 = g.uniform(min*(scale+scale2), max*(scale+scale2))
proportion = g.uniform(0.3, 0.7)
sample1 = normal(int(n*proportion), seed, loc, scale)
sample2 = normal(n-int(n*proportion), seed+100000, loc2, scale2)
s = np.concatenate((sample1, sample2))
return s
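# Example: bimodal(1000, seed=42, min=2, max=4) mixes 30-70% of samples
# from N(0, 1) with a second normal whose mean lands between
# min*(scale+scale2) and max*(scale+scale2), producing two separated modes.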
def bin_normal_moments(params):
actual_bins = params[2]
n = params[0]
seed = params[1]
distribution = normal(n, seed)
am = np.mean(distribution)
av = np.var(distribution)
ac = skew(distribution)
ak = kurtosis(distribution)
o = {
'samples': n,
'seed': seed,
'loc': 0.0,
'scale': 1.0,
'actual_moments': {
'actual_mean': am,
'actual_variance': av,
'actual_skew': ac,
'actual_kurtosis': ak,
            'range': abs(np.min(distribution)) + abs(np.max(distribution)),
''' IO utility functions for MDA filetype
source: https://github.com/flatironinstitute/mountainsort/blob/master/packages/pyms/mlpy/mdaio.py
'''
import struct
import numpy as np
class MdaHeader:
def __init__(self, dt0, dims0):
uses64bitdims=(max(dims0)>2e9)
self.uses64bitdims=uses64bitdims
self.dt_code=_dt_code_from_dt(dt0)
self.dt=dt0
self.num_bytes_per_entry=get_num_bytes_per_entry_from_dt(dt0)
self.num_dims=len(dims0)
        self.dimprod=np.prod(dims0)
import numpy as np
from gips.gistmodel.fitting import MC_fitter
from gips.gistmodel._numerical_ext import gist_functional_6p_ext
from gips.gistmodel._numerical_ext import gist_functional_5p_ext
from gips.gistmodel._numerical_ext import gist_functional_4p_ext
from gips.gistmodel._numerical_ext import gist_restraint_ext
from gips.gistmodel._numerical_ext import merge_casedata_ext
from gips.gistmodel._numerical_ext import pair_difference_ext
from gips.utils.misc import parms_error
from gips import FLOAT
from gips import DOUBLE
MODE=3
class mode3(MC_fitter):
def __init__(self, gdatarec_dict,
gdata_dict,
ref_energy=-11.108,
parms=6,
pairs=False,
radiusadd=[0.,3.],
softness=1.0,
softcut=2.0,
boundsdict=None,
pairlist=None,
exclude=None,
scaling=2.0,
select=None,
decomp_E=False,
decomp_S=False,
verbose=False):
super(mode3, self).__init__(gdatarec_dict=gdatarec_dict,
gdata_dict=gdata_dict,
ref_energy=ref_energy,
mode=MODE,
radiusadd=radiusadd,
softness=softness,
softcut=softcut,
exclude=exclude,
scaling=scaling,
verbose=verbose)
self.pairs = pairs
self.parms = parms
self._parms = parms
self._gist_functional_ext = None
self.boundsdict = boundsdict
self.pairlist = pairlist
if self.pairs:
self.set_pairs()
self.set_selection(select)
self.set_functional()
self.set_bounds()
self.set_step()
self.set_x0()
self.w = self.w.astype(DOUBLE)
self.w_cplx = self.w_cplx.astype(DOUBLE)
self.w_lig = self.w_lig.astype(DOUBLE)
def gist_functional(self, x):
### &PyArray_Type, &E,
### &PyArray_Type, &S,
### &PyArray_Type, &g,
### &PyArray_Type, &vol,
### &PyArray_Type, &ind,
### &PyArray_Type, &x,
### &PyArray_Type, &dx,
### &PyArray_Type, &fun,
### &PyArray_Type, &grad,
### &verbose
### x[0] = E_aff
### x[1] = e_co
### x[2] = S_aff
### x[3] = s_co
### x[4] = g_co
### x[5] = C
_x = np.zeros(self.parms, dtype=DOUBLE)
_x[:-1] = x[:-1]
### Make sure all types are DOUBLE
if not self.pairs:
if not self._gist_functional_ext(self.E, self.S, self.g, self.vol,
self.ind_rec, _x, self._dx, self._calc_data,
self._gradients, 0, int(self.anal_grad)):
raise ValueError("Something went wrong in gist functional calculation.")
if not self._gist_functional_ext(self.E_cplx, self.S_cplx, self.g_cplx, self.vol_cplx,
self.ind_rec_cplx, _x, self._dx, self._calc_data_cplx,
self._gradients_cplx, 0, int(self.anal_grad)):
raise ValueError("Something went wrong in gist functional calculation.")
if not self._gist_functional_ext(self.E_lig, self.S_lig, self.g_lig, self.vol_lig,
self.ind_rec_lig, _x, self._dx, self._calc_data_lig,
self._gradients_lig, 0, int(self.anal_grad)):
raise ValueError("Something went wrong in gist functional calculation.")
### &PyArray_Type, &x,
### &PyArray_Type, &xmin,
### &PyArray_Type, &xmax,
### &k,
### &restraint,
### &PyArray_Type, &restraint_grad
if self.anal_boundary:
self._restraint = gist_restraint_ext(x,
self.xmin,
self.xmax,
self.kforce_f,
self.kforce,
self._restraint_grad)
def _f_process(self, x):
        __doc__ = """
        Returns the squared sum of residuals. The objective function
        is the free energy.
        """
self.gist_functional(x)
self._f[:] = 0.
if self.anal_grad:
self._g[:] = 0.
### Complex and Ligand contributions
### &PyArray_Type, &source,
### &PyArray_Type, &assign,
### &PyArray_Type, &factor,
### &PyArray_Type, &assign_factor
if self.pairs:
_f = merge_casedata_ext(self._calc_data_cplx, self.ind_case_cplx, self.w_cplx, self.ind_case_cplx)
_f -= merge_casedata_ext(self._calc_data_lig, self.ind_case_lig, self.w_lig, self.ind_case_lig)
self._f[:] += pair_difference_ext(_f, self.pairidx)
for i in range(self.parms-1):
_g = merge_casedata_ext(self._gradients_cplx[:,i], self.ind_case_cplx, self.w_cplx, self.ind_case_cplx)
_g -= merge_casedata_ext(self._gradients_lig[:, i], self.ind_case_lig, self.w_lig, self.ind_case_lig)
self._g[:,i] += pair_difference_ext(_g, self.pairidx)
else:
self._f[:] = merge_casedata_ext(self._calc_data_cplx, self.ind_case_cplx, self.w_cplx, self.ind_case_cplx)
self._f[:] -= merge_casedata_ext(self._calc_data, self.ind_case, self.w, self.ind_rec)
self._f[:] -= merge_casedata_ext(self._calc_data_lig, self.ind_case_lig, self.w_lig, self.ind_case_lig)
for i in range(self.parms):
self._g[:,i] = merge_casedata_ext(self._gradients_cplx[:,i], self.ind_case_cplx, self.w_cplx, self.ind_case_cplx)
self._g[:,i] -= merge_casedata_ext(self._gradients[:,i], self.ind_case, self.w, self.ind_rec)
self._g[:,i] -= merge_casedata_ext(self._gradients_lig[:, i], self.ind_case_lig, self.w_lig, self.ind_case_lig)
self._f[:] += x[-1]
self._g[:,-1] = 1
else:
if self.pairs:
### Complex and Ligand contributions
_f = merge_casedata_ext(self._calc_data_cplx, self.ind_case_cplx, self.w_cplx, self.ind_case_cplx)
_f -= merge_casedata_ext(self._calc_data_lig, self.ind_case_lig, self.w_lig, self.ind_case_lig)
self._f[:] += pair_difference_ext(_f, self.pairidx)
else:
### Receptor contributions
self._f[:] = merge_casedata_ext(self._calc_data_cplx, self.ind_case_cplx, self.w_cplx, self.ind_case_cplx)
self._f[:] -= merge_casedata_ext(self._calc_data, self.ind_case, self.w, self.ind_rec)
self._f[:] -= merge_casedata_ext(self._calc_data_lig, self.ind_case_lig, self.w_lig, self.ind_case_lig)
self._f[:] += x[-1]
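        # In short: _f accumulates the calculated complex minus receptor
        # minus ligand contributions (or their pairwise differences in
        # pairs mode), shifted by the constant offset x[-1]; _g holds the
        # matching analytic gradients when anal_grad is set.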
def set_bounds(self):
__doc__ = """
Ensures that we don't run out of bounds during MC steps.
"""
self.xmin = np.zeros(self._parms, dtype=DOUBLE)
self.xmax = np.zeros(self._parms, dtype=DOUBLE)
self._restraint_grad = np.zeros(self._parms, dtype=DOUBLE)
self._restraint = 0.
self.kforce_f = np.zeros(self._parms, dtype=DOUBLE)
_E = np.min([np.min(self.E_cplx),np.min(self.E),np.min(self.E_lig)]),\
np.max([np.max(self.E_cplx),np.max(self.E),np.max(self.E_lig)])
_S = np.min([np.min(self.S_cplx),np.min(self.S),np.min(self.S_lig)]),\
np.max([np.max(self.S_cplx),np.max(self.S),np.max(self.S_lig)])
_g = np.min([np.min(self.g_cplx),np.min(self.g),np.min(self.g_lig)]),\
np.max([np.max(self.g_cplx),np.max(self.g),np.max(self.g_lig)])
if isinstance(self.boundsdict, dict):
self.xmin[-1], self.xmax[-1] = self.boundsdict['C'][0], self.boundsdict['C'][1] ### C
else:
self.xmin[-1], self.xmax[-1] = -10. , 10. ### C
self.kforce_f[-1] = 1.
if self.parms==6:
if isinstance(self.boundsdict, dict):
self.xmin[0], self.xmax[0] = self.boundsdict['E'][0], self.boundsdict['E'][1] ### E_aff
self.xmin[1], self.xmax[1] = self.boundsdict['e_co'][0], self.boundsdict['e_co'][1] ### e_co
self.xmin[2], self.xmax[2] = self.boundsdict['S'][0], self.boundsdict['S'][1] ### S_aff
self.xmin[3], self.xmax[3] = self.boundsdict['s_co'][0], self.boundsdict['s_co'][1] ### s_co
self.xmin[4], self.xmax[4] = self.boundsdict['g_co'][0], self.boundsdict['g_co'][1] ### g_co
else:
self.xmin[0], self.xmax[0] = -10 , 10. ### E_aff
self.xmin[1], self.xmax[1] = np.min(_E), np.max(_E) ### e_co
self.xmin[2], self.xmax[2] = -10. , 10. ### S_aff
self.xmin[3], self.xmax[3] = np.min(_S), np.max(_S) ### s_co
self.xmin[4], self.xmax[4] = 1. , np.max(_g) ### g_co
self.kforce_f[0] = 1.
self.kforce_f[1] = 10.
self.kforce_f[2] = 1.
self.kforce_f[3] = 10.
self.kforce_f[4] = 10.
elif self.parms==5:
if isinstance(self.boundsdict, dict):
self.xmin[0], self.xmax[0] = self.boundsdict['E'][0], self.boundsdict['E'][1] ### Aff
self.xmin[1], self.xmax[1] = self.boundsdict['e_co'][0], self.boundsdict['e_co'][1] ### e_co
self.xmin[2], self.xmax[2] = self.boundsdict['s_co'][0], self.boundsdict['s_co'][1] ### s_co
self.xmin[3], self.xmax[3] = self.boundsdict['g_co'][0], self.boundsdict['g_co'][1] ### g_co
else:
self.xmin[0], self.xmax[0] = -10 , 10. ### Aff
self.xmin[1], self.xmax[1] = np.min(_E), np.max(_E) ### e_co
self.xmin[2], self.xmax[2] = np.min(_S), np.max(_S) ### s_co
self.xmin[3], self.xmax[3] = 1. , np.max(_g) ### g_co
self.kforce_f[0] = 1.
self.kforce_f[1] = 10.
self.kforce_f[2] = 10.
self.kforce_f[3] = 10.
elif self.parms==4:
if isinstance(self.boundsdict, dict):
self.xmin[0], self.xmax[0] = self.boundsdict['e_co'][0], self.boundsdict['e_co'][1] ### e_co
self.xmin[1], self.xmax[1] = self.boundsdict['s_co'][0], self.boundsdict['s_co'][1] ### s_co
self.xmin[2], self.xmax[2] = self.boundsdict['g_co'][0], self.boundsdict['g_co'][1] ### g_co
else:
self.xmin[0], self.xmax[0] = np.min(_E), np.max(_E) ### e_co
self.xmin[1], self.xmax[1] = np.min(_S), np.max(_S) ### s_co
self.xmin[2], self.xmax[2] = 1. , np.max(_g) ### g_co
self.kforce_f[0] = 10.
self.kforce_f[1] = 10.
self.kforce_f[2] = 10.
def set_step(self):
self.steps = np.zeros(self._parms, dtype=DOUBLE)
self.steps[-1] = 1.0
if self.parms==6:
self.steps[0] = 1.
self.steps[1] = 2.0
self.steps[2] = 1.
self.steps[3] = 2.0
self.steps[4] = 2.0
elif self.parms==5:
self.steps[0] = 1.
self.steps[1] = 2.0
self.steps[2] = 2.0
self.steps[3] = 2.0
elif self.parms==4:
self.steps[0] = 2.0
self.steps[1] = 2.0
self.steps[2] = 2.0
else:
parms_error(self.parms, self._parms)
def set_functional(self):
### Note, all arrays which are passed to the functionals (such as
        ### gist_functional_6p_ext), must be DOUBLE (i.e. 64-bit floating
### point type in C). This will not checked within the C routine
### (but should be implemented at some point ...).
if self.pairs:
self._exp_data = pair_difference_ext(self.dg.astype(DOUBLE), self.pairidx)
self._f = np.zeros(self.N_pairs, dtype=DOUBLE)
self._g = np.zeros((self.N_pairs, self._parms), dtype=DOUBLE)
else:
self._exp_data = np.copy(self.dg.astype(DOUBLE))
            self._f = np.zeros(self.N_case, dtype=DOUBLE)
            self._g = np.zeros((self.N_case, self._parms), dtype=DOUBLE)
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
from pathlib import Path
import numpy as np
import cv2
from ..base_evaluator import BaseEvaluator
from ..quantization_model_evaluator import create_dataset_attributes
from ...adapters import create_adapter
from ...config import ConfigError
from ...data_readers import DataRepresentation
from ...launcher import create_launcher
from ...launcher.input_feeder import PRECISION_TO_DTYPE
from ...logging import print_info
from ...preprocessor import PreprocessingExecutor
from ...progress_reporters import ProgressReporter
from ...representation import RawTensorPrediction, RawTensorAnnotation
from ...utils import extract_image_representations, contains_all, get_path
class CocosnetEvaluator(BaseEvaluator):
def __init__(
self, dataset_config, launcher, preprocessor_mask, preprocessor_image,
gan_model, check_model, orig_config
):
self.launcher = launcher
self.dataset_config = dataset_config
self.preprocessor_mask = preprocessor_mask
self.preprocessor_image = preprocessor_image
self.postprocessor = None
self.dataset = None
self.metric_executor = None
self.test_model = gan_model
self.check_model = check_model
self.config = orig_config
self._metrics_results = []
self._part_by_name = {
'gan_network': self.test_model,
}
if self.check_model:
self._part_by_name.update({'verification_network': self.check_model})
@classmethod
def from_configs(cls, config, delayed_model_loading=False, orig_config=None):
launcher_config = config['launchers'][0]
dataset_config = config['datasets']
preprocessor_mask = PreprocessingExecutor(
dataset_config[0].get('preprocessing_mask')
)
preprocessor_image = PreprocessingExecutor(
dataset_config[0].get('preprocessing_image')
)
launcher = create_launcher(launcher_config, delayed_model_loading=True)
network_info = config.get('network_info', {})
cocosnet_network = network_info.get('cocosnet_network', {})
verification_network = network_info.get('verification_network', {})
if not delayed_model_loading:
model_args = config.get('_models', [])
models_is_blob = config.get('_model_is_blob')
if 'model' not in cocosnet_network and model_args:
cocosnet_network['model'] = model_args[0]
cocosnet_network['_model_is_blob'] = models_is_blob
if verification_network and 'model' not in verification_network and model_args:
verification_network['model'] = model_args[1 if len(model_args) > 1 else 0]
verification_network['_model_is_blob'] = models_is_blob
network_info.update({
'cocosnet_network': cocosnet_network,
'verification_network': verification_network
})
if not contains_all(network_info, ['cocosnet_network']):
raise ConfigError('configuration for cocosnet_network does not exist')
gan_model = CocosnetModel(network_info.get('cocosnet_network', {}), launcher, delayed_model_loading)
if verification_network:
check_model = GanCheckModel(network_info.get('verification_network', {}), launcher, delayed_model_loading)
else:
check_model = None
return cls(
dataset_config, launcher, preprocessor_mask, preprocessor_image, gan_model, check_model, orig_config
)
@staticmethod
def get_processing_info(config):
module_specific_params = config.get('module_config')
model_name = config['name']
launcher_config = module_specific_params['launchers'][0]
dataset_config = module_specific_params['datasets'][0]
return (
model_name, launcher_config['framework'], launcher_config['device'], launcher_config.get('tags'),
dataset_config['name']
)
def _preprocessing_for_batch_input(self, batch_annotation, batch_inputs):
for i, _ in enumerate(batch_inputs):
for index_of_input, _ in enumerate(batch_inputs[i].data):
preprocessor = self.preprocessor_mask
if index_of_input % 2:
preprocessor = self.preprocessor_image
batch_inputs[i].data[index_of_input] = preprocessor.process(
images=[DataRepresentation(batch_inputs[i].data[index_of_input])],
batch_annotation=batch_annotation)[0].data
return batch_inputs
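    # Note: in the method above, even-indexed inputs run through the mask
    # preprocessor and odd-indexed inputs through the image preprocessor,
    # matching the (mask, image) interleaving of the CoCosNet input tuples.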
def process_dataset(
self, subset=None,
num_images=None,
check_progress=False,
dataset_tag='',
output_callback=None,
allow_pairwise_subset=False,
dump_prediction_to_annotation=False,
**kwargs):
if self.dataset is None or (dataset_tag and self.dataset.tag != dataset_tag):
self.select_dataset(dataset_tag)
self._annotations, self._predictions = [], []
self._create_subset(subset, num_images, allow_pairwise_subset)
if 'progress_reporter' in kwargs:
_progress_reporter = kwargs['progress_reporter']
_progress_reporter.reset(self.dataset.size)
else:
_progress_reporter = None if not check_progress else self._create_progress_reporter(
check_progress, self.dataset.size
)
metric_config = self._configure_intermediate_metrics_results(kwargs)
(compute_intermediate_metric_res, metric_interval, ignore_results_formatting,
ignore_metric_reference) = metric_config
for batch_id, (batch_input_ids, batch_annotation, batch_inputs, batch_identifiers) in enumerate(self.dataset):
batch_inputs = self._preprocessing_for_batch_input(batch_annotation, batch_inputs)
extr_batch_inputs, _ = extract_image_representations(batch_inputs)
batch_predictions, raw_predictions = self.test_model.predict(batch_identifiers, extr_batch_inputs)
annotations, predictions = self.postprocessor.process_batch(batch_annotation, batch_predictions)
if self.metric_executor:
metrics_result, _ = self.metric_executor.update_metrics_on_batch(
batch_input_ids, annotations, predictions
)
check_model_annotations = []
check_model_predictions = []
if self.check_model:
for index_of_metric in range(self.check_model.number_of_metrics):
check_model_annotations.extend(
self.check_model.predict(batch_identifiers, annotations, index_of_metric)
)
check_model_predictions.extend(
self.check_model.predict(batch_identifiers, predictions, index_of_metric)
)
batch_identifiers.extend(batch_identifiers)
check_model_annotations = [
RawTensorAnnotation(batch_identifier, item)
for batch_identifier, item in zip(batch_identifiers, check_model_annotations)]
check_model_predictions = [
RawTensorPrediction(batch_identifier, item)
for batch_identifier, item in zip(batch_identifiers, check_model_predictions)]
if self.metric_executor.need_store_predictions:
self._annotations.extend(check_model_annotations)
self._predictions.extend(check_model_predictions)
if output_callback:
output_callback(
raw_predictions,
metrics_result=metrics_result,
element_identifiers=batch_identifiers,
dataset_indices=batch_input_ids
)
if _progress_reporter:
_progress_reporter.update(batch_id, len(batch_predictions))
if compute_intermediate_metric_res and _progress_reporter.current % metric_interval == 0:
self.compute_metrics(
print_results=True, ignore_results_formatting=ignore_results_formatting,
ignore_metric_reference=ignore_metric_reference
)
self.write_results_to_csv(kwargs.get('csv_result'), ignore_results_formatting, metric_interval)
if _progress_reporter:
_progress_reporter.finish()
return self._annotations, self._predictions
def compute_metrics(self, print_results=True, ignore_results_formatting=False, ignore_metric_reference=False):
if self._metrics_results:
del self._metrics_results
self._metrics_results = []
for result_presenter, evaluated_metric in self.metric_executor.iterate_metrics(
self._annotations, self._predictions):
self._metrics_results.append(evaluated_metric)
if print_results:
result_presenter.write_result(evaluated_metric, ignore_results_formatting, ignore_metric_reference)
return self._metrics_results
def extract_metrics_results(self, print_results=True, ignore_results_formatting=False,
ignore_metric_reference=False):
if not self._metrics_results:
self.compute_metrics(False, ignore_results_formatting, ignore_metric_reference)
result_presenters = self.metric_executor.get_metric_presenters()
extracted_results, extracted_meta = [], []
for presenter, metric_result in zip(result_presenters, self._metrics_results):
result, metadata = presenter.extract_result(metric_result)
if isinstance(result, list):
extracted_results.extend(result)
extracted_meta.extend(metadata)
else:
extracted_results.append(result)
extracted_meta.append(metadata)
if print_results:
presenter.write_result(metric_result, ignore_results_formatting, ignore_metric_reference)
return extracted_results, extracted_meta
def print_metrics_results(self, ignore_results_formatting=False, ignore_metric_reference=False):
if not self._metrics_results:
self.compute_metrics(True, ignore_results_formatting, ignore_metric_reference)
return
result_presenters = self.metric_executor.get_metric_presenters()
for presenter, metric_result in zip(result_presenters, self._metrics_results):
presenter.write_result(metric_result, ignore_results_formatting, ignore_metric_reference)
def release(self):
self.test_model.release()
if self.check_model:
self.check_model.release()
self.launcher.release()
def reset(self):
if self.metric_executor:
self.metric_executor.reset()
if hasattr(self, '_annotations'):
del self._annotations
del self._predictions
del self._input_ids
del self._metrics_results
self._annotations = []
self._predictions = []
self._input_ids = []
self._metrics_results = []
if self.dataset:
self.dataset.reset(self.postprocessor.has_processors)
def load_model(self, network_list):
for network_dict in network_list:
self._part_by_name[network_dict['name']].load_model(network_dict, self.launcher)
def load_network(self, network_list):
for network_dict in network_list:
self._part_by_name[network_dict['name']].load_network(network_dict['model'], self.launcher)
def get_network(self):
return [{'name': key, 'model': model.network} for key, model in self._part_by_name.items()]
def load_network_from_ir(self, models_list):
model_paths = next(iter(models_list))
next(iter(self._part_by_name.values())).load_model(model_paths, self.launcher)
def get_metrics_attributes(self):
if not self.metric_executor:
return {}
return self.metric_executor.get_metrics_attributes()
def register_metric(self, metric_config):
if isinstance(metric_config, str):
self.metric_executor.register_metric({'type': metric_config})
elif isinstance(metric_config, dict):
self.metric_executor.register_metric(metric_config)
else:
raise ValueError('Unsupported metric configuration type {}'.format(type(metric_config)))
def register_postprocessor(self, postprocessing_config):
pass
def register_dumped_annotations(self):
pass
def select_dataset(self, dataset_tag):
if self.dataset is not None and isinstance(self.dataset_config, list):
return
dataset_attributes = create_dataset_attributes(self.dataset_config, dataset_tag)
self.dataset, self.metric_executor, self.preprocessor, self.postprocessor = dataset_attributes
def set_profiling_dir(self, profiler_dir):
self.metric_executor.set_profiling_dir(profiler_dir)
def _create_subset(self, subset=None, num_images=None, allow_pairwise=False):
if self.dataset.batch is None:
self.dataset.batch = 1
if subset is not None:
self.dataset.make_subset(ids=subset, accept_pairs=allow_pairwise)
elif num_images is not None:
self.dataset.make_subset(end=num_images, accept_pairs=allow_pairwise)
@staticmethod
def _create_progress_reporter(check_progress, dataset_size):
pr_kwargs = {}
if isinstance(check_progress, int) and not isinstance(check_progress, bool):
pr_kwargs = {"print_interval": check_progress}
return ProgressReporter.provide('print', dataset_size, **pr_kwargs)
@staticmethod
def _configure_intermediate_metrics_results(config):
compute_intermediate_metric_res = config.get('intermediate_metrics_results', False)
metric_interval, ignore_results_formatting, ignore_metric_reference = None, None, None
if compute_intermediate_metric_res:
metric_interval = config.get('metrics_interval', 1000)
ignore_results_formatting = config.get('ignore_results_formatting', False)
ignore_metric_reference = config.get('ignore_metric_reference', False)
return compute_intermediate_metric_res, metric_interval, ignore_results_formatting, ignore_metric_reference
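    # e.g. (hypothetical) config snippet read by the helper above:
    #   {'intermediate_metrics_results': True, 'metrics_interval': 500,
    #    'ignore_results_formatting': False, 'ignore_metric_reference': False}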
@property
def dataset_size(self):
return self.dataset.size
def send_processing_info(self, sender):
if not sender:
return {}
model_type = None
details = {}
metrics = self.dataset_config[0].get('metrics', [])
metric_info = [metric['type'] for metric in metrics]
adapter_type = self.test_model.adapter.__provider__
details.update({
'metrics': metric_info,
'model_file_type': model_type,
'adapter': adapter_type,
})
if self.dataset is None:
self.select_dataset('')
details.update(self.dataset.send_annotation_info(self.dataset_config[0]))
return details
class BaseModel:
def __init__(self, network_info, launcher, delayed_model_loading=False):
self.input_blob = None
self.output_blob = None
self.with_prefix = False
if not delayed_model_loading:
self.load_model(network_info, launcher, log=True)
@staticmethod
def auto_model_search(network_info, net_type=""):
model = Path(network_info['model'])
is_blob = network_info.get('_model_is_blob')
if model.is_dir():
if is_blob:
model_list = list(model.glob('*.blob'))
else:
model_list = list(model.glob('*.xml'))
if not model_list and is_blob is None:
model_list = list(model.glob('*.blob'))
if not model_list:
raise ConfigError('Suitable model not found')
if len(model_list) > 1:
raise ConfigError('Several suitable models found')
model = model_list[0]
accepted_suffixes = ['.blob', '.xml']
if model.suffix not in accepted_suffixes:
            raise ConfigError('Models with the following suffixes are allowed: {}'.format(accepted_suffixes))
print_info('{} - Found model: {}'.format(net_type, model))
if model.suffix == '.blob':
return model, None
weights = get_path(network_info.get('weights', model.parent / model.name.replace('xml', 'bin')))
accepted_weights_suffixes = ['.bin']
if weights.suffix not in accepted_weights_suffixes:
            raise ConfigError('Weights with the following suffixes are allowed: {}'.format(accepted_weights_suffixes))
print_info('{} - Found weights: {}'.format(net_type, weights))
return model, weights
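    # e.g. (hypothetical) network_info values accepted above: an explicit pair,
    #   {'model': 'cocosnet/model.xml', 'weights': 'cocosnet/model.bin'},
    # or a directory to search: {'model': 'cocosnet/', '_model_is_blob': False}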
@property
def inputs(self):
if self.network:
return self.network.input_info if hasattr(self.network, 'input_info') else self.network.inputs
return self.exec_network.input_info if hasattr(self.exec_network, 'input_info') else self.exec_network.inputs
def predict(self, identifiers, input_data):
raise NotImplementedError
def release(self):
del self.network
del self.exec_network
def load_model(self, network_info, launcher, log=False):
model, weights = self.auto_model_search(network_info, self.net_type)
if weights:
self.network = launcher.read_network(model, weights)
self.exec_network = launcher.ie_core.load_network(self.network, launcher.device)
else:
self.network = None
self.exec_network = launcher.ie_core.import_network(str(model))
self.set_input_and_output()
if log:
self.print_input_output_info()
def load_network(self, network, launcher):
self.network = network
self.exec_network = launcher.ie_core.load_network(self.network, launcher.device)
self.set_input_and_output()
def set_input_and_output(self):
pass
def print_input_output_info(self):
print_info('{} - Input info:'.format(self.net_type))
has_info = hasattr(self.network if self.network is not None else self.exec_network, 'input_info')
if self.network:
if has_info:
network_inputs = OrderedDict(
[(name, data.input_data) for name, data in self.network.input_info.items()]
)
else:
network_inputs = self.network.inputs
network_outputs = self.network.outputs
else:
if has_info:
network_inputs = OrderedDict([
(name, data.input_data) for name, data in self.exec_network.input_info.items()
])
else:
network_inputs = self.exec_network.inputs
network_outputs = self.exec_network.outputs
for name, input_info in network_inputs.items():
print_info('\tLayer name: {}'.format(name))
print_info('\tprecision: {}'.format(input_info.precision))
            print_info('\tshape: {}\n'.format(input_info.shape))
print_info('{} - Output info'.format(self.net_type))
for name, output_info in network_outputs.items():
print_info('\tLayer name: {}'.format(name))
print_info('\tprecision: {}'.format(output_info.precision))
print_info('\tshape: {}\n'.format(output_info.shape))
class CocosnetModel(BaseModel):
def __init__(self, network_info, launcher, delayed_model_loading=False):
self.net_type = "cocosnet_network"
self.adapter = create_adapter(network_info.get('adapter'))
super().__init__(network_info, launcher, delayed_model_loading)
self.adapter.output_blob = self.output_blob
def set_input_and_output(self):
has_info = hasattr(self.exec_network, 'input_info')
if has_info:
inputs_data = OrderedDict([(name, data.input_data) for name, data in self.exec_network.input_info.items()])
else:
inputs_data = self.exec_network.inputs
self.inputs_names = list(inputs_data.keys())
if self.output_blob is None:
self.output_blob = next(iter(self.exec_network.outputs))
if self.adapter.output_blob is None:
self.adapter.output_blob = self.output_blob
def fit_to_input(self, input_data):
inputs = {}
for value, key in zip(input_data, self.inputs_names):
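            # add a batch axis (HWC -> NHWC), then reorder to the NCHW layout
            # expected by the network input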
value = np.expand_dims(value, 0)
value = np.transpose(value, (0, 3, 1, 2))
inputs[key] = value.astype(PRECISION_TO_DTYPE[self.inputs[key].precision])
return inputs
def predict(self, identifiers, inputs):
results = []
for current_input in inputs:
prediction = self.exec_network.infer(self.fit_to_input(current_input))
results.append(*self.adapter.process(prediction, identifiers, [{}]))
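        # note: 'prediction' keeps only the raw infer() output of the last input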
return results, prediction
class GanCheckModel(BaseModel):
def __init__(self, network_info, launcher, delayed_model_loading=False):
self.net_type = "verification_network"
self.additional_layers = network_info.get('additional_layers')
super().__init__(network_info, launcher, delayed_model_loading)
def load_model(self, network_info, launcher, log=False):
model, weights = self.auto_model_search(network_info, self.net_type)
if weights:
self.network = launcher.read_network(model, weights)
for layer in self.additional_layers:
self.network.add_outputs(layer)
self.exec_network = launcher.ie_core.load_network(self.network, launcher.device)
else:
self.network = None
self.exec_network = launcher.ie_core.import_network(str(model))
self.set_input_and_output()
if log:
self.print_input_output_info()
def set_input_and_output(self):
has_info = hasattr(self.exec_network, 'input_info')
input_info = self.exec_network.input_info if has_info else self.exec_network.inputs
self.input_blob = next(iter(input_info))
self.input_shape = tuple(input_info[self.input_blob].input_data.shape)
self.output_blob = list(self.exec_network.outputs.keys())
self.number_of_metrics = len(self.output_blob)
def fit_to_input(self, input_data):
input_data = cv2.cvtColor(input_data, cv2.COLOR_RGB2BGR)
input_data = cv2.resize(input_data, dsize=self.input_shape[2:])
        input_data = np.expand_dims(input_data, 0)
import numpy as np
import pandas as pd
import scipy.optimize
# Import own modules
import lbfcs.varfuncs as varfuncs
import lbfcs.multitau as multitau
#%%
def trace_ac(df,NoFrames,field = 'photons',compute_ac=True):
    '''
    Get the fluorescence trace for a single pick and its normalized multitau autocorrelation function (AC), employing multitau.autocorrelate().
    Args:
        df (pandas.DataFrame): Single-group picked localizations. See picasso.render and picasso_addon.autopick.
        NoFrames (int): No. of frames in measurement, i.e. duration in frames.
        field (str): Column of ``df`` used to build the trace. Defaults to 'photons'.
        compute_ac (bool): If False, skip the autocorrelation and return 0 in its place. Defaults to True.
    Returns:
        list:
            - [0] (numpy.array): Fluorescence trace of ``len=NoFrames``
            - [1] (numpy.array): First column corresponds to lagtimes, second to autocorrelation values.
    '''
############################# Prepare trace
    df[field] = df[field].abs() # Guard against occasional negative values
df_sum = df[['frame',field]].groupby('frame').sum() # Sum multiple localizations in single frame
trace = np.zeros(NoFrames)
trace[df_sum.index.values] = df_sum[field].values # Add (summed) photons to trace for each frame
############################# Autocorrelate trace
if compute_ac:
ac = multitau.autocorrelate(trace,
m=32,
deltat=1,
normalize=True,
copy=False,
dtype=np.float64(),
)
else:
ac = 0
return [trace,ac]
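
# Minimal usage sketch of trace_ac (the toy DataFrame below is hypothetical and
# only illustrates the expected 'frame'/'photons' columns; the trace must be
# long enough for the m=32 multitau scheme):
if __name__ == '__main__':
    _df = pd.DataFrame({'frame': [0, 0, 3, 5], 'photons': [120.0, 80.0, 95.0, 110.0]})
    _trace, _ac = trace_ac(_df, NoFrames=2000)
    print(_trace[:6])  # summed photons per frame, zeros where nothing was localized
    print(_ac[:3])     # rows of (lagtime, normalized AC value)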
#%%
def fit_ac_lin(ac,max_it=10):
    '''
    Linearized, iterative version of the AC fit: estimates the amplitude and
    correlation time of the autocorrelation curve ``ac`` (lagtime vs. value),
    refining the estimate for at most ``max_it`` iterations.
    '''
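    # Start values assume G(l) = 1 + A*exp(-l/tau): then -log(G(l) - 1) is linear
    # in the lagtime l with slope 1/tau, so the slope between l=1 and l=l_max
    # estimates 1/tau, and G(1) - 1 estimates the amplitude A.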
###################################################### Define start parameters
popt=np.empty([2]) # Init
popt[0]=ac[1,1]-1 # Amplitude
l_max=8 # Maximum lagtime
    try:
        l_max_nonan = np.where(np.isnan(-np.log(ac[1:, 1] - 1)))[0][0]  # first lagtime where log(G-1) is undefined
    except IndexError:  # no NaN occurrence -> use the full curve
        l_max_nonan = len(ac) - 1
    l_max = min(l_max, l_max_nonan)  # restrict the fit to finite values
    slope = (-np.log(ac[l_max, 1] - 1) + np.log(ac[1, 1] - 1)) / (l_max - 1)  # slope of -log(G-1) vs. lagtime
    popt[1] = 1 / slope  # correlation time tau is the inverse slope
###################################################### Fit boundaries
    lowbounds = np.array([0, 0])
import numpy as np
from pathlib import Path
from typing import List
from loguru import logger
from dataclasses import dataclass
from bg_atlasapi.bg_atlas import BrainGlobeAtlas
from myterial.utils import rgb2hex
@dataclass
class ActiveElectrode:
idx: int
probe_position: int # distance in um along the Y axis of the probe
shank: int = 0 # shank number
x_position: int = 0 # distance in um along the X axis , between shanks
def prepare_electrodes_positions(
configuration: str, n_sites: int = 384
) -> List[ActiveElectrode]:
"""
Defines the position along the probe (in coordinates from the first electrode)
of each active electrode
"""
if configuration == "b0":
Y = 20 * np.repeat(np.arange(0, int(n_sites / 2)), 2)
ids = np.arange(1, n_sites + 1)
elif configuration == "longcolumn":
Y = 20 * np.arange(n_sites)
# odd numbers for bank 0 and even for bank 1
_ids = np.arange(n_sites + 1)
ids = np.hstack([_ids[1::2], _ids[2::2]])
elif configuration in ["r32", "r48", "r64", "r72", "r96", "r128"]:
row = int(configuration[1:])
        # get coordinates on the 4 shanks of an NP2.4 probe, with a horizontal row of sites starting at channel 'row'
one = np.ones(96)
shank_id = np.hstack([one * 0, one * 1, one * 2, one * 3])
geometry = np.zeros((384, 2))
geometry[:, 0] = shank_id * 250 # x coordinates
geometry[1::2, 0] = geometry[::2, 0] + 32
v_half = np.arange(0, 96 / 2)
geometry[::2, 1] = (
np.hstack([v_half, v_half, v_half, v_half]) * 15 + row * 15
) # y coordinates
geometry[1::2, 1] = geometry[::2, 1]
        ids = np.arange(1, n_sites + 1)
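        # worked example for the "b0" branch above (n_sites=384): electrodes come
        # in pairs sharing a row every 20 um, i.e. Y = [0, 0, 20, 20, 40, 40, ...]
        # and ids = [1, 2, ..., 384]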
import numpy as np
import pytest
import scipy.sparse as sp
from lightfm import LightFM
def test_empty_matrix():
no_users, no_items = (10, 100)
train = sp.coo_matrix((no_users,
no_items),
dtype=np.int32)
model = LightFM()
model.fit_partial(train)
def test_matrix_types():
mattypes = (sp.coo_matrix,
sp.lil_matrix,
sp.csr_matrix,
sp.csc_matrix)
dtypes = (np.int32,
np.int64,
np.float32,
np.float64)
no_users, no_items = (10, 100)
no_features = 20
for mattype in mattypes:
for dtype in dtypes:
train = mattype((no_users,
no_items),
dtype=dtype)
user_features = mattype((no_users,
no_features),
dtype=dtype)
item_features = mattype((no_items,
no_features),
dtype=dtype)
model = LightFM()
model.fit_partial(train,
user_features=user_features,
item_features=item_features)
model.predict(np.random.randint(0, no_users, 10).astype(np.int32),
np.random.randint(0, no_items, 10).astype(np.int32),
user_features=user_features,
item_features=item_features)
def test_predict():
no_users, no_items = (10, 100)
train = sp.coo_matrix((no_users,
no_items),
dtype=np.int32)
model = LightFM()
model.fit_partial(train)
for uid in range(no_users):
scores_arr = model.predict(np.repeat(uid, no_items),
np.arange(no_items))
scores_int = model.predict(uid,
np.arange(no_items))
        assert np.allclose(scores_arr, scores_int)
import sys
import pytest
import logging
logger = logging.getLogger(__name__)
@pytest.mark.skipif("sys.version_info < (2, 5)")
def test_memoize_method_clear():
from pytools import memoize_method
class SomeClass:
def __init__(self):
self.run_count = 0
@memoize_method
def f(self):
self.run_count += 1
return 17
sc = SomeClass()
sc.f()
sc.f()
assert sc.run_count == 1
sc.f.clear_cache(sc) # pylint: disable=no-member
def test_memoize_method_with_uncached():
from pytools import memoize_method_with_uncached
class SomeClass:
def __init__(self):
self.run_count = 0
@memoize_method_with_uncached(uncached_args=[1], uncached_kwargs=["z"])
def f(self, x, y, z):
del x, y, z
self.run_count += 1
return 17
sc = SomeClass()
sc.f(17, 18, z=19)
sc.f(17, 19, z=20)
assert sc.run_count == 1
sc.f(18, 19, z=20)
assert sc.run_count == 2
sc.f.clear_cache(sc) # pylint: disable=no-member
def test_memoize_method_nested():
from pytools import memoize_method_nested
class SomeClass:
def __init__(self):
self.run_count = 0
def f(self):
@memoize_method_nested
def inner(x):
self.run_count += 1
return 2*x
inner(5)
inner(5)
sc = SomeClass()
sc.f()
assert sc.run_count == 1
def test_p_convergence_verifier():
pytest.importorskip("numpy")
from pytools.convergence import PConvergenceVerifier
pconv_verifier = PConvergenceVerifier()
for order in [2, 3, 4, 5]:
pconv_verifier.add_data_point(order, 0.1**order)
pconv_verifier()
pconv_verifier = PConvergenceVerifier()
for order in [2, 3, 4, 5]:
pconv_verifier.add_data_point(order, 0.5**order)
pconv_verifier()
pconv_verifier = PConvergenceVerifier()
for order in [2, 3, 4, 5]:
pconv_verifier.add_data_point(order, 2)
with pytest.raises(AssertionError):
pconv_verifier()
def test_memoize():
from pytools import memoize
count = [0]
@memoize(use_kwargs=True)
def f(i, j=1):
count[0] += 1
return i + j
assert f(1) == 2
assert f(1, 2) == 3
assert f(2, j=3) == 5
assert count[0] == 3
assert f(1) == 2
assert f(1, 2) == 3
assert f(2, j=3) == 5
assert count[0] == 3
def test_memoize_keyfunc():
from pytools import memoize
count = [0]
@memoize(key=lambda i, j=(1,): (i, len(j)))
def f(i, j=(1,)):
count[0] += 1
return i + len(j)
assert f(1) == 2
assert f(1, [2]) == 2
assert f(2, j=[2, 3]) == 4
assert count[0] == 2
assert f(1) == 2
assert f(1, (2,)) == 2
assert f(2, j=(2, 3)) == 4
assert count[0] == 2
@pytest.mark.parametrize("dims", [2, 3])
def test_spatial_btree(dims, do_plot=False):
pytest.importorskip("numpy")
import numpy as np
nparticles = 2000
    x = -1 + 2*np.random.rand(dims, nparticles)
import os
import pickle
import sys
import multiprocessing as mp
import mdtraj as md
import numpy as np
from . import exmax, nnutils, utils, data_processing
import copy
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils import data as torch_data
class Dataset(torch_data.Dataset):
'Characterizes a dataset for PyTorch'
def __init__(self, train_inds, labels, data):
'Initialization'
self.labels = labels
self.train_inds = train_inds
self.data = data
def __len__(self):
'Denotes the total number of samples'
return len(self.train_inds)
def __getitem__(self, index):
'Generates one sample of data'
#If data needs to be loaded
ID = self.train_inds[index]
if type(self.data) is str:
# Load data and get label
X = torch.load(self.data + "/ID-%s" % ID + '.pt')
else:
X = torch.from_numpy(self.data[ID]).type(torch.FloatTensor)
y = self.labels[ID]
return X, y, ID
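# Minimal usage sketch of the Dataset above (toy arrays, hypothetical shapes):
#   data = np.random.rand(100, 30).astype(np.float32)   # 100 frames, 3*n_atoms = 30
#   labels = np.random.randint(0, 2, size=(100, 1)).astype(np.float32)
#   loader = torch_data.DataLoader(Dataset(np.arange(80), labels, data), batch_size=16)
#   for batch, y, ids in loader:      # batch: (16, 30), y: (16, 1), ids: (16,)
#       ...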
class Trainer:
def __init__(self,job):
"""Object to train your DiffNet.
Parameters:
-----------
job : dict
Dictionary with all training parameters. See training_dict.txt
for all keys. All keys are required. See train_submit.py for an
example.
"""
self.job = job
def set_training_data(self, job, train_inds, test_inds, labels, data):
"""Construct generators out of the dataset for training, validation,
and expectation maximization.
Parameters
----------
job : dict
            See training_dict.txt for all keys.
train_inds : np.ndarray
Indices in data that are to be trained on
test_inds : np.ndarray
Indices in data that are to be validated on
        labels : np.ndarray
classification labels used for training
data : np.ndarray, shape=(n_frames,3*n_atoms) OR str to path
All data
"""
batch_size = job['batch_size']
cpu_cores = job['em_n_cores']
test_batch_size = job['test_batch_size']
em_batch_size = job['em_batch_size']
subsample = job['subsample']
data_dir = job["data_dir"]
n_train_inds = len(train_inds)
random_inds = np.random.choice(np.arange(n_train_inds),int(n_train_inds/subsample),replace=False)
sampler=torch_data.SubsetRandomSampler(random_inds)
params_t = {'batch_size': batch_size,
'shuffle':False,
'num_workers': cpu_cores,
'sampler': sampler}
params_v = {'batch_size': test_batch_size,
'shuffle':True,
'num_workers': cpu_cores}
params_e = {'batch_size': em_batch_size,
'shuffle':True,
'num_workers': cpu_cores}
n_snapshots = len(train_inds) + len(test_inds)
training_set = Dataset(train_inds, labels, data)
training_generator = torch_data.DataLoader(training_set, **params_t)
validation_set = Dataset(test_inds, labels, data)
validation_generator = torch_data.DataLoader(validation_set, **params_v)
em_set = Dataset(train_inds, labels, data)
em_generator = torch_data.DataLoader(em_set, **params_e)
return training_generator, validation_generator, em_generator
def em_parallel(self, net, em_generator, train_inds, em_batch_size,
indicators, em_bounds, em_n_cores, label_str, epoch):
"""Use expectation maximization to update all training classification
labels.
Parameters
----------
net : nnutils neural network object
Neural network
em_generator : Dataset object
Training data
train_inds : np.ndarray
Indices in data that are to be trained on
em_batch_size : int
Number of examples that are have their classification labels
updated in a single round of expectation maximization.
indicators : np.ndarray, shape=(len(data),)
Value to indicate which variant each data frame came from.
em_bounds : np.ndarray, shape=(n_variants,2)
            A range that sets what fraction of conformations you
            expect each variant to have the biochemical property. The rank
            order of variants matters more than the ranges themselves.
em_n_cores : int
CPU cores to use for expectation maximization calculation
Returns
-------
new_labels : np.ndarray, shape=(len(data),)
Updated classification labels for all training examples
"""
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
n_em = np.ceil(train_inds.shape[0]*1.0/em_batch_size)
        freq_output = max(np.floor(n_em/10.0), 1)  # guard against modulo-by-zero for small n_em
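        # train_inds is rebuilt below from the indices actually yielded per batch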
train_inds = []
inputs = []
i = 0
##To save DiffNet labels before each EM update
pred_labels = -1 * np.ones(indicators.shape[0])
for local_batch, local_labels, t_inds in em_generator:
t_inds = np.array(t_inds)
local_batch, local_labels = local_batch.to(device), local_labels.to(device)
if hasattr(net, "decode"):
if hasattr(net, "reparameterize"):
x_pred, latent, logvar, class_pred = net(local_batch)
else:
x_pred, latent, class_pred = net(local_batch)
else:
class_pred = net(local_batch)
cur_labels = class_pred.cpu().detach().numpy()
pred_labels[t_inds] = cur_labels.flatten()
inputs.append([cur_labels, indicators[t_inds], em_bounds])
if i % freq_output == 0:
print(" %d/%d" % (i, n_em))
i += 1
train_inds.append(t_inds)
pred_label_fn = os.path.join(self.job['outdir'],"tmp_labels_%s_%s.npy" % (label_str,epoch))
np.save(pred_label_fn,pred_labels)
pool = mp.Pool(processes=em_n_cores)
res = pool.map(self.apply_exmax, inputs)
pool.close()
train_inds = np.concatenate(np.array(train_inds))
new_labels = -1 * np.ones((indicators.shape[0], 1))
new_labels[train_inds] = np.concatenate(res)
return new_labels
def apply_exmax(self, inputs):
"""Apply expectation maximization to a batch of data.
Parameters
----------
inputs : list
list where the 0th index is a list of current classification
labels of length == batch_size. 1st index is a corresponding
list of variant simulation indicators. 2nd index is em_bounds.
Returns
-------
Updated labels -- length == batch size
"""
cur_labels, indicators, em_bounds = inputs
n_vars = em_bounds.shape[0]
for i in range(n_vars):
inds = np.where(indicators == i)[0]
            lower = int(np.floor(em_bounds[i, 0] * inds.shape[0]))
            upper = int(np.ceil(em_bounds[i, 1] * inds.shape[0]))
cur_labels[inds] = exmax.expectation_range_CUBIC(cur_labels[inds], lower, upper).reshape(cur_labels[inds].shape)
bad_inds = np.where(np.isnan(cur_labels))
cur_labels[bad_inds] = 0
try:
assert((cur_labels >= 0.).all() and (cur_labels <= 1.).all())
except AssertionError:
neg_inds = np.where(cur_labels<0)[0]
pos_inds = np.where(cur_labels>1)[0]
bad_inds = neg_inds.tolist() + pos_inds.tolist()
for iis in bad_inds:
print(" ", indicators[iis], cur_labels[iis])
print(" #bad neg, pos", len(neg_inds), len(pos_inds))
#np.save("tmp.npy", tmp_labels)
cur_labels[neg_inds] = 0.0
cur_labels[pos_inds] = 1.0
#sys.exit(1)
return cur_labels.reshape((cur_labels.shape[0], 1))
def train(self, data, training_generator, validation_generator, em_generator,
targets, indicators, train_inds, test_inds,net, label_str,
job, lr_fact=1.0):
"""Core method for training
Parameters
----------
data : np.ndarray, shape=(n_frames,3*n_atoms) OR str to path
Training data
training_generator: Dataset object
Generator to sample training data
validation_generator: Dataset object
Generator to sample validation data
em_generator: Dataset object
Generator to sample training data in batches for expectation
maximization
targets : np.ndarray, shape=(len(data),)
classification labels used for training
indicators : np.ndarray, shape=(len(data),)
Value to indicate which variant each data frame came from.
train_inds : np.ndarray
Indices in data that are to be trained on
test_inds : np.ndarray
Indices in data that are to be validated on
net : nnutils neural network object
Neural network
label_str: int
For file naming. Indicates what iteration of training we're
on. Training goes through several iterations where neural net
architecture is progressively built deeper.
job : dict
            See training_dict.txt for all keys.
lr_fact : float
Factor to multiply the learning rate by.
Returns
-------
best_nn : nnutils neural network object
Neural network that has the lowest reconstruction error
on the validation set.
        targets : np.ndarray, shape=(len(data),)
Classification labels after training.
"""
job = self.job
do_em = job['do_em']
n_epochs = job['n_epochs']
lr = job['lr'] * lr_fact
subsample = job['subsample']
batch_size = job['batch_size']
batch_output_freq = job['batch_output_freq']
epoch_output_freq = job['epoch_output_freq']
test_batch_size = job['test_batch_size']
em_bounds = job['em_bounds']
nntype = job['nntype']
em_batch_size = job['em_batch_size']
em_n_cores = job['em_n_cores']
outdir = job['outdir']
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
n_test = test_inds.shape[0]
lam_cls = 1.0
lam_corr = 1.0
n_batch = np.ceil(train_inds.shape[0]*1.0/subsample/batch_size)
optimizer = optim.Adam(net.parameters(), lr=lr)
bce = nn.BCELoss()
training_loss_full = []
test_loss_full = []
epoch_test_loss = []
best_loss = np.inf
best_nn = None
for epoch in range(n_epochs):
# go through mini batches
running_loss = 0
i = 0
for local_batch, local_labels, _ in training_generator:
if use_cuda:
local_labels = local_labels.type(torch.cuda.FloatTensor)
else:
local_labels = local_labels.type(torch.FloatTensor)
local_batch, local_labels = local_batch.to(device), local_labels.to(device)
optimizer.zero_grad()
x_pred, latent, class_pred = net(local_batch)
loss = nnutils.my_mse(local_batch, x_pred)
loss += nnutils.my_l1(local_batch, x_pred)
if class_pred is not None:
loss += bce(class_pred, local_labels).mul_(lam_cls)
#Minimize correlation between latent variables
n_feat = net.sizes[-1]
my_c00 = torch.einsum('bi,bo->io', (latent, latent)).mul(1.0/local_batch.shape[0])
my_mean = torch.mean(latent, 0)
my_mean = torch.einsum('i,o->io', (my_mean, my_mean))
ide = np.identity(n_feat)
if use_cuda:
ide = torch.from_numpy(ide).type(torch.cuda.FloatTensor)
else:
ide = torch.from_numpy(ide).type(torch.FloatTensor)
#ide = Variable(ide)
#ide = torch.from_numpy(np.identity(n_feat))
#ide = ide.to(device)
zero_inds = np.where(1-ide.cpu().numpy()>0)
corr_penalty = nnutils.my_mse(ide[zero_inds], my_c00[zero_inds]-my_mean[zero_inds])
loss += corr_penalty
loss.backward()
optimizer.step()
running_loss += loss.item()
if i%batch_output_freq == 0:
train_loss = running_loss
if i != 0:
train_loss /= batch_output_freq
training_loss_full.append(train_loss)
test_loss = 0
for local_batch, local_labels, _ in validation_generator:
local_batch, local_labels = local_batch.to(device), local_labels.to(device)
x_pred, latent, class_pred = net(local_batch)
loss = nnutils.my_mse(local_batch,x_pred)
test_loss += loss.item() * local_batch.shape[0] # mult for averaging across samples, as in train_loss
#print(" ", test_loss)
test_loss /= n_test # division averages across samples, as in train_loss
test_loss_full.append(test_loss)
print(" [%s %d, %5d/%d] train loss: %0.6f test loss: %0.6f" % (label_str, epoch, i, n_batch, train_loss, test_loss))
running_loss = 0
if test_loss < best_loss:
best_loss = test_loss
best_nn = copy.deepcopy(net)
i += 1
if do_em and hasattr(nntype, "classify"):
print(" Doing EM")
targets = self.em_parallel(net, em_generator, train_inds,
em_batch_size, indicators, em_bounds,
em_n_cores, label_str, epoch)
training_generator, validation_generator, em_generator = \
self.set_training_data(job, train_inds, test_inds, targets, data)
if epoch % epoch_output_freq == 0:
print("my_l1", nnutils.my_l1(local_batch, x_pred))
print("corr penalty",corr_penalty)
print("classify", bce(class_pred, local_labels).mul_(lam_cls))
print("my_mse", nnutils.my_mse(local_batch, x_pred))
epoch_test_loss.append(test_loss)
out_fn = os.path.join(outdir, "epoch_test_loss_%s.npy" % label_str)
np.save(out_fn, epoch_test_loss)
out_fn = os.path.join(outdir, "training_loss_%s.npy" % label_str)
np.save(out_fn, training_loss_full)
out_fn = os.path.join(outdir, "test_loss_%s.npy" % label_str)
np.save(out_fn, test_loss_full)
# nets need be on cpu to load multiple in parallel, e.g. with multiprocessing
net.cpu()
out_fn = os.path.join(outdir, "nn_%s_e%d.pkl" % (label_str, epoch))
pickle.dump(net, open(out_fn, 'wb'))
if use_cuda:
net.cuda()
if hasattr(nntype, "classify"):
out_fn = os.path.join(outdir, "tmp_targets_%s_%s.npy" % (label_str,epoch))
np.save(out_fn, targets)
# save best net every epoch
best_nn.cpu()
out_fn = os.path.join(outdir, "nn_best_%s.pkl" % label_str)
pickle.dump(best_nn, open(out_fn, 'wb'))
if use_cuda:
best_nn.cuda()
return best_nn, targets
def get_targets(self,act_map,indicators,label_spread=None):
"""Convert variant indicators into classification labels.
Parameters
----------
act_map : np.ndarray, shape=(n_variants,)
Initial classification labels to give each variant.
indicators : np.ndarray, shape=(len(data),)
Value to indicate which variant each data frame came from.
Returns
-------
        targets : np.ndarray, shape=(len(data),)
Classification labels for training.
"""
targets = np.zeros((len(indicators), 1))
print(targets.shape)
if label_spread == 'gaussian':
            targets = np.array([np.random.normal(act_map[i], 0.1) for i in indicators])
# Copyright (C) 2017-2020 JCT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author : <NAME> (<EMAIL>)
#
#
"""
This file contains utility functions
NOTE: This file must NOT have dependencies on other files in the macro
"""
# python imports
import os
import numpy as np
# ------------------------------------------------------------------------------
def CartGrid(x, y, z=None):
"""Build a cartesian grid data (nodes and connections). Returns a tuple with:
(ndarray nodes coordinate, ndarray cells connectivities)"""
if z is None:
nodes = np.array([[i, j, 0.] for j in y for i in x])
        nx = x.size - 1
        ny = y.size - 1
i, j = np.mgrid[0:nx, 0:ny]
ij = np.ravel_multi_index(
[list(i.ravel()), list(j.ravel())], (nx+1, ny+1), order='F')
cells = np.array([[i, i+1, i+1+nx+1, i+nx+1]
for i in ij], dtype='uint64')
else:
nodes = np.array([[i, j, k] for k in z for j in y for i in x])
nx = x.size - 1
ny = y.size - 1
nz = z.size - 1
i, j, k = np.mgrid[0:nx, 0:ny, 0:nz]
ijk = np.ravel_multi_index(
[list(i.ravel()), list(j.ravel()), list(
k.ravel())], (nx + 1, ny + 1, nz + 1),
order='F')
cells = np.array([[i, i+1, i+1+(nx+1), i+(nx+1),
i+(nx+1)*(ny+1), i+1+(nx+1) *
(ny+1), i+1+(nx+1)+(nx+1)*(ny+1),
i+(nx+1)+(nx+1)*(ny+1)]
for i in ijk], dtype='uint64')
return (nodes, cells)
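
# Minimal usage sketch (hypothetical coordinates): a 3x2 grid of nodes gives
# 2 quad cells; CartGrid returns the node coordinates and cell connectivities.
if __name__ == "__main__":
    _nodes, _cells = CartGrid(np.linspace(0.0, 2.0, 3), np.linspace(0.0, 1.0, 2))
    print(_nodes.shape)  # (6, 3): 6 nodes with x, y, z=0 coordinates
    print(_cells.shape)  # (2, 4): 2 quad cells, 4 node ids each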
# ------------------------------------------------------------------------------
def find_indexes(b):
"""This function is similar to the 'find' a MATLAB function"""
return [i for (i, vals) in enumerate(b) if vals]
# ------------------------------------------------------------------------------
def write_unv(fname, nodes, cells, mat=None):
"""
Write the UNV (Universal) file dataset format
reference in: https://docs.plm.automation.siemens.com/tdoc/nx/12/nx_help#uid:xid1128419:index_advanced:xid1404601:xid1404604
"""
# consts
sep = " -1"
si, coordsys, vertices, elements = 164, 2420, 2411, 2412
# settings
if mat is None:
mat = np.zeros((cells.shape[0],), dtype=np.int64) + 1
# write unv file
# print("-- writing file: {}".format(fname))
with open(fname, "w") as unv:
# unit system (164)
unv.write('{}\n'.format(sep))
unv.write('{:6g}\n'.format(si)) # unv code
unv.write('{:10d}{:20s}{:10d}\n'.format(1, "SI: Meters (newton)", 2))
unv.write('{:25.17E}{:25.17E}{:25.17E}\n{:25.17E}\n'.format(
1, 1, 1, 273.15))
unv.write('{}\n'.format(sep))
# coordinate system (2420)
unv.write('{}\n'.format(sep))
unv.write('{:6g}\n'.format(coordsys)) # unv code
unv.write('{:10d}\n'.format(1)) # coordsys label (uid)
unv.write('{:40s}\n'.format("SMESH_Mesh from Salome Geomechanics"))
# coordsys label, coordsys type (0: cartesian), coordsys color
unv.write('{:10d}{:10d}{:10d}\n'.format(1, 0, 0))
unv.write('{:40s}\n'.format("Global cartesian coord. system"))
unv.write('{:25.16E}{:25.16E}{:25.16E}\n'.format(1, 0, 0))
unv.write('{:25.16E}{:25.16E}{:25.16E}\n'.format(0, 1, 0))
unv.write('{:25.16E}{:25.16E}{:25.16E}\n'.format(0, 0, 1))
unv.write('{:25.16E}{:25.16E}{:25.16E}\n'.format(0, 0, 0))
unv.write('{}\n'.format(sep))
# write nodes coordinates
unv.write('{}\n'.format(sep))
unv.write('{:6g}\n'.format(vertices)) # unv code
for n in range(nodes.shape[0]):
# node-id, coordinate system label, displ. coord. system, color(11)
unv.write('{:10d}{:10d}{:10d}{:10d}\n'.format(n + 1, 1, 1, 11))
unv.write('{:25.16E}{:25.16E}{:25.16E}'.format(
nodes[n, 0], nodes[n, 1], nodes[n, 2]))
unv.write('\n')
unv.write('{}\n'.format(sep))
# write cells connectivities
unv.write('{}\n'.format(sep))
unv.write('{:6g}\n'.format(elements)) # unv code
for c in range(cells.shape[0]):
# node-id, coordinate system label, displ. coord. system, color(11)
unv.write('{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}\n'.format(
c + 1, 115, mat[c], mat[c], mat[c], 8))
unv.write('{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}'.format(
cells[c, 0], cells[c, 1], cells[c, 2], cells[c, 3],
cells[c, 4], cells[c, 5], cells[c, 6], cells[c, 7]))
unv.write('\n')
unv.write('{}\n'.format(sep))
# write cells regions
unv.write('{}\n'.format(sep))
unv.write('{:6g}\n'.format(2467)) # unv code
regions = np.unique(mat)
for region in regions:
ind = find_indexes(mat == region)
unv.write('{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}\n'.format(
region, 0, 0, 0, 0, 0, 0, len(ind)))
unv.write('Region_{}\n'.format(region))
i = 0
for c in range(len(ind)):
                unv.write('{:10d}{:10d}{:10d}{:10d}'.format(8, ind[c] + 1, 0, 0))
i += 1
if i == 2:
i = 0
unv.write('\n')
if i == 1:
unv.write('\n')
unv.write('{}\n'.format(sep))
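
# Minimal usage sketch (hypothetical output name): write a tiny 2x2x2-cell hex
# mesh built with CartGrid to UNV, with the default single material region.
if __name__ == "__main__":
    _x = np.linspace(0.0, 1.0, 3)
    _nodes, _cells = CartGrid(_x, _x, _x)
    write_unv("tiny_mesh.unv", _nodes, _cells)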
# ------------------------------------------------------------------------------
def write_mesh(fname, smesh, boundaries=None, mat=None):
"""
Write the mesh file format (mfem). Only works for hexahedron (cube)
TODO: impl. other finite elements
"""
import SMESH
# consts
header = """# automatically generated by hydrogeo_salome plugin
MFEM mesh v1.0
#
# MFEM Geometry Types (see mesh/geom.hpp):
#
# POINT = 0
# SEGMENT = 1
# TRIANGLE = 2
# SQUARE = 3
# TETRAHEDRON = 4
# CUBE = 5
#
"""
# settings
ncells = smesh.NbHexas()
nnodes = smesh.NbNodes()
dim = 3
if mat is None:
        mat = np.ones((ncells,), dtype=np.int64)
import os
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from sklearn.grid_search import GridSearchCV
from sklearn.externals import joblib
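# NOTE: on modern scikit-learn these moved to sklearn.model_selection.GridSearchCV
# and the standalone 'joblib' package, respectively.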
from pprint import pprint
class RemoveColumns(TransformerMixin):
def __init__(self, cols):
self.cols = cols
def fit(self, X, y=None):
# stateless transformer
return self
def transform(self, x):
x_cols = x.drop(self.cols, axis=1)
return x_cols
class EstimatorSelectionHelper:
def __init__(self, models, params):
if not set(models.keys()).issubset(set(params.keys())):
missing_params = list(set(models.keys()) - set(params.keys()))
raise ValueError("Some estimators are missing parameters: %s" % missing_params)
self.models = models
self.params = params
self.keys = models.keys()
self.grid_searches = {}
def fit(self, X, y, cv=3, n_jobs=1, verbose=1, scoring=None, refit=False):
for key in self.keys:
print("\n%s:" % key)
model = self.models[key]
params = self.params[key]
gs = GridSearchCV(model, params, cv=cv, n_jobs=n_jobs,
verbose=verbose, scoring=scoring, refit=refit)
gs.fit(X, y)
current_dir = os.path.dirname(os.path.realpath(__file__))
joblib.dump(gs, current_dir + '/../trained_models/' + str(key) + '.pkl', compress=1)
self.grid_searches[key] = gs
def score_summary(self, sort_by='mean_score'):
def row(key, scores, params):
d = {
'estimator': key,
'min_score': min(scores),
'max_score': max(scores),
'mean_score': np.mean(scores),
                'std_score': np.std(scores),
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
# Configure widgets to select occlusion colour, segmentation granularity
# and explained class
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
# explained class
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
           'name': 'Labrador retriever'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
           'name': 'tennis ball'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
           'name': 'golden retriever'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
           'name': 'Labrador retriever'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
           'name': 'tennis ball'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
           'name': 'golden retriever'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
           'name': 'Labrador retriever'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
           'name': 'tennis ball'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
           'name': 'golden retriever'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
           'name': 'Labrador retriever'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
           'name': 'tennis ball'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
           'name': 'golden retriever'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
           'name': 'Labrador retriever'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
           'name': 'tennis ball'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
           'name': 'golden retriever'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
           'name': 'Labrador retriever'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
           'name': 'tennis ball'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
           'name': 'golden retriever'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
           'name': 'Labrador retriever'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
           'name': 'tennis ball'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
           'name': 'golden retriever'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
           'name': 'Labrador retriever'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
           'name': 'tennis ball'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
           'name': 'golden retriever'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
           'name': 'Labrador retriever'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
           'name': 'tennis ball'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
           'name': 'golden retriever'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
           'name': 'Labrador retriever'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
           'name': 'tennis ball'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
           'name': 'golden retriever'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
           'name': 'Labrador retriever'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
           'name': 'tennis ball'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
           'name': 'golden retriever'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
           'name': 'Labrador retriever'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
           'name': 'tennis ball'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
           'name': 'golden retriever'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
           'name': 'Labrador retriever'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
           'name': 'tennis ball'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
           'name': 'golden retriever'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
           'name': 'Labrador retriever'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
           'name': 'tennis ball'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
           'name': 'golden retriever'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
           'name': 'Labrador retriever'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
           'name': 'tennis ball'}}
]
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
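    # 'setosa&1&<j>' block: the middle index of the '<species>&<i>&<j>' key
    # pattern switches from 0 to 1 here. Interpreting the key layout alone
    # (an assumption, not stated in this table), <i> appears to select the
    # explained output/class and <j> the test configuration; each value is
    # the precomputed 2-element attribution vector for that setting.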
'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&35': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&75': np.array([0.0, -0.4756207622944677]),
'setosa&1&76': np.array([0.0, -0.4854334805210761]),
'setosa&1&77': np.array([0.0, 0.16885577975809635]),
'setosa&1&78': np.array([0.0, 0.395805885538554]),
'setosa&1&79': np.array([0.0, 0.2538072707138344]),
'setosa&1&80': np.array([0.0, -0.4756207622944677]),
'setosa&1&81': np.array([0.0, -0.4756207622944677]),
'setosa&1&82': np.array([0.0, -0.4756207622944677]),
'setosa&1&83': np.array([0.0, -0.4756207622944677]),
'setosa&1&84': np.array([0.0, -0.4854334805210761]),
'setosa&1&85': np.array([0.0, -0.4854334805210761]),
'setosa&1&86': np.array([0.0, -0.4854334805210761]),
'setosa&1&87': np.array([0.0, 0.16885577975809635]),
'setosa&1&88': np.array([0.0, 0.16885577975809635]),
'setosa&1&89': np.array([0.0, 0.395805885538554]),
'setosa&1&90': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&91': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&92': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&93': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&94': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&95': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&96': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&97': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&98': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&99': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&100': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&101': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&102': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&103': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&104': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&105': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&106': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&107': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&108': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&109': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&110': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&111': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&112': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&113': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&114': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&115': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&116': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&117': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&118': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&119': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&120': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&121': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&122': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&123': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&124': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&125': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&126': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&127': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&128': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&129': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&130': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&131': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&132': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&133': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&134': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&135': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&136': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&137': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&138': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&139': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&140': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&141': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&142': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&143': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&144': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&145': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&146': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&147': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&148': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&149': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&150': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&151': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&152': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&153': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&154': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&155': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&156': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&157': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&158': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&159': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&160': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&161': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&162': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&163': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&164': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&165': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&166': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&167': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&168': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&169': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&170': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&171': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&172': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&173': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&174': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&175': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&176': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&177': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&178': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&179': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&180': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&181': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&182': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&183': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&184': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&185': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&186': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&187': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&188': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&189': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&190': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&191': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&192': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&193': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&194': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&195': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&196': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&197': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&198': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&199': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&200': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&201': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&202': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&203': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&204': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&205': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&206': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&207': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&208': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&209': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&210': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&211': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&212': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&213': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&214': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&215': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&216': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&217': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&218': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&219': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&220': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&221': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&222': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&223': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&224': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&225': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&226': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&227': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&228': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&229': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&230': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&231': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&232': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&233': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&234': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&235': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&236': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&237': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&238': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&239': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&240': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&241': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&242': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&243': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&244': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&245': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&246': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&247': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&248': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&249': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&250': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&251': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&252': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&253': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&254': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&255': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&256': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&257': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&258': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&259': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&260': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&261': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&262': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&263': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&264': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&265': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&266': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&267': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&268': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&269': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&270': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&271': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&275': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&276': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&277': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&278': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&279': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&280': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&281': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&285': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&286': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&290': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&291': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&292': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&293': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&294': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&295': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&296': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&300': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&301': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&305': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&306': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&307': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&308': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&309': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&310': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&311': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
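    # 'setosa&2&<j>' block: same key pattern, middle index 2 (presumably the
    # third explained output/class for the setosa samples).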
'setosa&2&0': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&1': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&2': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&3': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&4': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&5': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&6': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&7': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&8': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&9': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&10': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&11': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&12': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&13': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&14': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&15': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&16': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&17': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&18': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&19': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&20': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&21': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&22': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&23': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&24': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&25': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&26': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&27': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&28': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&29': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&30': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&31': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&32': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&33': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&34': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&35': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&36': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&37': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&38': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&39': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&40': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&41': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&42': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&43': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&44': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&45': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&46': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&50': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&51': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&52': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&53': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&54': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&55': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&56': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&60': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&61': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&65': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&66': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&67': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&68': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&69': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&70': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&71': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&75': np.array([0.0, -0.47562425924289314]),
'setosa&2&76': np.array([0.0, -0.48543689565931186]),
'setosa&2&77': np.array([0.0, -0.7348263896003956]),
'setosa&2&78': np.array([0.0, -0.7920887571493729]),
'setosa&2&79': np.array([0.0, -0.507614207038711]),
'setosa&2&80': np.array([0.0, -0.47562425924289314]),
'setosa&2&81': np.array([0.0, -0.47562425924289314]),
'setosa&2&82': np.array([0.0, -0.47562425924289314]),
'setosa&2&83': np.array([0.0, -0.47562425924289314]),
'setosa&2&84': np.array([0.0, -0.48543689565931186]),
'setosa&2&85': np.array([0.0, -0.48543689565931186]),
'setosa&2&86': np.array([0.0, -0.48543689565931186]),
'setosa&2&87': np.array([0.0, -0.7348263896003956]),
'setosa&2&88': np.array([0.0, -0.7348263896003956]),
'setosa&2&89': np.array([0.0, -0.7920887571493729]),
'setosa&2&90': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&91': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&92': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&93': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&94': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&95': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&96': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&97': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&98': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&99': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&100': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&101': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&102': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&103': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&104': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&105': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&106': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&107': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&108': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&109': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&110': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&111': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&112': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&113': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&114': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&115': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&116': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&117': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&118': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&119': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&120': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&121': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&122': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&123': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&124': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&125': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&126': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&127': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&128': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&129': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&130': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&131': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&132': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&133': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&134': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&135': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&136': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&137': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&138': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&139': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&140': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&141': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&142': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&143': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&144': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&145': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&146': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&147': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&148': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&149': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&150': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&151': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&152': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&153': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&154': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&155': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&156': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&157': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&158': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&159': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&160': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&161': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&162': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&163': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&164': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&165': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&166': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&167': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&168': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&169': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&170': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&171': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&172': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&173': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&174': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&175': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&176': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&177': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&178': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&179': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&180': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&181': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&182': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&183': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&184': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&185': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&186': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&187': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&188': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&189': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&190': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&191': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&192': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&193': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&194': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&195': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&196': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&197': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&198': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&199': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&200': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&201': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&202': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&203': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&204': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&205': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&206': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&207': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&208': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&209': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&210': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&211': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&212': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&213': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&214': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&215': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&216': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&217': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&218': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&219': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&220': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&221': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&222': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&223': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&224': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&225': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&226': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&227': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&228': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&229': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&230': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&231': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&232': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&233': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&234': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&235': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&236': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&237': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&238': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&239': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&240': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&241': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&242': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&243': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&244': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&245': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&246': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&247': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&248': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&249': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&250': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&251': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&252': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&253': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&254': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&255': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&256': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&257': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&258': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&259': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&260': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&261': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&262': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&263': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&264': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&265': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&266': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&267': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&268': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&269': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&270': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&271': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&275': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&276': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&277': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&278': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&279': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&280': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&281': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&285': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&286': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&290': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&291': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&292': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&293': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&294': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&295': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&296': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&300': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&301': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&305': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&306': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&307': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&308': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&309': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&310': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&311': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
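    # 'versicolor&0&<j>' block: the species component of the key changes here.
    # Many entries reuse the magnitudes of the 'setosa&0&<j>' block with sign
    # changes (e.g. 'versicolor&0&0' is 'setosa&0&105' negated), which is
    # consistent with these being per-class attributions of the same model.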
'versicolor&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&2': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&3': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&6': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&7': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&9': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&10': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&12': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&13': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&14': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&17': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&18': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&21': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&22': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&24': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&25': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&27': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&28': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&29': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&32': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&33': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&36': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&37': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&39': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&40': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&42': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&43': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&44': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&45': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&46': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&50': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&51': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&52': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&53': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&54': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&55': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&56': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&66': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&67': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&69': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&70': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&75': np.array([0.0, -0.95124502153736]),
'versicolor&0&76': np.array([0.0, -0.9708703761803881]),
'versicolor&0&77': np.array([0.0, 0.5659706098422994]),
'versicolor&0&78': np.array([0.0, 0.3962828716108186]),
'versicolor&0&79': np.array([0.0, 0.2538069363248767]),
'versicolor&0&80': np.array([0.0, -0.9708703761803881]),
'versicolor&0&81': np.array([0.0, -0.3631376646911367]),
'versicolor&0&82': np.array([0.0, -0.5804857652839247]),
'versicolor&0&83': np.array([0.0, -0.8943993997517804]),
'versicolor&0&84': np.array([0.0, -0.4231275527222919]),
'versicolor&0&85': np.array([0.0, -0.6164235822373675]),
'versicolor&0&86': np.array([0.0, -0.9166476163222441]),
'versicolor&0&87': np.array([0.0, 0.5659706098422994]),
'versicolor&0&88': np.array([0.0, 0.5659706098422994]),
'versicolor&0&89': np.array([0.0, 0.3962828716108186]),
'versicolor&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&92': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&93': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&96': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&97': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&99': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&100': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&102': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&103': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&104': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&107': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&108': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&111': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&112': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&114': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&115': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&117': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&118': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&119': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&120': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&121': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&122': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&123': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&124': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&125': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&126': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&127': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&128': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&129': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&130': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&131': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&132': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&133': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&134': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&137': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&138': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&141': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&142': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&144': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&145': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&147': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&148': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&149': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&152': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&153': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&156': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&157': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&159': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&160': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&162': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&163': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&164': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&167': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&168': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&171': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&172': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&174': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&175': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&177': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&178': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&179': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&180': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&181': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&182': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&183': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&184': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&185': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&186': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&187': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&188': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&189': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&190': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&191': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&192': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&193': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&194': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&195': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&196': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&197': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&198': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&199': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&200': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&201': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&202': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&203': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&204': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&205': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&206': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&207': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&208': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&209': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&210': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&211': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&212': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&213': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&214': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&215': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&216': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&217': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&218': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&219': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&220': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&221': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&222': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&223': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&224': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&225': np.array([-0.04777085826693217, -0.931704979630315]),
'versicolor&0&226': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&227': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&228': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&229': np.array([-0.46216647196120714, 0.35468591243823655]),
'versicolor&0&230': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&231': np.array([-0.3707180757031537, -0.1977196581472426]),
'versicolor&0&232': np.array([-0.1043459833293615, -0.5233314327065356]),
'versicolor&0&233': np.array([-0.049289647556763364, -0.8736084405111605]),
'versicolor&0&234': np.array([-0.34078174031874375, -0.25874482325965437]),
'versicolor&0&235': np.array([-0.050841051273783675, -0.5877587283589205]),
'versicolor&0&236': np.array([-0.0161720977425142, -0.9096817855236822]),
'versicolor&0&237': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&238': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&239': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&240': np.array([-0.11329659732608087, -0.8671819100849522]),
'versicolor&0&241': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&242': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&243': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&244': np.array([-0.6814868825686854, 0.35066801608083215]),
'versicolor&0&245': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&246': np.array([-0.6425009695928476, -0.24851992476830956]),
'versicolor&0&247': np.array([-0.5151243662384031, -0.3255567772442641]),
'versicolor&0&248': np.array([-0.16157511199607094, -0.7754323813403634]),
'versicolor&0&249': np.array([-0.6300442788906601, -0.28361140069713875]),
'versicolor&0&250': np.array([-0.4875864856121089, -0.3614122096616301]),
'versicolor&0&251': np.array([-0.08968204532514226, -0.8491191210330045]),
'versicolor&0&252': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&253': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&254': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&255': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&256': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&257': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&258': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&259': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&260': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&261': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&262': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&263': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&264': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&265': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&266': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&267': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&268': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&269': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&270': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&271': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&275': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&276': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&277': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&278': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&279': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&280': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&281': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&285': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&286': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&290': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&291': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&292': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&293': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&294': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&295': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&296': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&300': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&301': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&305': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&306': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&307': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&308': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&309': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&310': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&311': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
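# --- 'versicolor&1&*' entries (case indices 0-314) ---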
'versicolor&1&0': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&1': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&2': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&3': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&4': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&5': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&6': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&7': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&8': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&9': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&10': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&11': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&12': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&13': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&14': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&15': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&16': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&17': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&18': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&19': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&20': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&21': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&22': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&23': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&24': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&25': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&26': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&27': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&28': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&29': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&30': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&31': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&32': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&33': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&34': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&35': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&36': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&37': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&38': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&39': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&40': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&41': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&42': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&43': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&44': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&45': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&46': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&50': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&51': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&52': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&53': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&54': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&55': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&56': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&60': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&61': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&65': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&66': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&67': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&68': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&69': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&70': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&71': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&75': np.array([0.0, 0.4756207622944677]),
'versicolor&1&76': np.array([0.0, 0.4854334805210761]),
'versicolor&1&77': np.array([0.0, 0.16885577975809635]),
'versicolor&1&78': np.array([0.0, 0.395805885538554]),
'versicolor&1&79': np.array([0.0, 0.2538072707138344]),
'versicolor&1&80': np.array([0.0, 0.4854334805210761]),
'versicolor&1&81': np.array([0.0, 0.7613919530844643]),
'versicolor&1&82': np.array([0.0, 0.6668230985485095]),
'versicolor&1&83': np.array([0.0, 0.4904755652105692]),
'versicolor&1&84': np.array([0.0, 0.8121046082359693]),
'versicolor&1&85': np.array([0.0, 0.6855766903749089]),
'versicolor&1&86': np.array([0.0, 0.5008471974438506]),
'versicolor&1&87': np.array([0.0, 0.16885577975809635]),
'versicolor&1&88': np.array([0.0, 0.16885577975809635]),
'versicolor&1&89': np.array([0.0, 0.395805885538554]),
'versicolor&1&90': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&91': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&92': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&93': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&94': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&95': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&96': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&97': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&98': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&99': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&100': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&101': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&102': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&103': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&104': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&105': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&106': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&107': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&108': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&109': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&110': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&111': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&112': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&113': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&114': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&115': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&116': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&117': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&118': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&119': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&120': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&121': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&122': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&123': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&124': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&125': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&126': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&127': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&128': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&129': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&130': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&131': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&132': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&133': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&134': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&135': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&136': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&137': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&138': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&139': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&140': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&141': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&142': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&143': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&144': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&145': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&146': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&147': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&148': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&149': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&150': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&151': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&152': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&153': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&154': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&155': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&156': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&157': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&158': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&159': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&160': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&161': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&162': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&163': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&164': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&165': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&166': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&167': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&168': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&169': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&170': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&171': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&172': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&173': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&174': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&175': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&176': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&177': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&178': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&179': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&180': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&181': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&182': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&183': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&184': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&185': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&186': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&187': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&188': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&189': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&190': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&191': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&192': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&193': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&194': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&195': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&196': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&197': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&198': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&199': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&200': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&201': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&202': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&203': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&204': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&205': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&206': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&207': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&208': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&209': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&210': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&211': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&212': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&213': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&214': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&215': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&216': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&217': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&218': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&219': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&220': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&221': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&222': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&223': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&224': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&225': np.array([0.6253337666017573, 0.21983620140147825]),
'versicolor&1&226': np.array([0.6178968870349187, 0.22747652768125623]),
'versicolor&1&227': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&228': np.array([0.6762617119303499, 0.19305674697949574]),
'versicolor&1&229': np.array([0.7182033715159247, 0.0970420677941148]),
'versicolor&1&230': np.array([0.6178968870349187, 0.22747652768125623]),
'versicolor&1&231': np.array([0.4976586558055923, 0.5393318265947251]),
'versicolor&1&232': np.array([0.4361093214026388, 0.4279491486345008]),
'versicolor&1&233': np.array([0.613985959011319, 0.23148898930908424]),
'versicolor&1&234': np.array([0.46747697713468217, 0.586607956360002]),
'versicolor&1&235': np.array([0.41044950174869577, 0.45415985894965977]),
'versicolor&1&236': np.array([0.6057447478066579, 0.23993389556303918]),
'versicolor&1&237': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&238': np.array([0.7245803616608639, 0.18141483095066183]),
'versicolor&1&239': np.array([0.6762617119303499, 0.19305674697949574]),
'versicolor&1&240': np.array([0.056623968925773045, 0.43360725859686644]),
'versicolor&1&241': np.array([0.020169511418752378, 0.47015948158260334]),
'versicolor&1&242': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&243': np.array([0.4146290154471569, 0.4964318942067898]),
'versicolor&1&244': np.array([0.3351719071445682, 0.20616862401308342]),
'versicolor&1&245': np.array([0.020169511418752378, 0.47015948158260334]),
'versicolor&1&246': np.array([0.24022705822940116, 0.7185371033867092]),
'versicolor&1&247': np.array([0.010447231513465048, 0.6616528865917504]),
'versicolor&1&248': np.array([0.024556360933646205, 0.4723948285969902]),
'versicolor&1&249': np.array([0.21321406009810842, 0.7648907754638917]),
'versicolor&1&250': np.array([-0.027450681014480036, 0.6999336015080245]),
'versicolor&1&251': np.array([-0.0164329511444131, 0.5132208276383963]),
'versicolor&1&252': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&253': np.array([0.5806365328450954, 0.47262706807712623]),
'versicolor&1&254': np.array([0.4146290154471569, 0.4964318942067898]),
'versicolor&1&255': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&256': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&257': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&258': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&259': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&260': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&261': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&262': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&263': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&264': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&265': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&266': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&267': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&268': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&269': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&270': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&271': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&275': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&276': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&277': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&278': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&279': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&280': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&281': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&285': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&286': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&290': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&291': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&292': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&293': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&294': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&295': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&296': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&300': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&301': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&305': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&306': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&307': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&308': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&309': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&310': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&311': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
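# --- 'versicolor&2&*' entries ---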
'versicolor&2&0': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&1': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&2': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&3': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&4': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&5': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&6': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&7': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&8': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&9': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&10': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&11': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&12': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&13': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&14': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&15': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&16': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&17': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&18': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&19': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&20': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&21': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&22': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&23': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&24': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&25': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&26': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&27': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&28': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&29': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&30': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&31': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&32': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&33': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&34': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&35': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&36': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&37': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&38': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&39': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&40': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&41': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&42': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&43': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&44': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&45': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&46': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&50': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&51': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&52': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&53': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&54': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&55': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&56': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&60': np.array([-0.5227340800279543, 0.4209267574088147]),
'versicolor&2&61': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'versicolor&2&65': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&66': np.array([-0.56940429361245, -0.3442345437882425]),
'versicolor&2&67': np.array([-0.6452502612229726, -0.04686872432129788]),
'versicolor&2&68': np.array([-0.596973015481227, 0.37395461795328944]),
'versicolor&2&69': np.array([-0.5760086048531655, -0.3353570725513232]),
'versicolor&2&70': np.array([-0.6488228567611906, -0.03186184826812757]),
'versicolor&2&71': np.array([-0.5903420131350324, 0.384224764046184]),
'versicolor&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&75': np.array([0.0, 0.47562425924289314]),
'versicolor&2&76': np.array([0.0, 0.4854368956593117]),
'versicolor&2&77': np.array([0.0, -0.7348263896003956]),
'versicolor&2&78': np.array([0.0, -0.7920887571493729]),
'versicolor&2&79': np.array([0.0, -0.507614207038711]),
'versicolor&2&80': np.array([0.0, 0.4854368956593117]),
'versicolor&2&81': np.array([0.0, -0.3982542883933272]),
'versicolor&2&82': np.array([0.0, -0.08633733326458487]),
'versicolor&2&83': np.array([0.0, 0.4039238345412103]),
'versicolor&2&84': np.array([0.0, -0.38897705551367706]),
'versicolor&2&85': np.array([0.0, -0.06915310813754129]),
'versicolor&2&86': np.array([0.0, 0.41580041887839214]),
'versicolor&2&87': np.array([0.0, -0.7348263896003956]),
'versicolor&2&88': np.array([0.0, -0.7348263896003956]),
'versicolor&2&89': np.array([0.0, -0.7920887571493729]),
'versicolor&2&90': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&91': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&92': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&93': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&94': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&95': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&96': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&97': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&98': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&99': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&100': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&101': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&102': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&103': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&104': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&105': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&106': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&107': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&108': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&109': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&110': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&111': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&112': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&113': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&114': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&115': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&116': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&117': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&118': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&119': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&120': np.array([-0.7638917827493686, 0.868015757634957]),
'versicolor&2&121': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&122': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&123': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&124': np.array([-0.32941440381886555, -0.4173178729969913]),
'versicolor&2&125': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&126': np.array([-0.18291442454393395, -0.2654898014002494]),
'versicolor&2&127': np.array([-0.5797728557269727, 0.3163189837954924]),
'versicolor&2&128': np.array([-0.7579323596667402, 0.8054136823046655]),
'versicolor&2&129': np.array([-0.1948624323669993, -0.23753953755286383]),
'versicolor&2&130': np.array([-0.6437698977881832, 0.3909540110317858]),
'versicolor&2&131': np.array([-0.7963046521980063, 0.846536369471985]),
'versicolor&2&132': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&133': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&134': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&135': np.array([-0.3219660907491514, 0.7482043503408669]),
'versicolor&2&136': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&137': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&138': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&139': np.array([0.2626914501948546, -0.5596191134224637]),
'versicolor&2&140': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&141': np.array([0.4734444929420575, -0.6150974537943872]),
'versicolor&2&142': np.array([0.5369392542176313, -0.430867927332838]),
'versicolor&2&143': np.array([-0.19892251970509112, 0.5718543863753405]),
'versicolor&2&144': np.array([0.5071047612208237, -0.6507546896558788]),
'versicolor&2&145': np.array([0.5629877361048359, -0.4485515113017818]),
'versicolor&2&146': np.array([-0.3047657227470458, 0.6788631774846587]),
'versicolor&2&147': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&148': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&149': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&150': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&151': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&152': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&153': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&154': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&155': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&156': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&157': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&158': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&159': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&160': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&161': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&162': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&163': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&164': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&165': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&166': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&167': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&168': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&169': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&170': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&171': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&172': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&173': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&174': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&175': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&176': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&177': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&178': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&179': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&180': np.array([-0.7638917827493686, 0.868015757634957]),
'versicolor&2&181': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&182': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&183': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&184': np.array([-0.32941440381886555, -0.4173178729969913]),
'versicolor&2&185': np.array([-0.8001553485824509, 0.9049358162753539]),
'versicolor&2&186': np.array([-0.18291442454393395, -0.2654898014002494]),
'versicolor&2&187': np.array([-0.5797728557269727, 0.3163189837954924]),
'versicolor&2&188': np.array([-0.7579323596667402, 0.8054136823046655]),
'versicolor&2&189': np.array([-0.1948624323669993, -0.23753953755286383]),
'versicolor&2&190': np.array([-0.6437698977881832, 0.3909540110317858]),
'versicolor&2&191': np.array([-0.7963046521980063, 0.846536369471985]),
'versicolor&2&192': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&193': np.array([-0.26179245521040034, -0.7067672760776678]),
'versicolor&2&194': np.array([-0.14690789675963867, -0.7352367260447958]),
'versicolor&2&195': np.array([-0.3219660907491514, 0.7482043503408669]),
'versicolor&2&196': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&197': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&198': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&199': np.array([0.2626914501948546, -0.5596191134224637]),
'versicolor&2&200': np.array([-0.43839553940476644, 0.8642446918440131]),
'versicolor&2&201': np.array([0.4734444929420575, -0.6150974537943872]),
'versicolor&2&202': np.array([0.5369392542176313, -0.430867927332838]),
'versicolor&2&203': np.array([-0.19892251970509112, 0.5718543863753405]),
'versicolor&2&204': np.array([0.5071047612208237, -0.6507546896558788]),
'versicolor&2&205': np.array([0.5629877361048359, -0.4485515113017818]),
'versicolor&2&206': np.array([-0.3047657227470458, 0.6788631774846587]),
'versicolor&2&207': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&208': np.array([-0.05474251929945989, -0.7566498134597841]),
'versicolor&2&209': np.array([0.17291299562995102, -0.7651995812779756]),
'versicolor&2&210': np.array([0.37157691321004915, 0.12216227283618836]),
'versicolor&2&211': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&212': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&213': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&214': np.array([0.4741571944522723, -0.3872697414416878]),
'versicolor&2&215': np.array([0.24630541996506908, 0.24630541996506994]),
'versicolor&2&216': np.array([0.68663266357557, -0.6475988779804592]),
'versicolor&2&217': np.array([0.8701760330833639, -0.5914646440996656]),
'versicolor&2&218': np.array([0.6273836195848199, -0.15720981251964872]),
'versicolor&2&219': np.array([0.7292373173099087, -0.6975400952780954]),
'versicolor&2&220': np.array([0.9270035696082471, -0.640582639672401]),
'versicolor&2&221': np.array([0.6863652799597699, -0.21335694415409426]),
'versicolor&2&222': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&223': np.array([0.04449246321056282, -0.709644945972203]),
'versicolor&2&224': np.array([0.2953784217387408, -0.6750352694420283]),
'versicolor&2&225': np.array([-0.5775629083348267, 0.7118687782288384]),
'versicolor&2&226': np.array([-0.6016445709024666, 0.7366089009875252]),
'versicolor&2&227': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&228': np.array([-0.0917622729715107, -0.7645776302158537]),
'versicolor&2&229': np.array([-0.25603689955471853, -0.451727980232351]),
'versicolor&2&230': np.array([-0.6016445709024666, 0.7366089009875252]),
'versicolor&2&231': np.array([-0.1269405801024398, -0.34161216844748166]),
'versicolor&2&232': np.array([-0.33176333807327857, 0.09538228407203546]),
'versicolor&2&233': np.array([-0.564696311454556, 0.6421194512020755]),
'versicolor&2&234': np.array([-0.12669523681593967, -0.32786313310034665]),
'versicolor&2&235': np.array([-0.35960845047491363, 0.1335988694092619]),
'versicolor&2&236': np.array([-0.589572650064144, 0.6697478899606418]),
'versicolor&2&237': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&238': np.array([-0.28356111726513855, -0.739741315226852]),
'versicolor&2&239': np.array([-0.0917622729715107, -0.7645776302158537]),
'versicolor&2&240': np.array([0.05667262840030629, 0.4335746514880877]),
'versicolor&2&241': np.array([0.0202211257171063, 0.470123810164804]),
'versicolor&2&242': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&243': np.array([0.22461127196921116, -0.7375780139111495]),
'versicolor&2&244': np.array([0.3463149754241171, -0.5568366400939154]),
'versicolor&2&245': np.array([0.0202211257171063, 0.470123810164804]),
'versicolor&2&246': np.array([0.4022739113634462, -0.4700171786183992]),
'versicolor&2&247': np.array([0.5046771347249378, -0.33609610934748635]),
'versicolor&2&248': np.array([0.1370187510624256, 0.30303755274337163]),
'versicolor&2&249': np.array([0.41683021879255133, -0.4812793747667524]),
'versicolor&2&250': np.array([0.5150371666265885, -0.33852139184639396]),
'versicolor&2&251': np.array([0.10611499646955676, 0.33589829339460586]),
'versicolor&2&252': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&253': np.array([-0.052990507284891984, -0.7625494034929868]),
'versicolor&2&254': np.array([0.22461127196921116, -0.7375780139111495]),
'versicolor&2&255': np.array([0.5188517506916897, 0.036358567813067386]),
'versicolor&2&256': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&257': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&258': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&259': np.array([0.5354807894355184, -0.3418054346754283]),
'versicolor&2&260': np.array([0.5131939273945454, 0.04199748266790813]),
'versicolor&2&261': np.array([0.5761361484884252, -0.44637460220261904]),
'versicolor&2&262': np.array([0.7268664040181829, -0.40159406680426807]),
'versicolor&2&263': np.array([0.5917672401610737, -0.061499563231173816]),
'versicolor&2&264': np.array([0.5921993039887428, -0.46498571089163954]),
'versicolor&2&265': np.array([0.7470482158282458, -0.4169281153671854]),
'versicolor&2&266': np.array([0.5967658480721675, -0.06546963852548916]),
'versicolor&2&267': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&268': np.array([0.06285591932387405, -0.6914253444924359]),
'versicolor&2&269': np.array([0.34904320225465857, -0.6233384360811872]),
'versicolor&2&270': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&271': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&275': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&276': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&277': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&278': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&279': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&280': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&281': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&285': np.array([-0.8252668830593566, 0.11450866713130668]),
'versicolor&2&286': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'versicolor&2&290': np.array([-0.8211795643076095, 0.11869650771610692]),
'versicolor&2&291': np.array([-0.8470213454017305, -0.0910504504559782]),
'versicolor&2&292': np.array([-0.8783521565540571, 0.01381094589198601]),
'versicolor&2&293': np.array([-0.8388485924434891, 0.09800790238640067]),
'versicolor&2&294': np.array([-0.8495871633670822, -0.08820642363054954]),
'versicolor&2&295': np.array([-0.8784816772224661, 0.017184907022714958]),
'versicolor&2&296': np.array([-0.835455914569297, 0.10189258327760495]),
'versicolor&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'versicolor&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'versicolor&2&300': np.array([-0.5227340800279543, 0.4209267574088147]),
'versicolor&2&301': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'versicolor&2&305': np.array([-0.5140708637198534, 0.4305361238057349]),
'versicolor&2&306': np.array([-0.56940429361245, -0.3442345437882425]),
'versicolor&2&307': np.array([-0.6452502612229726, -0.04686872432129788]),
'versicolor&2&308': np.array([-0.596973015481227, 0.37395461795328944]),
'versicolor&2&309': np.array([-0.5760086048531655, -0.3353570725513232]),
'versicolor&2&310': np.array([-0.6488228567611906, -0.03186184826812757]),
'versicolor&2&311': np.array([-0.5903420131350324, 0.384224764046184]),
'versicolor&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'versicolor&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
'virginica&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&2': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&3': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&6': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&7': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&9': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&10': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&12': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&13': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&14': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&17': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&18': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&21': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&22': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&24': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&25': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&27': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&28': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&29': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&32': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&33': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&36': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&37': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&39': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&40': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&42': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&43': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&44': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&45': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&46': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&47': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&48': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&49': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&50': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&51': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&52': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&53': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&54': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&55': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&56': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&57': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&58': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&59': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'virginica&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&62': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&63': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'virginica&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&66': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&67': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'virginica&0&69': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&70': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'virginica&0&72': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&73': np.array([0.11200181312407695, -0.5330612470996793]),
'virginica&0&74': np.array([0.19998284600732558, -0.3489062419702088]),
'virginica&0&75': np.array([0.0, -0.95124502153736]),
'virginica&0&76': np.array([0.0, -0.9708703761803881]),
'virginica&0&77': np.array([0.0, -0.5659706098422994]),
'virginica&0&78': np.array([0.0, -0.3962828716108186]),
'virginica&0&79': np.array([0.0, 0.2538069363248767]),
'virginica&0&80': np.array([0.0, -0.9708703761803881]),
'virginica&0&81': np.array([0.0, -0.5659706098422994]),
'virginica&0&82': np.array([0.0, -0.3962828716108186]),
'virginica&0&83': np.array([0.0, -0.8943993997517804]),
'virginica&0&84': np.array([0.0, -0.5659706098422994]),
'virginica&0&85': np.array([0.0, -0.3962828716108186]),
'virginica&0&86': np.array([0.0, -0.9166476163222441]),
'virginica&0&87': np.array([0.0, -0.3962828716108186]),
'virginica&0&88': np.array([0.0, -0.5466925844560601]),
'virginica&0&89': np.array([0.0, -0.38529908946531777]),
'virginica&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&92': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&93': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&96': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&97': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&99': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&100': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&102': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&103': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&104': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&107': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&108': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&111': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&112': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&114': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&115': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&117': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&118': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&119': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&120': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&121': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&122': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&123': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&124': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&125': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&126': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&127': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&128': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&129': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&130': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&131': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&132': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&133': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&134': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'virginica&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&137': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&138': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'virginica&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&141': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&142': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'virginica&0&144': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&145': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'virginica&0&147': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&148': np.array([-0.6898990333725056, -0.2534947697713122]),
'virginica&0&149': np.array([-0.769491694075929, -0.22884642137519118]),
'virginica&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&152': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&153': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&156': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&157': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&159': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&160': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&162': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&163': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&164': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&167': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&168': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&171': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&172': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&174': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&175': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&177': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&178': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&179': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&180': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&181': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&182': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&183': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&184': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&185': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&186': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&187': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&188': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&189': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&190': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&191': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&192': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&193': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&194': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&195': np.array([-0.19684482070614498, -0.7845939961595055]),
'virginica&0&196': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&197': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&198': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&199': np.array([-0.8063011502229427, 0.4134300066735808]),
'virginica&0&200': np.array([-0.07475231751447156, -0.9062785678426409]),
'virginica&0&201': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&202': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&203': np.array([-0.2798927835773098, -0.6581136857450849]),
'virginica&0&204': np.array([-0.6782037543706109, -0.29560073676989834]),
'virginica&0&205': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&206': np.array([-0.16106555563262584, -0.777621649099753]),
'virginica&0&207': np.array([-0.7694171988675237, -0.276633135028249]),
'virginica&0&208': np.array([-0.6898990333725056, -0.2534947697713122]),
'virginica&0&209': np.array([-0.769491694075929, -0.22884642137519118]),
'virginica&0&210': np.array([-0.7431524521056113, -0.24432235603856345]),
'virginica&0&211': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&212': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&213': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&214': np.array([-0.9706534384443797, 0.007448195602953232]),
'virginica&0&215': np.array([-0.4926091071260067, -0.49260910712601286]),
'virginica&0&216': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&217': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&218': np.array([-0.8486399726113752, -0.13537345771621853]),
'virginica&0&219': np.array([-0.9550700362273441, -0.025428672111930138]),
'virginica&0&220': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&221': np.array([-0.7870031444780577, -0.1952404625292782]),
'virginica&0&222': np.array([-0.9672121512728677, -0.012993005706020504]),
'virginica&0&223': np.array([-0.9569238464170641, -0.02354905845282574]),
'virginica&0&224': np.array([-0.9677320606992984, -0.012432557482778654]),
'virginica&0&225': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&226': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&227': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&228': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&229': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&230': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&231': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&232': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&233': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&234': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&235': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&236': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&237': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&238': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&239': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&240': np.array([-0.11329659732608087, -0.8671819100849522]),
'virginica&0&241': np.array([-0.040390637135858574, -0.9402832917474078]),
'virginica&0&242': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&243': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&244': np.array([-0.6814868825686854, 0.35066801608083215]),
'virginica&0&245': np.array([-0.040390637135858574, -0.9402832917474078]),
'virginica&0&246': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&247': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&248': np.array([-0.16157511199607094, -0.7754323813403634]),
'virginica&0&249': np.array([-0.5276460255602035, -0.28992233541586077]),
'virginica&0&250': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&251': np.array([-0.08968204532514226, -0.8491191210330045]),
'virginica&0&252': np.array([-0.6392402874163683, -0.24114611970435948]),
'virginica&0&253': np.array([-0.544626974647221, -0.24972982107967573]),
'virginica&0&254': np.array([-0.6426355680762406, -0.20016519137103667]),
'virginica&0&255': np.array([-0.19685199412911655, -0.7845879230594393]),
'virginica&0&256': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&257': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&258': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&259': np.array([-0.7974072911132788, 0.006894018772033604]),
'virginica&0&260': np.array([-0.07476043598366228, -0.9062715528546994]),
'virginica&0&261': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&262': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&263': np.array([-0.3355030348883163, -0.6305271339971502]),
'virginica&0&264': np.array([-0.7770298852793477, -0.029443430477147373]),
'virginica&0&265': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&266': np.array([-0.2519677855687844, -0.7134447168661863]),
'virginica&0&267': np.array([-0.7936433456054744, -0.012583752076496493]),
'virginica&0&268': np.array([-0.7799744386472778, -0.026476616324402506]),
'virginica&0&269': np.array([-0.7942342242967624, -0.0119572163963601]),
'virginica&0&270': np.array([-0.04201361383207032, -0.9372571358382161]),
'virginica&0&271': np.array([-0.014237661899709955, -0.9660323357290304]),
'virginica&0&272': np.array([-0.04813346258022244, -0.5416229439456887]),
'virginica&0&273': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&274': np.array([-0.4167677904879879, 0.22207334821665425]),
'virginica&0&275': np.array([-0.014237661899709955, -0.9660323357290304]),
'virginica&0&276': np.array([-0.04813346258022244, -0.5416229439456887]),
'virginica&0&277': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&278': np.array([-0.07857689135903215, -0.8696882596532965]),
'virginica&0&279': np.array([-0.04813346258022244, -0.5416229439456887]),
'virginica&0&280': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&281': np.array([-0.05160969201296555, -0.9000166344885441]),
'virginica&0&282': np.array([-0.3109532939139045, -0.22759134703604383]),
'virginica&0&283': np.array([-0.0766197045034485, -0.5080325256323984]),
'virginica&0&284': np.array([-0.32767091750230254, -0.19689316772421933]),
'virginica&0&285': np.array([-0.05031696218434577, -0.929227611211748]),
'virginica&0&286': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&287': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&288': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&289': np.array([-0.4656481363306145, 0.007982539480288167]),
'virginica&0&290': np.array([-0.017148644765919676, -0.9632117581295891]),
'virginica&0&291': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&292': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&293': np.array([-0.14241819268815753, -0.8424615476000691]),
'virginica&0&294': np.array([-0.061515713893900315, -0.524561199322281]),
'virginica&0&295': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&296': np.array([-0.1140907502997574, -0.8737800276630269]),
'virginica&0&297': np.array([-0.4329463382004908, -0.057167210150691136]),
'virginica&0&298': np.array([-0.14198277461566922, -0.4577720226157396]),
'virginica&0&299': np.array([-0.4385442121294165, -0.05333645823279597]),
'virginica&0&300': np.array([0.029402442458921384, -0.9481684282717414]),
'virginica&0&301': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&302': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&303': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'virginica&0&305': np.array([0.009887859354111524, -0.9698143912008228]),
'virginica&0&306': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&307': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&308': np.array([0.19002455311770447, -0.8848597943731074]),
'virginica&0&309': np.array([0.009595083643662688, -0.5643652067423869]),
'virginica&0&310': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&311': np.array([0.1746467870122951, -0.9073062742839755]),
'virginica&0&312': np.array([0.13694026920485936, -0.36331091829858003]),
'virginica&0&313': np.array([0.11200181312407695, -0.5330612470996793]),
'virginica&0&314': np.array([0.19998284600732558, -0.3489062419702088]),
'virginica&1&0': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&1': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&2': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&3': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&4': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&5': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&6': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&7': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&8': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&9': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&10': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&11': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&12': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&13': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&14': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&15': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&16': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&17': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&18': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&19': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&20': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&21': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&22': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&23': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&24': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&25': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&26': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&27': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&28': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&29': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&30': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&31': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&32': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&33': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&34': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&35': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&36': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&37': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&38': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&39': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&40': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&41': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&42': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&43': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&44': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&45': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&46': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&47': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&48': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&49': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&50': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&51': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&52': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&53': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&54': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&55': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&56': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&57': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&58': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&59': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&60': np.array([0.4933316375690332, 0.5272416708629276]),
'virginica&1&61': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&62': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&63': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'virginica&1&65': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&66': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&67': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&68': np.array([0.40694846236352233, 0.5109051764198169]),
'virginica&1&69': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&70': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&71': np.array([0.415695226122737, 0.5230815102377903]),
'virginica&1&72': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&73': np.array([0.28313251310829024, -0.10978015869508362]),
'virginica&1&74': np.array([0.20013484983664692, -0.3483612449300506]),
'virginica&1&75': np.array([0.0, 0.4756207622944677]),
'virginica&1&76': np.array([0.0, 0.4854334805210761]),
'virginica&1&77': np.array([0.0, -0.16885577975809632]),
'virginica&1&78': np.array([0.0, -0.39580588553855395]),
'virginica&1&79': np.array([0.0, 0.2538072707138344]),
'virginica&1&80': np.array([0.0, 0.4854334805210761]),
'virginica&1&81': np.array([0.0, -0.16885577975809632]),
'virginica&1&82': np.array([0.0, -0.39580588553855395]),
'virginica&1&83': np.array([0.0, 0.4904755652105692]),
'virginica&1&84': np.array([0.0, -0.16885577975809632]),
'virginica&1&85': np.array([0.0, -0.39580588553855395]),
'virginica&1&86': np.array([0.0, 0.5008471974438506]),
'virginica&1&87': np.array([0.0, -0.39580588553855395]),
'virginica&1&88': np.array([0.0, -0.14423919730424817]),
'virginica&1&89': np.array([0.0, -0.3847817540585927]),
'virginica&1&90': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&91': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&92': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&93': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&94': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&95': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&96': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&97': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&98': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&99': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&100': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&101': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&102': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&103': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&104': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&105': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&106': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&107': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&108': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&109': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&110': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&111': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&112': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&113': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&114': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&115': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&116': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&117': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&118': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&119': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&120': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&121': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&122': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&123': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&124': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&125': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&126': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&127': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&128': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&129': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&130': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&131': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&132': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&133': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&134': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&135': np.array([0.5188109114552927, 0.03638964581864269]),
'virginica&1&136': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&137': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&138': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&139': np.array([0.5436097000280874, 0.1461891067488832]),
'virginica&1&140': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&141': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&142': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&143': np.array([0.4788153032824012, 0.08625929936974323]),
'virginica&1&144': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&145': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&146': np.array([0.46583127837967303, 0.09875847161509169]),
'virginica&1&147': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&148': np.array([0.7419884013108898, -0.4595742931114029]),
'virginica&1&149': np.array([0.6092194175719845, -0.5086479426935605]),
'virginica&1&150': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&151': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&152': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&153': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&154': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&155': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&156': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&157': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&158': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&159': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&160': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&161': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&162': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&163': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&164': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&165': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&166': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&167': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&168': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&169': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&170': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&171': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&172': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&173': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&174': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&175': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&176': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&177': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&178': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&179': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&180': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&181': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&182': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&183': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&184': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&185': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&186': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&187': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&188': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&189': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&190': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&191': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&192': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&193': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&194': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&195': np.array([0.5188109114552927, 0.03638964581864269]),
'virginica&1&196': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&197': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&198': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&199': np.array([0.5436097000280874, 0.1461891067488832]),
'virginica&1&200': np.array([0.5131478569192371, 0.04203387599862816]),
'virginica&1&201': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&202': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&203': np.array([0.4788153032824012, 0.08625929936974323]),
'virginica&1&204': np.array([0.7329462736700701, -0.4610490766898857]),
'virginica&1&205': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&206': np.array([0.46583127837967303, 0.09875847161509169]),
'virginica&1&207': np.array([0.5965042032375719, -0.48856644624972617]),
'virginica&1&208': np.array([0.7419884013108898, -0.4595742931114029]),
'virginica&1&209': np.array([0.6092194175719845, -0.5086479426935605]),
'virginica&1&210': np.array([0.37157553889555184, 0.1221600832023858]),
'virginica&1&211': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&212': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&213': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&214': np.array([0.4964962439921071, 0.3798215458387346]),
'virginica&1&215': np.array([0.2463036871609408, 0.24630368716093934]),
'virginica&1&216': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&217': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&218': np.array([0.22125635302655813, 0.2925832702358638]),
'virginica&1&219': np.array([0.9105775730167809, -0.6842162738602727]),
'virginica&1&220': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&221': np.array([0.10063786451829529, 0.4085974066833644]),
'virginica&1&222': np.array([0.6718337295341265, -0.6620422637360074]),
'virginica&1&223': np.array([0.8441748651745272, -0.6057436494968107]),
'virginica&1&224': np.array([0.6453274192140858, -0.6334259878992301]),
'virginica&1&225': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&226': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&227': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&228': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&229': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&230': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&231': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&232': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&233': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&234': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&235': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&236': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&237': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&238': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&239': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&240': np.array([0.056623968925773045, 0.43360725859686644]),
'virginica&1&241': np.array([0.020169511418752378, 0.47015948158260334]),
'virginica&1&242': np.array([0.5806365328450952, -0.4726270680771261]),
'virginica&1&243': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&244': np.array([0.3351719071445682, 0.20616862401308342]),
'virginica&1&245': np.array([0.020169511418752378, 0.47015948158260334]),
'virginica&1&246': np.array([0.5806365328450952, -0.4726270680771261]),
'virginica&1&247': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&248': np.array([0.024556360933646205, 0.4723948285969902]),
'virginica&1&249': np.array([0.5806365328450952, -0.4726270680771261]),
'virginica&1&250': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&251': np.array([-0.0164329511444131, 0.5132208276383963]),
'virginica&1&252': np.array([0.41462901544715686, -0.4964318942067897]),
'virginica&1&253': np.array([0.581569928198426, -0.46134543884925855]),
'virginica&1&254': np.array([0.42361197252581306, -0.5068181610814407]),
'virginica&1&255': np.array([-0.32199975656257646, 0.7482293552463756]),
'virginica&1&256': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&257': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&258': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&259': np.array([0.2619265016777598, 0.33491141590339474]),
'virginica&1&260': np.array([-0.43843349141088417, 0.8642740701867917]),
'virginica&1&261': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&262': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&263': np.array([-0.2562642052727569, 0.6920266972283227]),
'virginica&1&264': np.array([0.7141739659554729, -0.661981914015288]),
'virginica&1&265': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&266': np.array([-0.34479806250338163, 0.7789143553916729]),
'virginica&1&267': np.array([0.4446001433508151, -0.6107546840046901]),
'virginica&1&268': np.array([0.6253066100206679, -0.5612970743228719]),
'virginica&1&269': np.array([0.4159041613345079, -0.5802838287107943]),
'virginica&1&270': np.array([-0.6288817118959938, 0.6849987400957501]),
'virginica&1&271': np.array([-0.6491819158994796, 0.7060292771859485]),
'virginica&1&272': np.array([-0.36354251586275393, 0.01503732165107865]),
'virginica&1&273': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&274': np.array([-0.3507937472799825, 0.22709708691079003]),
'virginica&1&275': np.array([-0.6491819158994796, 0.7060292771859485]),
'virginica&1&276': np.array([-0.36354251586275393, 0.01503732165107865]),
'virginica&1&277': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&278': np.array([-0.6219129029345898, 0.6860569455333333]),
'virginica&1&279': np.array([-0.36354251586275393, 0.01503732165107865]),
'virginica&1&280': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&281': np.array([-0.6423063482710314, 0.7078274136226649]),
'virginica&1&282': np.array([-0.2224264339516076, -0.2751400010362469]),
'virginica&1&283': np.array([-0.38798262782075055, 0.05152547330256509]),
'virginica&1&284': np.array([-0.23804537254556749, -0.24790919248823104]),
'virginica&1&285': np.array([-0.7749499208750119, 0.8147189440804429]),
'virginica&1&286': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&287': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&288': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&289': np.array([-0.4079256832347186, 0.038455640985860955]),
'virginica&1&290': np.array([-0.8040309195416899, 0.8445152504134819]),
'virginica&1&291': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&292': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&293': np.array([-0.6964303997553315, 0.7444536452136676]),
'virginica&1&294': np.array([-0.582650696375085, 0.22335655671229132]),
'virginica&1&295': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&296': np.array([-0.7213651642695392, 0.7718874443854203]),
'virginica&1&297': np.array([-0.33108168891715994, -0.1364781674635115]),
'virginica&1&298': np.array([-0.5538416840542331, 0.2026191723113616]),
'virginica&1&299': np.array([-0.3472412936248763, -0.1219322389673262]),
'virginica&1&300': np.array([0.4933316375690332, 0.5272416708629276]),
'virginica&1&301': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&302': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&303': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'virginica&1&305': np.array([0.5041830043657418, 0.5392782673950876]),
'virginica&1&306': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&307': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&308': np.array([0.40694846236352233, 0.5109051764198169]),
'virginica&1&309': np.array([0.25657760110071476, -0.12592645350389117]),
'virginica&1&310': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&311': np.array([0.415695226122737, 0.5230815102377903]),
'virginica&1&312': np.array([0.13717260713320115, -0.36277799079016637]),
'virginica&1&313': np.array([0.28313251310829024, -0.10978015869508362]),
'virginica&1&314': np.array([0.20013484983664692, -0.3483612449300506]),
'virginica&2&0': np.array([0.37157691321004915, 0.12216227283618836]),
'virginica&2&1': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&2': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&3': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&4': np.array([0.4741571944522723, -0.3872697414416878]),
'virginica&2&5': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&6': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&7': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&8': np.array([0.6273836195848199, -0.15720981251964872]),
'virginica&2&9': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&10': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&11': np.array([0.6863652799597699, -0.21335694415409426]),
'virginica&2&12': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&13': np.array([0.11274898124253621, 0.6292927079496371]),
'virginica&2&14': np.array([0.32240464148521225, 0.645858545382009]),
'virginica&2&15': np.array([0.37157691321004915, 0.12216227283618836]),
'virginica&2&16': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&17': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&18': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&19': np.array([0.4741571944522723, -0.3872697414416878]),
'virginica&2&20': np.array([0.24630541996506908, 0.24630541996506994]),
'virginica&2&21': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&22': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&23': np.array([0.6273836195848199, -0.15720981251964872]),
'virginica&2&24': np.array([0.04449246321056297, 0.7096449459722027]),
'virginica&2&25': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&26': np.array([0.6863652799597699, -0.21335694415409426]),
'virginica&2&27': np.array([0.2953784217387408, 0.6750352694420284]),
'virginica&2&28': np.array([0.11274898124253621, 0.6292927079496371]),
'virginica&2&29': np.array([0.32240464148521225, 0.645858545382009]),
'virginica&2&30': np.array([0.5188517506916897, 0.036358567813067386]),
'virginica&2&31': np.array([0.5131939273945454, 0.04199748266790813]),
'virginica&2&32': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&33': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&34': np.array([0.5354807894355184, -0.3418054346754283]),
'virginica&2&35': np.array([0.5131939273945454, 0.04199748266790813]),
'virginica&2&36': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&37': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&38': np.array([0.5917672401610737, -0.061499563231173816]),
'virginica&2&39': np.array([0.06285591932387397, 0.6914253444924359]),
'virginica&2&40': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&41': np.array([0.5967658480721675, -0.06546963852548916]),
'virginica&2&42': np.array([0.34904320225465857, 0.6233384360811872]),
'virginica&2&43': np.array([0.15466782862660866, 0.5877736906472755]),
'virginica&2&44': np.array([0.37833006296225374, 0.5922410451071548]),
'virginica&2&45': np.array([0.8252668830593566, 0.11450866713130668]),
'virginica&2&46': np.array([0.8211795643076095, 0.11869650771610692]),
'virginica&2&47': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&48': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&49': np.array([0.8735738195653328, -0.046438180466149094]),
'virginica&2&50': np.array([0.8211795643076095, 0.11869650771610692]),
'virginica&2&51': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&52': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&53': np.array([0.8388485924434891, 0.09800790238640067]),
'virginica&2&54': np.array([0.644166410268985, 0.30120464260998964]),
'virginica&2&55': np.array([0.7640280271176497, 0.19364537761420375]),
'virginica&2&56': np.array([0.835455914569297, 0.10189258327760495]),
'virginica&2&57': | np.array([0.7640280271176497, 0.19364537761420375]) | numpy.array |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import librosa
import numpy as np
from PIL import Image
import subprocess
from options.test_options import TestOptions
import torchvision.transforms as transforms
import torch
from models.models import ModelBuilder
from models.audioVisual_model import AudioVisualModel
from data.audioVisual_dataset import generate_spectrogram
def audio_normalize(samples, desired_rms = 0.1, eps = 1e-4):
rms = np.maximum(eps, np.sqrt(np.mean(samples**2)))
samples = samples * (desired_rms / rms)
return rms / desired_rms, samples
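# (Added note, illustrative) A segment with RMS 0.05 and desired_rms = 0.1 is
# scaled up by 2x, and the returned normalizer 0.5 lets the caller undo the
# scaling: the predicted binaural segments are multiplied by it below.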
def main():
#load test arguments
opt = TestOptions().parse()
opt.device = torch.device("cuda")
# network builders
builder = ModelBuilder()
net_visual = builder.build_visual(weights=opt.weights_visual)
net_audio = builder.build_audio(
ngf=opt.unet_ngf,
input_nc=opt.unet_input_nc,
output_nc=opt.unet_output_nc,
weights=opt.weights_audio)
nets = (net_visual, net_audio)
# construct our audio-visual model
model = AudioVisualModel(nets, opt)
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
model.to(opt.device)
model.eval()
#load the audio to perform separation
audio, audio_rate = librosa.load(opt.input_audio_path, sr=opt.audio_sampling_rate, mono=False)
audio_channel1 = audio[0,:]
audio_channel2 = audio[1,:]
#define the transformation to perform on visual frames
vision_transform_list = [transforms.Resize((224,448)), transforms.ToTensor()]
vision_transform_list.append(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
vision_transform = transforms.Compose(vision_transform_list)
#perform spatialization over the whole audio using a sliding window approach
overlap_count = np.zeros((audio.shape)) #count the number of times a data point is calculated
binaural_audio = np.zeros((audio.shape))
#perform spatialization over the whole spectrogram in a sliding-window fashion
sliding_window_start = 0
data = {}
samples_per_window = int(opt.audio_length * opt.audio_sampling_rate)
while sliding_window_start + samples_per_window < audio.shape[-1]:
sliding_window_end = sliding_window_start + samples_per_window
normalizer, audio_segment = audio_normalize(audio[:,sliding_window_start:sliding_window_end])
audio_segment_channel1 = audio_segment[0,:]
audio_segment_channel2 = audio_segment[1,:]
audio_segment_mix = audio_segment_channel1 + audio_segment_channel2
data['audio_diff_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 - audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
data['audio_mix_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 + audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
#get the frame index for current window
frame_index = int(round((((sliding_window_start + samples_per_window / 2.0) / audio.shape[-1]) * opt.input_audio_length + 0.05) * 10 ))
image = Image.open(os.path.join(opt.video_frame_path, str(frame_index).zfill(6) + '.png')).convert('RGB')
#image = image.transpose(Image.FLIP_LEFT_RIGHT)
frame = vision_transform(image).unsqueeze(0) #unsqueeze to add a batch dimension
data['frame'] = frame
output = model.forward(data)
predicted_spectrogram = output['binaural_spectrogram'][0,:,:,:].data[:].cpu().numpy()
#ISTFT to convert back to audio
reconstructed_stft_diff = predicted_spectrogram[0,:,:] + (1j * predicted_spectrogram[1,:,:])
reconstructed_signal_diff = librosa.istft(reconstructed_stft_diff, hop_length=160, win_length=400, center=True, length=samples_per_window)
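# (Added note) Since mix = left + right and the predicted diff is left - right,
# the channels are recovered as left = (mix + diff) / 2 and right = (mix - diff) / 2.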
reconstructed_signal_left = (audio_segment_mix + reconstructed_signal_diff) / 2
reconstructed_signal_right = (audio_segment_mix - reconstructed_signal_diff) / 2
reconstructed_binaural = np.concatenate((np.expand_dims(reconstructed_signal_left, axis=0), np.expand_dims(reconstructed_signal_right, axis=0)), axis=0) * normalizer
binaural_audio[:,sliding_window_start:sliding_window_end] = binaural_audio[:,sliding_window_start:sliding_window_end] + reconstructed_binaural
overlap_count[:,sliding_window_start:sliding_window_end] = overlap_count[:,sliding_window_start:sliding_window_end] + 1
sliding_window_start = sliding_window_start + int(opt.hop_size * opt.audio_sampling_rate)
#deal with the last segment
normalizer, audio_segment = audio_normalize(audio[:,-samples_per_window:])
audio_segment_channel1 = audio_segment[0,:]
audio_segment_channel2 = audio_segment[1,:]
data['audio_diff_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 - audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
data['audio_mix_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 + audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
#get the frame index for last window
frame_index = int(round(((opt.input_audio_length - opt.audio_length / 2.0) + 0.05) * 10))
image = Image.open(os.path.join(opt.video_frame_path, str(frame_index).zfill(6) + '.png')).convert('RGB')
#image = image.transpose(Image.FLIP_LEFT_RIGHT)
frame = vision_transform(image).unsqueeze(0) #unsqueeze to add a batch dimension
data['frame'] = frame
output = model.forward(data)
predicted_spectrogram = output['binaural_spectrogram'][0,:,:,:].data[:].cpu().numpy()
#ISTFT to convert back to audio
reconstructed_stft_diff = predicted_spectrogram[0,:,:] + (1j * predicted_spectrogram[1,:,:])
reconstructed_signal_diff = librosa.istft(reconstructed_stft_diff, hop_length=160, win_length=400, center=True, length=samples_per_window)
reconstructed_signal_left = (audio_segment_mix + reconstructed_signal_diff) / 2
reconstructed_signal_right = (audio_segment_mix - reconstructed_signal_diff) / 2
reconstructed_binaural = np.concatenate(( | np.expand_dims(reconstructed_signal_left, axis=0) | numpy.expand_dims |
"""
physical_models_vec.py
A module for material strength behavior to be imported into python scripts for
optimization or training emulators. Adapted from strength_models_add_ptw.py
Authors:
<NAME>, <EMAIL>
<NAME>, <EMAIL>
<NAME>, <EMAIL>
"""
import numpy as np
np.seterr(all = 'raise')
#import ipdb
import copy
from math import pi
from scipy.special import erf
## Error Definitions
class ConstraintError(ValueError):
pass
class PTWStressError(FloatingPointError):
pass
## Model Definitions
class BaseModel(object):
"""
Base Class for property Models (flow stress, specific heat, melt, density,
etc.). Must be instantiated as a child of MaterialModel
"""
params = []
consts = []
def value(self, *args):
return None
def update_parameters(self, x):
self.parent.parameters.update_parameters(x, self.params)
return
def __init__(self, parent):
self.parent = parent
return
# Specific Heat Models
class Constant_Specific_Heat(BaseModel):
"""
Constant Specific Heat Model
"""
consts = ['Cv0']
def value(self, *args):
return self.parent.parameters.Cv0
class Linear_Specific_Heat(BaseModel):
"""
Linear Specific Heat Model
"""
consts = ['Cv0', 'T0', 'dCdT']
def value(self, *args):
c0=self.parent.parameters.Cv0
t0=self.parent.parameters.T0
dcdt=self.parent.parameters.dCdT
tnow=self.parent.state.T
cnow=c0+(tnow-t0)*dcdt
return cnow
# Density Models
class Constant_Density(BaseModel):
"""
Constant Density Model
"""
consts = ['rho0']
def value(self, *args):
return self.parent.parameters.rho0 * np.ones(len(self.parent.state.T))
class Linear_Density(BaseModel):
"""
Linear Density Model
"""
consts = ['rho0', 'T0', 'dRhodT']
def value(self, *args):
r0=self.parent.parameters.rho0
t0=self.parent.parameters.T0
drdt=self.parent.parameters.dRhodT
tnow=self.parent.state.T
rnow=r0+drdt*(tnow-t0)
return rnow
# Melt Temperature Models
class Constant_Melt_Temperature(BaseModel):
"""
Constant Melt Temperature Model
"""
consts = ['Tmelt0']
def value(self, *args):
return self.parent.parameters.Tmelt0
class Linear_Melt_Temperature(BaseModel):
"""
Linear Melt Temperature Model
"""
consts=['Tmelt0', 'rho0', 'dTmdRho']
def value(self, *args):
tm0=self.parent.parameters.Tmelt0
rnow=self.parent.state.rho
dtdr=self.parent.parameters.dTmdRho
r0=self.parent.parameters.rho0
tmeltnow=tm0+dtdr*(rnow-r0)
return tmeltnow
class BGP_Melt_Temperature(BaseModel):
consts = ['Tm_0', 'rho_m', 'gamma_1', 'gamma_3', 'q3']
def value(self, *args):
mp = self.parent.parameters
rho = self.parent.state.rho
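# BGP melt curve implemented below (added note):
#   Tm(rho) = Tm_0 * (rho/rho_m)^(1/3)
#             * exp(6*gamma_1*(rho_m^(-1/3) - rho^(-1/3))
#                   + (2*gamma_3/q3)*(rho_m^(-q3) - rho^(-q3)))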
melt_temp = mp.Tm_0*np.power(rho/mp.rho_m, 1./3.)*np.exp(6*mp.gamma_1*(np.power(mp.rho_m,-1./3.)-np.power(rho,-1./3.))\
+2.*mp.gamma_3/mp.q3*(np.power(mp.rho_m,-mp.q3)-np.power(rho,-mp.q3)))
return melt_temp
# Shear Modulus Models
class Constant_Shear_Modulus(BaseModel):
consts = ['G0']
def value(self, *args):
return self.parent.parameters.G0
class Linear_Shear_Modulus(BaseModel):
consts = ['G0', 'rho0', 'dGdRho' ]
def value(self, *args):
g0=self.parent.parameters.G0
rho0=self.parent.parameters.rho0
dgdr=self.parent.parameters.dGdRho
rnow=self.parent.state.rho
gnow=g0+dgdr*(rnow-rho0)
return gnow
class Simple_Shear_Modulus(BaseModel):
consts = ['G0', 'alpha']
def value(self, *args):
mp = self.parent.parameters
temp = self.parent.state.T
tmelt = self.parent.state.Tmelt
return mp.G0 * (1. - mp.alpha * (temp / tmelt))
class BGP_PW_Shear_Modulus(BaseModel):
#BGP model provides the cold shear, i.e. the shear modulus at zero temperature as a function of density.
#PW describes the (linear) temperature dependence of the shear modulus. (Same dependency as
#in Simple_Shear_Modulus.)
#With these two models combined, we get the shear modulus as a function of density and temperature.
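# Combined form (added note): G(rho, T) = G_cold(rho) * (1 - alpha * T / Tmelt), with
# G_cold(rho) = G0 * exp(6*gamma_1*(rho_0^(-1/3) - rho^(-1/3)) + (2*gamma_2/q2)*(rho_0^(-q2) - rho^(-q2))).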
consts = ['G0', 'rho_0', 'gamma_1', 'gamma_2', 'q2', 'alpha']
def value(self, *args):
mp = self.parent.parameters
rho = self.parent.state.rho
temp = self.parent.state.T
tmelt = self.parent.state.Tmelt
cold_shear = mp.G0*np.exp(6.*mp.gamma_1*(np.power(mp.rho_0,-1./3.)-np.power(rho,-1./3.))\
+ 2*mp.gamma_2/mp.q2*(np.power(mp.rho_0,-mp.q2)-np.power(rho,-mp.q2)))
gnow = cold_shear*(1.- mp.alpha* (temp/tmelt))
gnow[np.where(temp >= tmelt)] = 0.
gnow[np.where(gnow < 0)] = 0.
#if temp >= tmelt: gnow = 0.0
#if gnow < 0.0: gnow = 0.0
return gnow
class Stein_Shear_Modulus(BaseModel):
#consts = ['G0', 'sgA', 'sgB']
#assuming constant density and pressure
#so we only include the temperature dependence
consts = ['G0', 'sgB']
eta = 1.0
def value(self, *args):
mp = self.parent.parameters
temp = self.parent.state.T
tmelt = self.parent.state.Tmelt
#just putting this here for completeness
#aterm = a/eta**(1.0/3.0)*pressure
aterm = 0.0
bterm = mp.sgB * (temp - 300.0)
gnow = mp.G0 * (1.0 + aterm - bterm)
#if temp >= tmelt: gnow = 0.0
#if gnow < 0.0: gnow = 0.0
gnow[np.where(temp >= tmelt)] = 0.
gnow[np.where(gnow < 0)] = 0.
return gnow
# Yield Stress Models
class Constant_Yield_Stress(BaseModel):
"""
Constant Yield Stress Model
"""
consts = ['yield_stress']
def value(self, *args):
return self.parent.parameters.yield_stress
def fast_pow(a, b):
"""
Numpy power is slow; this is faster. Gets a**b for a and b np arrays.
"""
cond = a>0
out = a * 0.
out[cond] = np.exp(b[cond] * np.log(a[cond]))
return out
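# Quick sanity check (added for illustration; not part of the original module):
# fast_pow agrees with np.power on strictly positive bases and returns 0 where
# the base is <= 0.
assert np.allclose(fast_pow(np.array([0.5, 2.0, 0.0]), np.array([2.0, 0.5, 3.0])), [0.25, 2.0 ** 0.5, 0.0])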
pos = lambda a: (abs(a) + a) / 2 # same as max(0,a)
class JC_Yield_Stress(BaseModel):
params = ['A','B','C','n','m']
consts = ['Tref','edot0','chi']
def value(self, edot):
mp = self.parent.parameters
eps = self.parent.state.strain
t = self.parent.state.T
tmelt = self.parent.state.Tmelt
#th = np.max([(t - mp.Tref) / (tmelt - mp.Tref), 0.])
th = pos((t - mp.Tref) / (tmelt - mp.Tref))
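# Johnson-Cook flow stress (added note):
#   Y = (A + B*eps^n) * (1 + C*ln(edot/edot0)) * (1 - th^m),
# with homologous temperature th = max(0, (T - Tref) / (Tmelt - Tref)).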
Y = (
(mp.A + mp.B * fast_pow(eps, mp.n)) *
(1. + mp.C * np.log(edot / mp.edot0)) *
(1. - fast_pow(th, mp.m))
)
return Y
class PTW_Yield_Stress(BaseModel):
params = ['theta','p','s0','sInf','kappa','lgamma','y0','yInf','y1', 'y2']
consts = ['beta', 'matomic', 'chi']
#@profile
def value(self, edot):
"""
function used to define PTW flow stress model
arguments are:
- edot: scalar, strain rate
- material: an instance of MaterialModel class
returns the flow stress at the current material state
and specified strain rate
"""
mp = self.parent.parameters
eps = self.parent.state.strain
temp = self.parent.state.T
tmelt = self.parent.state.Tmelt
shear = self.parent.state.G
#if (np.any(mp.sInf > mp.s0) or np.any(mp.yInf > mp.y0) or
# np.any(mp.y0 > mp.s0) or np.any(mp.yInf > mp.sInf) or np.any(mp.y1 < mp.s0) or np.any(mp.y2 < mp.beta)):
# raise ConstraintError
good = (
(mp.sInf < mp.s0) * (mp.yInf < mp.y0) * (mp.y0 < mp.s0) *
(mp.yInf < mp.sInf) * (mp.y1 > mp.s0) * (mp.y2 > mp.beta)
)
if | np.any(~good) | numpy.any |
from itertools import cycle
from json import load
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
with open('bgm_anime_dataset.json', 'r', encoding='utf8') as f:
data = load(f)
scores = np.array(
[bangumi['rating']['score'] for bangumi in data],
dtype=np.float64
)
count = scores.size
mean = np.mean(scores, dtype=np.float64)
median = | np.median(scores) | numpy.median |
from afqa_toolbox.features import block_properties
import numpy as np
import cv2
class FeatMOW:
"""Feature extraction for Mean Object Width"""
def __init__(self, blk_size=32, foreground_ratio=0.8):
"""Initialize
:param blk_size: Size of individual blocks
:param foreground_ratio : Ratio of minimal mask pixels to determine foreground
"""
self.blk_size = blk_size
self.foreground_ratio = foreground_ratio
def mow(self, image, maskim):
"""Divides the input image into individual blocks and calculates the MOW metric
:param image: Input fingerprint image
:param maskim: Input fingerprint segmentation mask
:return: Resulting quality map in form of a matrix
"""
rows, cols = image.shape
map_rows, map_cols = block_properties(image.shape, self.blk_size)
result = | np.full((map_rows, map_cols), np.nan, dtype=np.float64) | numpy.full |
import pytest
import numpy as np
from numpy.linalg import norm
from sklearn.linear_model import LogisticRegression
from andersoncd.logreg import solver_logreg
pCmins = [2, 5, 10]
algos = [("cd", True), ("pgd", True), ("fista", False)]
@pytest.mark.parametrize("algo, use_acc", algos)
@pytest.mark.parametrize("pCmin", pCmins)
def test_logreg_solver(algo, use_acc, pCmin):
# data generation
| np.random.seed(0) | numpy.random.seed |
import oommfc as oc
import discretisedfield as df
import numpy as np
import matplotlib.pyplot as plt
import colorsys
plt.style.use('styles/lato_style.mplstyle')
mu0 = 4 * np.pi * 1e-7
def convert_to_RGB(hls_color):
return np.array(colorsys.hls_to_rgb(hls_color[0] / (2 * np.pi),
hls_color[1],
hls_color[2]))
def generate_RGBs(field_data):
"""
field_data :: (n, 3) array
"""
hls = np.ones_like(field_data)
hls[:, 0] = np.arctan2(field_data[:, 1],
field_data[:, 0]
)
hls[:, 0][hls[:, 0] < 0] = hls[:, 0][hls[:, 0] < 0] + 2 * np.pi
hls[:, 1] = 0.5 * (field_data[:, 2] + 1)
rgbs = np.apply_along_axis(convert_to_RGB, 1, hls)
# Redefine colours less than zero
# rgbs[rgbs < 0] += 2 * np.pi
return rgbs
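# (Added note) The in-plane angle of the field sets the hue and the z component
# sets the lightness, e.g. (1, 0, 0) -> red, (0, 0, 1) -> white, (0, 0, -1) -> black,
# at full saturation throughout.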
# def init_dot(pos):
#
# x, y = pos[0], pos[1]
# r = np.sqrt(x ** 2 + y ** 2)
#
# if r < R:
# mz = -1
# else:
# mz = 1
#
# return (0, 0, mz)
def init_type2bubble_bls_II(pos, R=80e-9):
"""
Initial state to obtain a type II bubble
We set a Bloch-like skyrmion profile across the sample thickness
"""
x, y = pos[0], pos[1]
r = | np.sqrt(x ** 2 + y ** 2) | numpy.sqrt |
import flowws
from flowws import Argument as Arg
import freud
import numpy as np
import plato
import plato.draw.vispy as draw
import rowan
def circle_patterns(locations, radii, Npoints=128, z=0):
locations = np.array(locations)
thetas = np.linspace(0, 2*np.pi, Npoints, endpoint=False)
circle_template = np.zeros((Npoints, 3))
circle_template[:, 0] = | np.cos(thetas) | numpy.cos |
import OpenGL
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import glm
import numpy as np
from PIL import Image, ImageOps
from pyrr import Matrix44, Vector4, Vector3, Quaternion
import pyrr
import argparse
import os
import xml.dom.minidom
import glob
from tqdm import tqdm
VERT_DATA = np.array([1.0, 1.0, 0.0,
1.0, -1.0, 0.0,
-1.0, -1.0, 0.0,
-1.0, 1.0, 0.0],
dtype="float32")
TEXTURE_COORD_DATA = np.array([1.0, 1.0,
1.0, -1.0,
-1.0, -1.0,
-1.0, 1.0],
dtype="float32")
INDICES = np.array([0, 1, 3,
1, 2, 3],
dtype="int32")
WINDOW_WIDTH, WINDOW_HEIGHT = 432, 368
# camera params
FAR_CLIP = 2500.0
NEAR_CLIP = 2.0
FOV = 45.0
ORIGIN = np.array([-4.21425, 105.008, 327.119], dtype="float32")
TARGET = np.array([-4.1969, 104.951, 326.12], dtype="float32")
UP = np.array([0.0, 1.0, 0.0], dtype="float32")
# RECT PARAMS
RECT_SCALE = Vector3([15.0, 30.0, 1.0])
RECT_TRANSLATE = Vector3([0.0, 110.0, 15.0])
BG_TEXTURE_PATH = 'master_v2.jpg'
class GLProgram:
def __init__(self, x = 50.0, y=0.0, z =-50, angle=1.5):
self.gl_program = glCreateProgram()
self.shaders()
self.gl_buffers()
self.mvp_matrix = self.compute_mvp(Vector3([x, y, z]), angle)
self.gl_init()
self.rendered = False
def gl_init(self):
#glEnable(GL_DEPTH_TEST)
glClearColor(0.0, 0.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
def compute_mvp(self, translation, rotation):
# model matrix is correct
identity_matrix = np.identity(4)
scale_matrix = np.transpose(pyrr.matrix44.create_from_scale(RECT_SCALE))
trans_matrix = np.transpose(pyrr.matrix44.create_from_translation(RECT_TRANSLATE))
rot_matrix = np.transpose(pyrr.matrix44.create_from_y_rotation(np.radians(360.0 - rotation)))
trans_matrix_cur = np.transpose(pyrr.matrix44.create_from_translation(translation))
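# (Added note) Compose right-to-left under the column-vector convention:
# scale first, then the fixed rect offset, then the yaw rotation, then the
# per-frame translation.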
model_matrix = identity_matrix
model_matrix = np.matmul(model_matrix, trans_matrix_cur)
model_matrix = np.matmul(model_matrix, rot_matrix)
model_matrix = np.matmul(model_matrix, trans_matrix)
model_matrix = np.matmul(model_matrix, scale_matrix)
view_matrix = np.transpose(
pyrr.matrix44.create_look_at(
ORIGIN,
TARGET,
UP
)
)
proj_matrix = np.transpose(
pyrr.matrix44.create_perspective_projection(
FOV,
WINDOW_WIDTH / WINDOW_HEIGHT,
NEAR_CLIP,
FAR_CLIP
)
)
cam_matrix = np.matmul(proj_matrix, view_matrix)
m = np.matmul(cam_matrix, model_matrix)
return | np.transpose(m) | numpy.transpose |
from utils.ocpdl import Online_CPDL
import numpy as np
from PIL import Image
from skimage.transform import downscale_local_mean
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from sklearn.decomposition import SparseCoder
from time import time
import itertools
import matplotlib.pyplot as plt
DEBUG = False
class Image_Reconstructor_OCPDL():
### Use Online CP Dictionary Learning for patch-based image processing
def __init__(self,
path,
n_components=100, # number of dictionary elements -- rank
iterations=50, # number of iterations for the ONTF algorithm
sub_iterations = 20, # number of i.i.d. subsampling for each iteration of ONTF
batch_size=20, # number of patches used in i.i.d. subsampling
num_patches = 1000, # number of patches that ONTF algorithm learns from at each iteration
sub_num_patches = 10000, # number of patches to optimize H after training W
downscale_factor=2,
patch_size=7,
patches_file='',
alpha=1,
learn_joint_dict=False,
is_matrix=False,
unfold_space=False,
unfold_all=False,
is_color=True):
'''
batch_size = number of patches used for training dictionaries per ONTF iteration
sources: array of filenames to make patches out of
patches_array_filename: numpy array file which contains already read-in images
'''
self.path = path
self.n_components = n_components
self.iterations = iterations
self.sub_iterations = sub_iterations
self.num_patches = num_patches
self.sub_num_patches = sub_num_patches
self.batch_size = batch_size
self.downscale_factor = downscale_factor
self.patch_size = patch_size
self.patches_file = patches_file
self.learn_joint_dict = learn_joint_dict
self.is_matrix = is_matrix
self.unfold_space = unfold_space
self.unfold_all = unfold_all
self.is_color = is_color
self.alpha = alpha ## sparsity regularizer
self.W = np.zeros(shape=(patch_size, n_components))
self.code = np.zeros(shape=(n_components, iterations*batch_size))
# read in image as array
self.data = self.read_img_as_array()
def read_img_as_array(self):
'''
Read input image as a narray
'''
if self.is_matrix:
img = np.load(self.path)
data = (img + 1) / 2 # it was +-1 matrix; now it is 0-1 matrix
else:
img = Image.open(self.path)
if self.is_color:
img = img.convert('RGB')
else:
img = img.convert('L')
# normalize pixel values (range 0-1)
data = np.asarray(img) / 255
print('data.shape', data.shape)
return data
def extract_random_patches(self):
'''
Extract 'num_patches' many random patches of given size
Three tensor data types depending on how to unfold k by k by 3 color patches:
unfold_space : k**2 by 3
unfold_all : 3*k**2 by 1
else: k by k by 3
'''
x = self.data.shape
k = self.patch_size
num_patches = self.num_patches
if self.unfold_all:
X = np.zeros(shape=(3 * (k ** 2), 1, 1))
elif self.unfold_space:
X = np.zeros(shape=(k ** 2, 3, 1))
else:
X = np.zeros(shape=(k, k, 3, 1))
for i in np.arange(num_patches):
a = np.random.choice(x[0] - k) # x coordinate of the top left corner of the random patch
b = np.random.choice(x[1] - k) # y coordinate of the top left corner of the random patch
Y = self.data[a:a + k, b:b + k, :] # k by k by 3
if self.unfold_all:
Y = Y.reshape(3 * (k ** 2), 1, 1)
elif self.unfold_space:
Y = Y.reshape(k ** 2, 3, 1)
else:
Y = Y.reshape(k, k, 3, 1)
if i == 0:
X = Y
elif self.unfold_space or self.unfold_all:
X = np.append(X, Y, axis=2) # x is class ndarray
else:
X = np.append(X, Y, axis=3) # x is class ndarray
return X
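# Shape summary (added note): with k = patch_size and N = num_patches, the
# returned tensor is (3*k*k, 1, N) for unfold_all, (k*k, 3, N) for
# unfold_space, and (k, k, 3, N) otherwise.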
def image_to_patches(self, path, patch_size=10, downscale_factor=2, is_matrix=False, is_recons=False):
'''
#*****
args:
path (string): Path and filename of input image
patch_size (int): Pixel dimension of square patches taken of image
color (boolean): Specifies conversion of image to RGB (True) or grayscale (False).
Default value = false. When color = True, images in gray colorspace will still appear
gray, but will thereafter be represented in RGB colorspace using three channels.
downscale_factor: Specifies the extent to which the image will be downscaled. Greater values
will result in more downscaling but faster speed. For no downscaling, use downscale_factor=1.
returns: #***
'''
#open image and convert it to either RGB (three channel) or grayscale (one channel)
if is_matrix:
img = np.load(path)
data = (img + 1) / 2 # it was +-1 matrix; now it is 0-1 matrix
else:
img = Image.open(path)
if self.is_color:
img = img.convert('RGB')
else:
img = img.convert('L')
# normalize pixel values (range 0-1)
data = np.asarray(img) / 255
if DEBUG:
print(np.asarray(img))
patches = self.extract_random_patches()
print('patches.shape=', patches.shape)
return patches
def out(self, loading):
### given loading, take outer product of respected columns to get CPdict
CPdict = {}
for i in np.arange(self.n_components):
A = np.array([1])
for j in np.arange(len(loading.keys())):
loading_factor = loading.get('U' + str(j)) ### I_i X n_components matrix
# print('loading_factor', loading_factor)
A = np.multiply.outer(A, loading_factor[:, i])
A = A[0]
CPdict.update({'A' + str(i): A})
return CPdict
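# (Added note) Each atom is the rank-one tensor
#   A_i = U0[:, i] (outer) U1[:, i] (outer) ... (outer) U{m-1}[:, i],
# i.e. the CP dictionary is the set of outer products of matching loading columns.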
def display_dictionary_CP(self, W, plot_shape_N_color=False):
k = self.patch_size
num_rows = np.ceil( | np.sqrt(self.n_components) | numpy.sqrt |
import orbit_prediction.spacetrack_etl as etl
import orbit_prediction.ml_model as ml
import orbit_prediction.build_training_data as training
import kernels.quantum as q_kernel
import kernels.classical as c_kernel
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import pickle
from sklearn.svm import SVR
SPACETRACK_USERNAME='<EMAIL>'
SPACETRACK_PASSWORD='password'
N_PRED_DAYS = 1
EARTH_RAD = 6.378e6
MEAN_ORBIT_SPEED = 7800.
SECONDS_IN_DAY = 60.*60.*24.
plt.rcParams.update({'font.size': 20})
def query_norm_X_data(n_data, X_data_raw):
X_data = | np.zeros((n_data,13)) | numpy.zeros |
"""
Module implementing various uncertainty based query strategies.
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import numpy as np
from sklearn.utils.validation import check_array
from ..base import SingleAnnotPoolBasedQueryStrategy, SkactivemlClassifier
from ..utils import check_cost_matrix, simple_batch, check_classes, \
fit_if_not_fitted, check_type
class UncertaintySampling(SingleAnnotPoolBasedQueryStrategy):
"""Uncertainty Sampling
This class implement various uncertainty based query strategies, i.e., the
standard uncertainty measures [1], cost-sensitive ones [2], and one
optimizing expected average precision [3].
Parameters
----------
method : string (default='least_confident')
The method used to calculate the uncertainty; 'entropy', 'least_confident',
'margin_sampling', and 'expected_average_precision' are possible.
cost_matrix : array-like, shape (n_classes, n_classes)
Cost matrix with cost_matrix[i,j] defining the cost of predicting class
j for a sample with the actual class i. Only supported for
`least_confident` and `margin_sampling` variant.
random_state : numeric | np.random.RandomState
The random state to use.
Attributes
----------
method : string
The method used to calculate the uncertainty. Only 'entropy', 'least_confident',
'margin_sampling' and 'expected_average_precision' are supported.
cost_matrix : array-like, shape (n_classes, n_classes)
Cost matrix with C[i, j] defining the cost of predicting class j for a
sample with the actual class i. Only supported for least confident
variant.
random_state : numeric | np.random.RandomState
Random state to use.
References
----------
[1] Settles, Burr. Active learning literature survey.
University of Wisconsin-Madison Department of Computer Sciences, 2009.
[2] Chen, Po-Lung, and <NAME>. "Active learning for multiclass
cost-sensitive classification using probabilistic models." 2013
Conference on Technologies and Applications of Artificial Intelligence.
IEEE, 2013.
[3] Wang, Hanmo, et al. "Uncertainty sampling for action recognition
via maximizing expected average precision."
IJCAI International Joint Conference on Artificial Intelligence. 2018.
"""
def __init__(self, method='least_confident', cost_matrix=None,
random_state=None):
super().__init__(random_state=random_state)
self.method = method
self.cost_matrix = cost_matrix
def query(self, X_cand, clf, X=None, y=None, sample_weight=None,
batch_size=1,
return_utilities=False):
"""
Queries the next instance to be labeled.
Parameters
----------
X_cand : array-like, shape (n_candidate_samples, n_features)
Candidate samples from which the strategy can select.
clf : skactiveml.base.SkactivemlClassifier
Model implementing the methods `fit` and `predict_proba`.
X: array-like, shape (n_samples, n_features), optional (default=None)
Complete training data set.
y: array-like, shape (n_samples), optional (default=None)
Labels of the training data set.
sample_weight: array-like, shape (n_samples), optional
(default=None)
Weights of training samples in `X`.
batch_size : int, optional (default=1)
The number of samples to be selected in one AL cycle.
return_utilities : bool, optional (default=False)
If true, also return the utilities based on the query strategy.
Returns
-------
query_indices : numpy.ndarray, shape (batch_size)
The query_indices indicate for which candidate sample a label is
to queried, e.g., `query_indices[0]` indicates the first selected
sample.
utilities : numpy.ndarray, shape (batch_size, n_samples)
The utilities of all candidate samples after each selected
sample of the batch, e.g., `utilities[0]` indicates the utilities
used for selecting the first sample (with index `query_indices[0]`)
of the batch.
"""
# Validate input parameters.
X_cand, return_utilities, batch_size, random_state = \
self._validate_data(X_cand, return_utilities, batch_size,
self.random_state, reset=True)
# Validate classifier type.
check_type(clf, SkactivemlClassifier, 'clf')
# Validate method.
if not isinstance(self.method, str):
raise TypeError('{} is an invalid type for method. Type {} is '
'expected'.format(type(self.method), str))
# Fit the classifier.
clf = fit_if_not_fitted(clf, X, y, sample_weight)
# Predict class-membership probabilities.
probas = clf.predict_proba(X_cand)
# Choose the method and calculate corresponding utilities.
with np.errstate(divide='ignore'):
if self.method in ['least_confident', 'margin_sampling',
'entropy']:
utilities = uncertainty_scores(
probas=probas, method=self.method,
cost_matrix=self.cost_matrix
)
elif self.method == 'expected_average_precision':
classes = clf.classes_
utilities = expected_average_precision(classes, probas)
else:
raise ValueError(
"The given method {} is not valid. Supported methods are "
"'entropy', 'least_confident', 'margin_sampling' and "
"'expected_average_precision'".format(self.method))
return simple_batch(utilities, random_state,
batch_size=batch_size,
return_utilities=return_utilities)
def uncertainty_scores(probas, cost_matrix=None, method='least_confident'):
"""Computes uncertainty scores. Three methods are available: least
confident ('least_confident'), margin sampling ('margin_sampling'),
and entropy based uncertainty ('entropy') [1]. For the least confident and
margin sampling methods cost-sensitive variants are implemented in case of
a given cost matrix (see [2] for more information).
Parameters
----------
probas : array-like, shape (n_samples, n_classes)
Class membership probabilities for each sample.
cost_matrix : array-like, shape (n_classes, n_classes)
Cost matrix with C[i,j] defining the cost of predicting class j for a
sample with the actual class i. Only supported for least confident
variant.
method : {'least_confident', 'margin_sampling', 'entropy'},
optional (default='least_confident')
Least confidence (lc) queries the sample whose maximal posterior
probability is minimal. In case of a given cost matrix, the maximal
expected cost variant is used. Smallest margin (sm) queries the sample
whose posterior probability gap between the most and the second most
probable class label is minimal. In case of a given cost matrix, the
cost-weighted minimum margin is used. Entropy ('entropy') queries the
sample whose posterior's have the maximal entropy. There is no
cost-sensitive variant of entropy based uncertainty sampling.
References
----------
[1] <NAME>. "Active learning literature survey".
University of Wisconsin-Madison Department of Computer Sciences, 2009.
[2] <NAME>, and <NAME>. "Active learning for multiclass
cost-sensitive classification using probabilistic models." 2013
Conference on Technologies and Applications of Artificial Intelligence.
IEEE, 2013.
"""
# Check probabilities.
probas = check_array(probas, accept_sparse=False,
accept_large_sparse=True, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1,
ensure_min_features=1, estimator=None)
if not np.allclose(np.sum(probas, axis=1), 1, rtol=0, atol=1.e-3):
raise ValueError(
"'probas' are invalid. The sum over axis 1 must be one."
)
n_classes = probas.shape[1]
# Check cost matrix.
if cost_matrix is not None:
cost_matrix = check_cost_matrix(cost_matrix, n_classes=n_classes)
# Compute uncertainties.
if method == 'least_confident':
if cost_matrix is None:
return 1 - np.max(probas, axis=1)
else:
costs = probas @ cost_matrix
costs = np.partition(costs, 1, axis=1)[:, :2]
return costs[:, 0]
elif method == 'margin_sampling':
if cost_matrix is None:
probas = -(np.partition(-probas, 1, axis=1)[:, :2])
return 1 - np.abs(probas[:, 0] - probas[:, 1])
else:
costs = probas @ cost_matrix
costs = np.partition(costs, 1, axis=1)[:, :2]
return -np.abs(costs[:, 0] - costs[:, 1])
elif method == 'entropy':
with np.errstate(divide='ignore', invalid='ignore'):
return np.nansum(-probas * | np.log(probas) | numpy.log |
import numpy as np
from util import softmax, sigmoid, dsigmoid, adam, rmsprop
import pickle
class vrnn:
def __init__(self, i_size, h_size, o_size, optimize='rmsprop', wb=None):
self.i_size = i_size
self.h_size = h_size
self.o_size = o_size
self.optimize = optimize
if wb:
self.w, self.b = self.load_model(wb)
else:
self.w={}
self.b={}
# input to hidden weights
self.w['ih'] = np.random.normal(0,0.01,(h_size, i_size))
self.b['ih'] = np.zeros((h_size, 1))
# prev hidden to hidden weights
self.w['ph'] = np.random.normal(0,0.01,(h_size, h_size))
self.b['ph'] = np.zeros((h_size, 1))
# hidden to output weights
self.w['ho'] = np.random.normal(0,0.01,(o_size, h_size))
self.b['ho'] = np.zeros((o_size, 1))
if optimize == 'rmsprop' or optimize == 'adam':
self.m={}
self.m['ih'] = np.zeros((h_size, i_size))
self.m['ph'] = np.zeros((h_size, h_size))
self.m['ho'] = np.zeros((o_size, h_size))
if optimize == 'adam':
self.v={}
self.v['ih'] = np.zeros((h_size, i_size))
self.v['ph'] = np.zeros((h_size, h_size))
self.v['ho'] = np.zeros((o_size, h_size))
self.weight_update = adam
elif optimize == 'rmsprop':
self.weight_update = rmsprop
def forward_pass(self, inputs):
self.inputs = inputs
self.n_inp = len(inputs)
self.o = []; self.h = {}
self.vh = []; self.vo = []
self.h[-1] = np.zeros((self.h_size, 1))
for i in range(self.n_inp):
# calculation for hidden activation
self.vh.append(np.dot(self.w['ih'],inputs[i]) + np.dot(self.w['ph'], self.h[i-1]) + self.b['ih'])
self.h[i] = (sigmoid(self.vh[i]))
# calculation for output activation
self.vo.append(np.dot(self.w['ho'],self.h[i]) + self.b['ho'])
self.o.append(softmax(self.vo[i]))
return self.o
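# Recurrence implemented above (added note):
#   h_t = sigmoid(W_ih @ x_t + W_ph @ h_{t-1} + b_ih)
#   o_t = softmax(W_ho @ h_t + b_ho)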
def backward_pass(self, t):
# error calculation
e = self.error(t)
# dw variables
dw={}
db= {}
dw['ih'] = np.zeros((self.h_size, self.i_size))
db['ih'] = np.zeros((self.h_size, 1))
# hidden-2-output dw
dw['ho'] = np.zeros((self.o_size, self.h_size))
db['ho'] = np.zeros((self.o_size, 1))
# hidden-2-hidden dw
dw['ph'] = np.zeros((self.h_size, self.h_size))
db['ph'] = np.zeros((self.h_size, 1))
dh = 0
for i in reversed(range(self.n_inp)):
# gradient at output layer
do = self.o[i] - t[i]
# hidden to output weights' dw
dw['ho'] += np.dot(do, self.h[i].T)
db['ho'] += do
# gradient at hidden layer
dh += np.dot(self.w['ho'].T, do) * dsigmoid(self.vh[i])
# input to hidden weight's dw
dw['ih'] += np.dot(dh, self.inputs[i].T)
db['ih'] += dh
# hidden to prev hidden weight's dw
dw['ph'] += np.dot(dh, self.h[i-1].T)
db['ph'] += dh
dh = | np.dot(self.w['ph'].T, dh) | numpy.dot |
#!/usr/bin/python3
r'''Tests the python-wrapped C API
'''
import sys
import numpy as np
import numpysane as nps
import os
testdir = os.path.dirname(os.path.realpath(__file__))
# I import the LOCAL mrcal since that's what I'm testing
sys.path[:0] = f"{testdir}/..",
import mrcal
import testutils
model_splined = mrcal.cameramodel(f"{testdir}/data/cam0.splined.cameramodel")
ux,uy = mrcal.knots_for_splined_models(model_splined.intrinsics()[0])
testutils.confirm_equal(ux,
| np.array([-1.33234678,-1.15470054,-0.9770543,-0.79940807,-0.62176183,-0.44411559,-0.26646936,-0.08882312,0.08882312,0.26646936,0.44411559,0.62176183,0.79940807,0.9770543,1.15470054,1.33234678]) | numpy.array |
# -*- coding: utf-8 -*-
import numpy
import random
import requests
from config import KEYS
from typing import Union
fuzz_margin = 0.02
def compute_center(points: list) -> list:
"""
Computes the center from a list of point coordinates
:param points: list of points (lon, lat)
"""
polygon = numpy.array(points)
length = polygon.shape[0]
sum_lon = | numpy.sum(polygon[:, 0]) | numpy.sum |
__author__ = 'Ardalan'
CODE_FOLDER = "/home/ardalan/Documents/kaggle/bnp/"
# CODE_FOLDER = "/home/arda/Documents/kaggle/bnp/"
import os, sys, time, re, zipfile, pickle, operator, glob
import pandas as pd
import numpy as np
from xgboost import XGBClassifier, XGBRegressor
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from sklearn import cross_validation
from sklearn import linear_model
from sklearn import ensemble
from sklearn import naive_bayes
from sklearn import svm
from sklearn import calibration
from keras.preprocessing import text, sequence
from keras.optimizers import *
from keras.models import Sequential
from keras.utils import np_utils
from keras.layers import core, embeddings, recurrent, advanced_activations, normalization
from keras.utils import np_utils
from keras.callbacks import EarlyStopping
def clipProba(ypredproba):
"""
Taking list of proba and returning a list of clipped proba
:param ypredproba:
:return: ypredproba clipped
"""""
ypredproba = np.where(ypredproba <= 0., 1e-5 , ypredproba)
ypredproba = np.where(ypredproba >= 1., 1.-1e-5, ypredproba)
return ypredproba
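# (Added note) Clipping keeps the probabilities strictly inside (0, 1) so that
# the log-loss used for evaluation stays finite at the extremes.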
def reshapePrediction(ypredproba):
result = None
if len(ypredproba.shape) > 1:
if ypredproba.shape[1] == 1: result = ypredproba[:, 0]
if ypredproba.shape[1] == 2: result = ypredproba[:, 1]
else:
result = ypredproba.ravel()
result = clipProba(result)
return result
def eval_func(ytrue, ypredproba):
return metrics.log_loss(ytrue, ypredproba)
def loadFileinZipFile(zip_filename, filename, dtypes=None, parsedate = None, password=None, **kvargs):
"""
Load file to dataframe.
"""
with zipfile.ZipFile(zip_filename, 'r') as myzip:
if password:
myzip.setpassword(password)
if parsedate:
return pd.read_csv(myzip.open(filename), sep=',', parse_dates=parsedate, dtype=dtypes, **kvargs)
else:
return pd.read_csv(myzip.open(filename), sep=',', dtype=dtypes, **kvargs)
def CreateDataFrameFeatureImportance(model, pd_data):
dic_fi = model.get_fscore()
df = pd.DataFrame(dic_fi.items(), columns=['feature', 'fscore'])
df['col_indice'] = df['feature'].apply(lambda r: r.replace('f','')).astype(int)
df['feat_name'] = df['col_indice'].apply(lambda r: pd_data.columns[r])
return df.sort('fscore', ascending=False)
def LoadParseData(l_filenames):
l_X = []
l_X_test=[]
l_Y = []
for filename in l_filenames:
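# (Added note) Filenames are assumed to end in '.p'; the last two characters
# are stripped so both '<stem>.p' and '<stem>.csv' can be rebuilt from the stem.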
filename = filename[:-2]
print(filename)
dic_log = pickle.load(open(filename + '.p','rb'))
pd_temp = pd.read_csv(filename + '.csv')
test_idx = pd_temp['ID'].values.astype(int)
l_X.append(np.hstack(dic_log['ypredproba']))
l_X_test.append(pd_temp['PredictedProb'].values)
l_Y.append(np.hstack(dic_log['yval']))
X = np.array(l_X).T
X_test = np.array(l_X_test).T
Y = np.array(l_Y).T.mean(1).astype(int)
# Y = np.array(l_Y).T
return X, Y, X_test, test_idx
def xgb_accuracy(ypred, dtrain):
ytrue = dtrain.get_label().astype(int)
ypred = np.where(ypred <= 0., 1e-5 , ypred)
ypred = | np.where(ypred >= 1., 1.-1e-5, ypred) | numpy.where |
import numpy as np
import pytest
from desdeo_mcdm.interactive import NautilusNavigator
from desdeo_tools.scalarization import PointMethodASF
@pytest.fixture()
def pareto_front():
# dummy, non-dominated discreet front
pareto_front = np.array(
[
[-1.2, 0, 2.1, 2],
[1.0, -0.99, 3.2, 2.2],
[0.7, 2.2, 1.1, 1.9],
[1.9, 2.1, 1.01, 0.5],
[-0.4, -0.3, 10.5, 12.3],
]
)
return pareto_front
@pytest.fixture()
def ideal(pareto_front):
return np.min(pareto_front, axis=0)
@pytest.fixture()
def nadir(pareto_front):
return np.max(pareto_front, axis=0)
@pytest.fixture()
def asf_problem():
fun = NautilusNavigator.solve_nautilus_asf_problem
return fun
@pytest.fixture()
def asf(ideal, nadir):
asf = PointMethodASF(nadir, ideal)
return asf
class TestRefPointProjection:
def test_no_bounds(self, asf_problem, pareto_front, ideal, nadir, asf):
"""Test the projection to the Pareto front without specifying any bounds.
"""
bounds = np.repeat(np.nan, ideal.size)
ref_points = [
[0.5, 1, 2, 3],
[1.8, 2.0, 1.05, 0.33],
[0.9, -0.88, 3.1, 2.1],
[100, 100, 100, 100],
[-100, -100, -100, -100],
[0, 0, 0, 0],
]
for ref_point in ref_points:
proj_i = asf_problem(
pareto_front, list(range(0, pareto_front.shape[0])), np.array(ref_point), ideal, nadir, bounds
)
# The projection should be the point on the Pareto front with the shortest distance to the reference point
# (metric dictated by the ASF used)
should_be = np.argmin(asf(pareto_front, ref_point))
assert proj_i == should_be
def test_w_subset_i(self, asf_problem, pareto_front, ideal, nadir, asf):
"""Test the projection to a subset of the Pareto front.
"""
bounds = np.repeat(np.nan, ideal.size)
subset = np.array([1, 3, 4], dtype=int)
ref_points = [
[0.5, 1, 2, 3],
[1.8, 2.0, 1.05, 0.33],
[0.9, -0.88, 3.1, 2.1],
[100, 100, 100, 100],
[-100, -100, -100, -100],
[0, 0, 0, 0],
]
pf_mask = np.repeat(False, pareto_front.shape[0])
pf_mask[subset] = True
filtered_pf = np.copy(pareto_front)
filtered_pf[~pf_mask] = np.nan
for ref_point in ref_points:
proj_i = asf_problem(pareto_front, subset, np.array(ref_point), ideal, nadir, bounds)
# The projection should be the point on the Pareto front with the shortest distance to the reference point
# (metric dictated by the ASF used)
should_be = np.nanargmin(asf(filtered_pf, ref_point))
print(should_be)
assert proj_i == should_be
def test_w_subset_i_and_bounds(self, asf_problem, pareto_front, ideal, nadir, asf):
"""Test the projection to a subset of the Pareto front.
"""
bounds = | np.array([np.nan, 1.9, np.nan, np.nan]) | numpy.array |
"""Classes for DensePose dataset.
"""
import cv2
import numpy as np
from spml.data.datasets.base_dataset import ListDataset
import spml.data.transforms as transforms
class DenseposeDataset(ListDataset):
"""Class of Densepose dataset which takes a file of paired list of
images and labels for Densepose.
"""
def __init__(self,
data_dir,
data_list,
img_mean=(0, 0, 0),
img_std=(1, 1, 1),
size=None,
random_crop=False,
random_scale=False,
random_mirror=False,
training=False):
"""Base class for Denspose Dataset.
Args:
data_dir: A string indicates root directory of images and labels.
data_list: A list of strings which indicate path of paired images
and labels. 'image_path semantic_label_path instance_label_path'.
img_mean: A list of scalars indicate the mean image value per channel.
img_std: A list of scalars indicate the std image value per channel.
size: A tuple of scalars indicate size of output image and labels.
The output resolution remain the same if `size` is None.
random_crop: enable/disable random_crop for data augmentation.
If True, adopt randomly cropping as augmentation.
random_scale: enable/disable random_scale for data augmentation.
If True, adopt randomly scaling as augmentation.
random_mirror: enable/disable random_mirror for data augmentation.
If True, adopt randomly mirroring as augmentation.
training: enable/disable training to set dataset for training and
testing. If True, set to training mode.
"""
super(DenseposeDataset, self).__init__(
data_dir,
data_list,
img_mean,
img_std,
size,
random_crop,
random_scale,
random_mirror,
training)
self.part_labels = {
0: 'background',
1: 'torso',
2: 'right hand',
3: 'left hand',
4: 'left foot',
5: 'right foot',
6: 'right thigh',
7: 'left thigh',
8: 'right leg',
9: 'left leg',
10: 'left arm',
11: 'right arm',
12: 'left forearm',
13: 'right forearm',
14: 'head'
}
# Remapping part labels (for horizontally flipping).
self.part_label_remap = np.arange(256, dtype=np.uint8)
self.part_label_remap[:15] = (
[0, 1, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 14])
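# (Added note) The permutation swaps each left/right pair: hands (2 <-> 3),
# feet (4 <-> 5), thighs (6 <-> 7), legs (8 <-> 9), arms (10 <-> 11) and
# forearms (12 <-> 13); background, torso and head map to themselves.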
def _training_preprocess(self, idx):
"""Data preprocessing for training.
"""
assert(self.size is not None)
image, semantic_label, instance_label = self._get_datas_by_index(idx)
label = np.stack([semantic_label, instance_label], axis=2)
# The part label should be remapped after mirroring.
if self.random_mirror:
is_flip = np.random.uniform(0, 1.0) >= 0.5
if is_flip:
image = image[:, ::-1, ...]
label = label[:, ::-1, ...]
label[..., 0] = self.part_label_remap[label[..., 0]]
if self.random_scale:
image, label = transforms.random_resize(image, label, 0.5, 1.5)
if self.random_crop:
image, label = transforms.random_crop_with_pad(
image, label, self.size, self.img_mean, 255)
semantic_label, instance_label = label[..., 0], label[..., 1]
return image, semantic_label, instance_label
class DenseposeClassifierDataset(DenseposeDataset):
def __init__(self,
data_dir,
data_list,
img_mean=(0, 0, 0),
img_std=(1, 1, 1),
size=None,
random_crop=False,
random_scale=False,
random_mirror=False,
random_grayscale=False,
random_blur=False,
training=False):
"""Class of Densepose Dataset for training softmax classifier,
where we introduce more data augmentation.
Args:
data_dir: A string indicates root directory of images and labels.
data_list: A list of strings which indicate path of paired images
and labels. 'image_path semantic_label_path instance_label_path'.
img_mean: A list of scalars indicate the mean image value per channel.
img_std: A list of scalars indicate the std image value per channel.
size: A tuple of scalars indicate size of output image and labels.
The output resolution remain the same if `size` is None.
random_crop: enable/disable random_crop for data augmentation.
If True, adopt randomly cropping as augmentation.
random_scale: enable/disable random_scale for data augmentation.
If True, adopt randomly scaling as augmentation.
random_mirror: enable/disable random_mirror for data augmentation.
If True, adopt randomly mirroring as augmentation.
random_grayscale: enable/disable random_grayscale for data augmentation.
If True, adopt randomly converting RGB to grayscale as augmentation.
random_blur: enable/disable random_blur for data augmentation.
If True, adopt randomly applying Gaussian blur as augmentation.
training: enable/disable training to set dataset for training and
testing. If True, set to training mode.
"""
super(DenseposeClassifierDataset, self).__init__(
data_dir,
data_list,
img_mean,
img_std,
size,
random_crop,
random_scale,
random_mirror,
training)
self.random_grayscale = random_grayscale
self.random_blur = random_blur
def _training_preprocess(self, idx):
"""Data preprocessing for training.
"""
assert(self.size is not None)
image, semantic_label, instance_label = self._get_datas_by_index(idx)
label = np.stack([semantic_label, instance_label], axis=2)
# The part label should be changed accordingly.
if self.random_mirror:
is_flip = np.random.uniform(0, 1.0) >= 0.5
if is_flip:
image = image[:, ::-1, ...]
label = label[:, ::-1, ...]
label[..., 0] = self.part_label_remap[label[..., 0]]
if self.random_scale:
image, label = transforms.random_resize(image, label, 0.5, 2.0)
if self.random_crop:
image, label = transforms.random_crop_with_pad(
image, label, self.size, self.img_mean, 255)
# Randomly convert RGB to grayscale.
if self.random_grayscale and np.random.uniform(0, 1.0) < 0.3:
rgb2gray = | np.array([0.3, 0.59, 0.11], dtype=np.float32) | numpy.array |
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2019 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
import pytest
from _pytest.outcomes import Skipped
import os
import numpy as np
import pyuvdata.tests as uvtest
from pyuvdata import UVData, UVCal, utils as uvutils
from pyuvdata.data import DATA_PATH
from pyuvdata import UVFlag
from ..uvflag import lst_from_uv, flags2waterfall, and_rows_cols
from pyuvdata import __version__
import shutil
import copy
import warnings
import h5py
import pathlib
test_d_file = os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcAA.uvh5")
test_c_file = os.path.join(DATA_PATH, "zen.2457555.42443.HH.uvcA.omni.calfits")
test_f_file = test_d_file.rstrip(".uvh5") + ".testuvflag.h5"
pyuvdata_version_str = " Read/written with pyuvdata version: " + __version__ + "."
pytestmark = pytest.mark.filterwarnings(
"ignore:telescope_location is not set. Using known values for HERA.",
"ignore:antenna_positions is not set. Using known values for HERA.",
)
@pytest.fixture(scope="session")
def uvdata_obj_main():
uvdata_object = UVData()
uvdata_object.read(test_d_file)
yield uvdata_object
# cleanup
del uvdata_object
return
@pytest.fixture(scope="function")
def uvdata_obj(uvdata_obj_main):
uvdata_object = uvdata_obj_main.copy()
yield uvdata_object
# cleanup
del uvdata_object
return
# The following three fixtures are used regularly
# to initizize UVFlag objects from standard files
# We need to define these here in order to set up
# some skips for developers who do not have `pytest-cases` installed
@pytest.fixture(scope="function")
def uvf_from_data(uvdata_obj):
uvf = UVFlag()
uvf.from_uvdata(uvdata_obj)
# yield the object for the test
yield uvf
# do some cleanup
del (uvf, uvdata_obj)
@pytest.fixture(scope="function")
def uvf_from_uvcal():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag()
uvf.from_uvcal(uvc)
# the antenna type test file is large, so downselect to speed up
if uvf.type == "antenna":
uvf.select(antenna_nums=uvf.ant_array[:5])
# yield the object for the test
yield uvf
# do some cleanup
del (uvf, uvc)
@pytest.fixture(scope="function")
def uvf_from_waterfall(uvdata_obj):
uvf = UVFlag()
uvf.from_uvdata(uvdata_obj, waterfall=True)
# yield the object for the test
yield uvf
# do some cleanup
del uvf
# Try to import `pytest-cases` and define decorators used to
# iterate over the three main types of UVFlag objects
# otherwise make the decorators skip the tests that use these iterators
try:
pytest_cases = pytest.importorskip("pytest_cases", minversion="1.12.1")
cases_decorator = pytest_cases.parametrize(
"input_uvf",
[
pytest_cases.fixture_ref(uvf_from_data),
pytest_cases.fixture_ref(uvf_from_uvcal),
pytest_cases.fixture_ref(uvf_from_waterfall),
],
)
cases_decorator_no_waterfall = pytest_cases.parametrize(
"input_uvf",
[
pytest_cases.fixture_ref(uvf_from_data),
pytest_cases.fixture_ref(uvf_from_uvcal),
],
)
# This warning is raised by pytest_cases
# It is due to a feature the developer does
# not know how to handle yet. ignore for now.
warnings.filterwarnings(
"ignore",
message="WARNING the new order is not" + " taken into account !!",
append=True,
)
except Skipped:
cases_decorator = pytest.mark.skipif(
True, reason="pytest-cases not installed or not required version"
)
cases_decorator_no_waterfall = pytest.mark.skipif(
True, reason="pytest-cases not installed or not required version"
)
@pytest.fixture()
def test_outfile(tmp_path):
yield str(tmp_path / "outtest_uvflag.h5")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_check_flag_array(uvdata_obj):
uvf = UVFlag()
uvf.from_uvdata(uvdata_obj, mode="flag")
uvf.flag_array = np.ones((uvf.flag_array.shape), dtype=int)
with pytest.raises(
ValueError, match="UVParameter _flag_array is not the appropriate type.",
):
uvf.check()
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_bad_mode(uvdata_obj):
uv = uvdata_obj
with pytest.raises(ValueError) as cm:
UVFlag(uv, mode="bad_mode", history="I made a UVFlag object", label="test")
assert str(cm.value).startswith("Input mode must be within acceptable")
uv = UVCal()
uv.read_calfits(test_c_file)
with pytest.raises(ValueError) as cm:
UVFlag(uv, mode="bad_mode", history="I made a UVFlag object", label="test")
assert str(cm.value).startswith("Input mode must be within acceptable")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_uvdata(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv, history="I made a UVFlag object", label="test")
assert uvf.metric_array.shape == uv.flag_array.shape
assert np.all(uvf.metric_array == 0)
assert uvf.weights_array.shape == uv.flag_array.shape
assert np.all(uvf.weights_array == 1)
assert uvf.type == "baseline"
assert uvf.mode == "metric"
assert np.all(uvf.time_array == uv.time_array)
assert np.all(uvf.lst_array == uv.lst_array)
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.ant_1_array == uv.ant_1_array)
assert np.all(uvf.ant_2_array == uv.ant_2_array)
assert "I made a UVFlag object" in uvf.history
assert 'Flag object with type "baseline"' in uvf.history
assert pyuvdata_version_str in uvf.history
assert uvf.label == "test"
assert uvf.filename == uv.filename
def test_add_extra_keywords(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv, history="I made a UVFlag object", label="test")
uvf.extra_keywords = {"keyword1": 1, "keyword2": 2}
assert "keyword1" in uvf.extra_keywords
assert "keyword2" in uvf.extra_keywords
uvf.extra_keywords["keyword3"] = 3
assert "keyword3" in uvf.extra_keywords
assert uvf.extra_keywords.get("keyword1") == 1
assert uvf.extra_keywords.get("keyword2") == 2
assert uvf.extra_keywords.get("keyword3") == 3
def test_read_extra_keywords(uvdata_obj):
uv = uvdata_obj
uv.extra_keywords = {"keyword1": 1, "keyword2": 2}
assert "keyword1" in uv.extra_keywords
assert "keyword2" in uv.extra_keywords
uvf = UVFlag(uv, history="I made a UVFlag object", label="test")
assert "keyword1" in uvf.extra_keywords
assert "keyword2" in uvf.extra_keywords
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_uvdata_x_orientation(uvdata_obj):
uv = uvdata_obj
uv.x_orientation = "east"
uvf = UVFlag(uv, history="I made a UVFlag object", label="test")
assert uvf.x_orientation == uv.x_orientation
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_init_uvdata_copy_flags(uvdata_obj, future_shapes):
uv = uvdata_obj
if future_shapes:
uv.use_future_array_shapes()
with uvtest.check_warnings(UserWarning, 'Copying flags to type=="baseline"'):
uvf = UVFlag(uv, copy_flags=True, mode="metric")
# with copy flags uvf.metric_array should be none
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
if future_shapes:
assert np.array_equal(uvf.flag_array[:, 0, :, :], uv.flag_array)
else:
assert np.array_equal(uvf.flag_array, uv.flag_array)
assert uvf.weights_array is None
assert uvf.type == "baseline"
assert uvf.mode == "flag"
assert np.all(uvf.time_array == uv.time_array)
assert np.all(uvf.lst_array == uv.lst_array)
if future_shapes:
assert np.all(uvf.freq_array == uv.freq_array)
else:
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.ant_1_array == uv.ant_1_array)
assert np.all(uvf.ant_2_array == uv.ant_2_array)
assert 'Flag object with type "baseline"' in uvf.history
assert pyuvdata_version_str in uvf.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_uvdata_mode_flag(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag()
uvf.from_uvdata(uv, copy_flags=False, mode="flag")
# with copy flags uvf.metric_array should be none
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert np.array_equal(uvf.flag_array, uv.flag_array)
assert uvf.weights_array is None
assert uvf.type == "baseline"
assert uvf.mode == "flag"
assert np.all(uvf.time_array == uv.time_array)
assert np.all(uvf.lst_array == uv.lst_array)
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.ant_1_array == uv.ant_1_array)
assert | np.all(uvf.ant_2_array == uv.ant_2_array) | numpy.all |
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
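# Editorial sanity check (not part of the original suite): each divisor
# i <= sqrt(n) contributes the pair (i, n // i), so factors(12) covers all six.
assert factors(12) == {1, 2, 3, 4, 6, 12}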
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
    assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
'''
Created on Jun 21, 2015
@author: <NAME><<EMAIL>>
'''
import argparse
from vector_representation import read_vectors_from_csv
from classfiers import NBClassifier
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('train_file', type=str)
params = parser.parse_args()
train_data = read_vectors_from_csv(params.train_file)
print("Building a model.")
classifier = NBClassifier()
classifier.train(train_data)
means_v, variance_v = classifier.get_model()
num_features = 20
    index = np.arange(num_features)
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import MITgcmutils as mit
plt.ion()
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
dir0 = 'tmp_energy7/'
filets = 'diag_ocnSnap*'
filepe = 'tracer_wb*'
fileave = 'diag_ocnTave*'
flag_grid = 0
alphat = 2e-4
betas = 7.4e-4
#%==================== LOAD FIELDS ===================================
RC = mit.rdmds(dir0+'RC*')
RA = mit.rdmds(dir0+'RA*')
DRF = mit.rdmds(dir0+'DRF*')
hFacC = mit.rdmds(dir0+'hFacC*')
si_z,si_y,si_x = hFacC.shape
hFacC2 = np.where(hFacC < 1, np.NaN, 1)
RA = RA[None,:,:]
i = 1
iterst = mit.mds.scanforfiles(dir0 + filets)
itersp = mit.mds.scanforfiles(dir0 + filepe)
# t0 = mit.rdmds(dir0 + filets,iterst[i],rec=0)
# t1 = mit.rdmds(dir0 + filets,iterst[i+1],rec=0)
# s0 = mit.rdmds(dir0 + filets,iterst[i],rec=1)
# s1 = mit.rdmds(dir0 + filets,iterst[i+1],rec=1)
#w0 = mit.rdmds(dir0 + filew,iterst[i],rec=0)
#w1 = mit.rdmds(dir0 + filew,iterst[i+1],rec=0)
wav = mit.rdmds(dir0 + fileave,itersp[i],rec=4)
dtdt = mit.rdmds(dir0 + filepe,itersp[i],rec=0)
dsdt = mit.rdmds(dir0 + filepe,itersp[i],rec=1)
advrt = mit.rdmds(dir0 + filepe,itersp[i],rec=2)
advxt = mit.rdmds(dir0 + filepe,itersp[i],rec=3)
advyt = mit.rdmds(dir0 + filepe,itersp[i],rec=4)
advrs = mit.rdmds(dir0 + filepe,itersp[i],rec=5)
advxs = mit.rdmds(dir0 + filepe,itersp[i],rec=6)
advys = mit.rdmds(dir0 + filepe,itersp[i],rec=7)
wb2 = mit.rdmds(dir0 + filepe,itersp[i],rec=8)
wb = mit.rdmds(dir0 + filepe,itersp[i],rec=9)
dtdt = dtdt/86400
dsdt = dsdt/86400
# t0 = np.where(t0 == 0,np.NaN,t0)
# t1 = np.where(t1 == 0,np.NaN,t1)
ix = int(si_x/2)
advrt = np.append(advrt,advrt[None,0,:,:],axis=0)
advrs = np.append(advrs,advrs[None,0,:,:],axis=0)
from math import pi
from pathlib import Path
from typing import Optional, Sequence, List
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from nevermind.deepq import ValueFunctionApproximation
from nevermind.train import TrainingSummary
def save_or_show(fig: Figure, save_to_file: Optional[Path]):
if save_to_file is None:
plt.show()
else:
Path(save_to_file.parent).mkdir(exist_ok=True, parents=True)
plt.savefig(str(save_to_file))
plt.close(fig)
def plot_training_summaries(summaries: Sequence[TrainingSummary], save_to_file: Path = None):
fig, (ax_episode_reward, ax_episode_length, ax_value_loss, ax_exploration_rate, ax_buffer_size) = \
plt.subplots(nrows=5, figsize=(8, 20))
fig.suptitle('Training summary')
def plot_average(ax, lines: Sequence[List[float]]):
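        # Pads each run with its final value so runs of different lengths can
        # be averaged pointwise; the shaded band spans the min/max across runs.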
max_iter = np.max([len(l) for l in lines])
        padded = np.array([line + ([line[-1]] * (max_iter - len(line))) for line in lines], dtype=float)
mean = padded.mean(0)
ax.plot(mean)
ax.fill_between(range(len(list(mean))), padded.min(0), padded.max(0), alpha=.1)
ax_episode_reward.set_ylabel('return')
ax_episode_reward.set_xlabel('episode')
plot_average(ax_episode_reward, [summary.returns for summary in summaries])
ax_episode_length.set_ylabel('episode length')
ax_episode_length.set_xlabel('episode')
plot_average(ax_episode_length, [summary.episode_lengths for summary in summaries])
ax_exploration_rate.set_ylabel('exploration')
ax_exploration_rate.set_xlabel('timestep')
for summary in summaries:
ax_exploration_rate.plot(summary.exploration_rates)
ax_value_loss.set_ylabel(f'mean {"huber" if summary.q.clip_error else "square"} loss for q')
ax_value_loss.set_xlabel('timestep')
plot_average(ax_value_loss, [summary.losses for summary in summaries])
ax_buffer_size.set_ylabel('buffer size')
ax_buffer_size.set_xlabel('timestep')
for summary in summaries:
ax_buffer_size.plot(summary.buffer_sizes)
save_or_show(fig, save_to_file)
def plot_cartpole_value_function(q: ValueFunctionApproximation, save_to_file: Path = None, show_advantage=False):
max_x = 2.4
max_xdot = 3.
max_θ = 12 * pi / 180
max_θdot = 3.
num_x = 5
num_xdot = 5
num_θ = 5
num_θdot = 5
observations = np.array([[[[[x, xdot, θ, θdot]
for x in np.linspace(-max_x, max_x, num=num_x)]
for θ in np.linspace(-max_θ, max_θ, num=num_θ)]
for xdot in np.linspace(-max_xdot, max_xdot, num=num_xdot)]
for θdot in np.linspace(-max_θdot, max_θdot, num=num_θdot)])
values = np.reshape(q.all_action_values_for(observations=np.reshape(observations, [-1, 4])),
list(observations.shape[:-1]) + [q.env.action_space.n])
fig, axes = plt.subplots(nrows=num_θdot, ncols=num_xdot * 2, sharex=True, sharey=True, figsize=(15, 10))
name = 'advantage' if show_advantage else 'value'
fig.canvas.set_window_title(f'cartpole_{name}_function')
fig.suptitle(f'Cartpole {name} function')
if show_advantage:
        values -= np.repeat(np.expand_dims(np.average(values, axis=-1), axis=-1),
                            values.shape[-1], axis=-1)  # subtract the per-state mean action value
# coding: utf-8
# /*##########################################################################
# Copyright (C) 2016-2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""This module provides functions to read fabio images as an HDF5 file.
>>> import silx.io.fabioh5
>>> f = silx.io.fabioh5.File("foobar.edf")
.. note:: This module has a dependency on the `h5py <http://www.h5py.org/>`_
and `fabio <https://github.com/silx-kit/fabio>`_ libraries,
    which are not mandatory dependencies for `silx`. You might need
    to install them if you don't already have them.
"""
import collections
import numpy
import numbers
import logging
_logger = logging.getLogger(__name__)
try:
from silx.third_party import six
except ImportError:
import six
try:
import fabio
except ImportError as e:
_logger.error("Module %s requires fabio", __name__)
raise e
try:
import h5py
except ImportError as e:
_logger.error("Module %s requires h5py", __name__)
raise e
class Node(object):
"""Main class for all fabioh5 classes. Help to manage a tree."""
def __init__(self, name, parent=None):
self.__parent = parent
self.__basename = name
@property
def h5py_class(self):
"""Returns the h5py classes which is mimicked by this class. It can be
one of `h5py.File, h5py.Group` or `h5py.Dataset`
:rtype: Class
"""
raise NotImplementedError()
@property
def parent(self):
"""Returns the parent of the node.
:rtype: Node
"""
return self.__parent
@property
def file(self):
"""Returns the file node of this node.
:rtype: Node
"""
node = self
while node.__parent is not None:
node = node.__parent
if isinstance(node, File):
return node
else:
return None
def _set_parent(self, parent):
"""Set the parent of this node.
        It does not update the parent object.
:param Node parent: New parent for this node
"""
self.__parent = parent
@property
def attrs(self):
"""Returns HDF5 attributes of this node.
:rtype: dict
"""
return {}
@property
def name(self):
"""Returns the HDF5 name of this node.
"""
if self.__parent is None:
return "/"
if self.__parent.name == "/":
return "/" + self.basename
return self.__parent.name + "/" + self.basename
@property
def basename(self):
"""Returns the HDF5 basename of this node.
"""
return self.__basename
class Dataset(Node):
"""Class which handle a numpy data as a mimic of a h5py.Dataset.
"""
def __init__(self, name, data, parent=None, attrs=None):
self.__data = data
Node.__init__(self, name, parent)
if attrs is None:
self.__attrs = {}
else:
self.__attrs = attrs
def _set_data(self, data):
"""Set the data exposed by the dataset.
        It has to be called only once before the data is used. It should
not be edited after use.
:param numpy.ndarray data: Data associated to the dataset
"""
self.__data = data
def _get_data(self):
"""Returns the exposed data
:rtype: numpy.ndarray
"""
return self.__data
@property
def attrs(self):
"""Returns HDF5 attributes of this node.
:rtype: dict
"""
return self.__attrs
@property
def h5py_class(self):
"""Returns the h5py classes which is mimicked by this class. It can be
one of `h5py.File, h5py.Group` or `h5py.Dataset`
:rtype: Class
"""
return h5py.Dataset
@property
def dtype(self):
"""Returns the numpy datatype exposed by this dataset.
:rtype: numpy.dtype
"""
return self._get_data().dtype
@property
def shape(self):
"""Returns the shape of the data exposed by this dataset.
:rtype: tuple
"""
if isinstance(self._get_data(), numpy.ndarray):
return self._get_data().shape
else:
return tuple()
@property
def size(self):
"""Returns the size of the data exposed by this dataset.
:rtype: int
"""
if isinstance(self._get_data(), numpy.ndarray):
return self._get_data().size
else:
# It is returned as float64 1.0 by h5py
return numpy.float64(1.0)
def __len__(self):
"""Returns the size of the data exposed by this dataset.
:rtype: int
"""
if isinstance(self._get_data(), numpy.ndarray):
return len(self._get_data())
else:
            # h5py raises TypeError when taking len() of a scalar dataset
raise TypeError("Attempt to take len() of scalar dataset")
def __getitem__(self, item):
"""Returns the slice of the data exposed by this dataset.
:rtype: numpy.ndarray
"""
if not isinstance(self._get_data(), numpy.ndarray):
if item == Ellipsis:
return numpy.array(self._get_data())
elif item == tuple():
return self._get_data()
else:
raise ValueError("Scalar can only be reached with an ellipsis or an empty tuple")
return self._get_data().__getitem__(item)
def __str__(self):
basename = self.name.split("/")[-1]
return '<FabIO dataset "%s": shape %s, type "%s">' % \
(basename, self.shape, self.dtype.str)
def __getslice__(self, i, j):
"""Returns the slice of the data exposed by this dataset.
Deprecated but still in use for python 2.7
:rtype: numpy.ndarray
"""
return self.__getitem__(slice(i, j, None))
@property
def value(self):
"""Returns the data exposed by this dataset.
        Deprecated by h5py. It is preferred to use indexing `[()]`.
:rtype: numpy.ndarray
"""
return self._get_data()
@property
def compression(self):
"""Returns compression as provided by `h5py.Dataset`.
There is no compression."""
return None
@property
def compression_opts(self):
"""Returns compression options as provided by `h5py.Dataset`.
There is no compression."""
return None
@property
def chunks(self):
"""Returns chunks as provided by `h5py.Dataset`.
There is no chunks."""
return None
class LazyLoadableDataset(Dataset):
"""Abstract dataset which provide a lazy loading of the data.
The class have to be inherited and the :meth:`_create_data` have to be
implemented to return the numpy data exposed by the dataset. This factory
is only called ones, when the data is needed.
"""
def __init__(self, name, parent=None, attrs=None):
super(LazyLoadableDataset, self).__init__(name, None, parent, attrs=attrs)
self.__is_initialized = False
def _create_data(self):
"""
Factory to create the data exposed by the dataset when it is needed.
        It has to be implemented to work.
:rtype: numpy.ndarray
"""
raise NotImplementedError()
def _get_data(self):
"""Returns the data exposed by the dataset.
        Overwrites the Dataset method :meth:`_get_data` to implement the lazy
loading feature.
:rtype: numpy.ndarray
"""
if not self.__is_initialized:
data = self._create_data()
self._set_data(data)
self.__is_initialized = True
return super(LazyLoadableDataset, self)._get_data()
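# Editorial sketch (not part of silx): the minimal LazyLoadableDataset contract
# is a single _create_data override; the array is built on first access and
# cached by _get_data(). RawHeaderData further below is the in-module example.
class _ExampleLazyDataset(LazyLoadableDataset):
    def _create_data(self):
        return numpy.arange(10)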
class Group(Node):
"""Class which mimic a `h5py.Group`."""
def __init__(self, name, parent=None, attrs=None):
Node.__init__(self, name, parent)
self.__items = collections.OrderedDict()
if attrs is None:
attrs = {}
self.__attrs = attrs
def _get_items(self):
"""Returns the child items as a name-node dictionary.
:rtype: dict
"""
return self.__items
def add_node(self, node):
"""Add a child to this group.
:param Node node: Child to add to this group
"""
self._get_items()[node.basename] = node
node._set_parent(self)
@property
def h5py_class(self):
"""Returns the h5py classes which is mimicked by this class.
It returns `h5py.Group`
:rtype: Class
"""
return h5py.Group
@property
def attrs(self):
"""Returns HDF5 attributes of this node.
:rtype: dict
"""
return self.__attrs
def items(self):
"""Returns items iterator containing name-node mapping.
:rtype: iterator
"""
return self._get_items().items()
def get(self, name, default=None, getclass=False, getlink=False):
""" Retrieve an item or other information.
        If only getlink is true, the returned value is always HardLink,
        because this implementation does not use links, like the original
implementation.
:param str name: name of the item
:param object default: default value returned if the name is not found
:param bool getclass: if true, the returned object is the class of the object found
:param bool getlink: if true, links object are returned instead of the target
:return: An object, else None
:rtype: object
"""
if name not in self._get_items():
return default
if getlink:
node = h5py.HardLink()
else:
node = self._get_items()[name]
if getclass:
obj = node.h5py_class
else:
obj = node
return obj
def __len__(self):
"""Returns the number of child contained in this group.
:rtype: int
"""
return len(self._get_items())
def __iter__(self):
"""Iterate over member names"""
for x in self._get_items().__iter__():
yield x
def __getitem__(self, name):
"""Return a child from is name.
:param name str: name of a member or a path throug members using '/'
separator. A '/' as a prefix access to the root item of the tree.
:rtype: Node
"""
if name is None or name == "":
raise ValueError("No name")
if "/" not in name:
return self._get_items()[name]
if name.startswith("/"):
root = self
while root.parent is not None:
root = root.parent
if name == "/":
return root
return root[name[1:]]
path = name.split("/")
result = self
for item_name in path:
if not isinstance(result, Group):
raise KeyError("Unable to open object (Component not found)")
result = result._get_items()[item_name]
return result
def __contains__(self, name):
"""Returns true is a name is an existing child of this group.
:rtype: bool
"""
return name in self._get_items()
def keys(self):
return self._get_items().keys()
class LazyLoadableGroup(Group):
"""Abstract group which provide a lazy loading of the child.
The class have to be inherited and the :meth:`_create_child` have to be
implemented to add (:meth:`_add_node`) all child. This factory
is only called ones, when child are needed.
"""
def __init__(self, name, parent=None, attrs=None):
Group.__init__(self, name, parent, attrs)
self.__is_initialized = False
def _get_items(self):
"""Returns internal structure which contains child.
It overwrite method :meth:`_get_items` to implement the lazy
loading feature.
:rtype: dict
"""
if not self.__is_initialized:
self.__is_initialized = True
self._create_child()
return Group._get_items(self)
def _create_child(self):
"""
        Factory to create the children contained by the group when needed.
        It has to be implemented to work.
"""
raise NotImplementedError()
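# Editorial sketch (not part of silx): LazyLoadableGroup follows the same
# contract; _create_child registers children via add_node on first traversal.
class _ExampleLazyGroup(LazyLoadableGroup):
    def _create_child(self):
        self.add_node(Dataset("answer", numpy.int64(42)))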
class FrameData(LazyLoadableDataset):
"""Expose a cube of image from a Fabio file using `FabioReader` as
cache."""
def __init__(self, name, fabio_reader, parent=None):
attrs = {"interpretation": "image"}
LazyLoadableDataset.__init__(self, name, parent, attrs=attrs)
self.__fabio_reader = fabio_reader
def _create_data(self):
return self.__fabio_reader.get_data()
class RawHeaderData(LazyLoadableDataset):
"""Lazy loadable raw header"""
def __init__(self, name, fabio_file, parent=None):
LazyLoadableDataset.__init__(self, name, parent)
self.__fabio_file = fabio_file
def _create_data(self):
"""Initialize hold data by merging all headers of each frames.
"""
headers = []
for frame in range(self.__fabio_file.nframes):
if self.__fabio_file.nframes == 1:
header = self.__fabio_file.header
else:
header = self.__fabio_file.getframe(frame).header
data = []
for key, value in header.items():
data.append("%s: %s" % (str(key), str(value)))
headers.append(u"\n".join(data))
# create the header list
return numpy.array(headers)
class MetadataGroup(LazyLoadableGroup):
"""Abstract class for groups containing a reference to a fabio image.
"""
def __init__(self, name, metadata_reader, kind, parent=None, attrs=None):
LazyLoadableGroup.__init__(self, name, parent, attrs)
self.__metadata_reader = metadata_reader
self.__kind = kind
def _create_child(self):
keys = self.__metadata_reader.get_keys(self.__kind)
for name in keys:
data = self.__metadata_reader.get_value(self.__kind, name)
dataset = Dataset(name, data)
self.add_node(dataset)
@property
def _metadata_reader(self):
return self.__metadata_reader
class DetectorGroup(LazyLoadableGroup):
"""Define the detector group (sub group of instrument) using Fabio data.
"""
def __init__(self, name, fabio_reader, parent=None, attrs=None):
if attrs is None:
attrs = {"NX_class": "NXdetector"}
LazyLoadableGroup.__init__(self, name, parent, attrs)
self.__fabio_reader = fabio_reader
def _create_child(self):
data = FrameData("data", self.__fabio_reader)
self.add_node(data)
        # TODO we should add here Nexus information we can extract from the
# metadata
others = MetadataGroup("others", self.__fabio_reader, kind=FabioReader.DEFAULT)
self.add_node(others)
class ImageGroup(LazyLoadableGroup):
"""Define the image group (sub group of measurement) using Fabio data.
"""
def __init__(self, name, fabio_reader, parent=None, attrs=None):
LazyLoadableGroup.__init__(self, name, parent, attrs)
self.__fabio_reader = fabio_reader
def _create_child(self):
data = FrameData("data", self.__fabio_reader)
self.add_node(data)
# TODO detector should be a real soft-link
detector = DetectorGroup("info", self.__fabio_reader)
self.add_node(detector)
class MeasurementGroup(LazyLoadableGroup):
"""Define the measurement group for fabio file.
"""
def __init__(self, name, fabio_reader, parent=None, attrs=None):
LazyLoadableGroup.__init__(self, name, parent, attrs)
self.__fabio_reader = fabio_reader
def _create_child(self):
keys = self.__fabio_reader.get_keys(FabioReader.COUNTER)
        # create image measurement but take care that no other metadata uses
# this name
for i in range(1000):
name = "image_%i" % i
if name not in keys:
data = ImageGroup(name, self.__fabio_reader)
self.add_node(data)
break
else:
raise Exception("image_i for 0..1000 already used")
# add all counters
for name in keys:
data = self.__fabio_reader.get_value(FabioReader.COUNTER, name)
dataset = Dataset(name, data)
self.add_node(dataset)
class FabioReader(object):
"""Class which read and cache data and metadata from a fabio image."""
DEFAULT = 0
COUNTER = 1
POSITIONER = 2
def __init__(self, fabio_file):
self.__fabio_file = fabio_file
self.__counters = {}
self.__positioners = {}
self.__measurements = {}
self.__data = None
self.__frame_count = self.__fabio_file.nframes
self._read(self.__fabio_file)
def _create_data(self):
"""Initialize hold data by merging all frames into a single cube.
Choose the cube size which fit the best the data. If some images are
smaller than expected, the empty space is set to 0.
The computation is cached into the class, and only done ones.
"""
images = []
for frame in range(self.__fabio_file.nframes):
if self.__fabio_file.nframes == 1:
image = self.__fabio_file.data
else:
image = self.__fabio_file.getframe(frame).data
images.append(image)
# get the max size
max_shape = [0, 0]
for image in images:
if image.shape[0] > max_shape[0]:
max_shape[0] = image.shape[0]
if image.shape[1] > max_shape[1]:
max_shape[1] = image.shape[1]
max_shape = tuple(max_shape)
# fix smallest images
for index, image in enumerate(images):
if image.shape == max_shape:
continue
right_image = numpy.zeros(max_shape)
right_image[0:image.shape[0], 0:image.shape[1]] = image
images[index] = right_image
# create a cube
return numpy.array(images)
def __get_dict(self, kind):
"""Returns a dictionary from according to an expected kind"""
if kind == self.DEFAULT:
return self.__measurements
elif kind == self.COUNTER:
return self.__counters
elif kind == self.POSITIONER:
return self.__positioners
else:
raise Exception("Unexpected kind %s", kind)
def get_data(self):
"""Returns a cube from all available data from frames
:rtype: numpy.ndarray
"""
if self.__data is None:
self.__data = self._create_data()
return self.__data
def get_keys(self, kind):
"""Get all available keys according to a kind of metadata.
:rtype: list
"""
return self.__get_dict(kind).keys()
def get_value(self, kind, name):
"""Get a metadata value according to the kind and the name.
:rtype: numpy.ndarray
"""
value = self.__get_dict(kind)[name]
if not isinstance(value, numpy.ndarray):
value = self._convert_metadata_vector(value)
self.__get_dict(kind)[name] = value
return value
def _set_counter_value(self, frame_id, name, value):
"""Set a counter metadata according to the frame id"""
if name not in self.__counters:
self.__counters[name] = [None] * self.__frame_count
self.__counters[name][frame_id] = value
def _set_positioner_value(self, frame_id, name, value):
"""Set a positioner metadata according to the frame id"""
if name not in self.__positioners:
self.__positioners[name] = [None] * self.__frame_count
self.__positioners[name][frame_id] = value
def _set_measurement_value(self, frame_id, name, value):
"""Set a measurement metadata according to the frame id"""
if name not in self.__measurements:
self.__measurements[name] = [None] * self.__frame_count
self.__measurements[name][frame_id] = value
def _read(self, fabio_file):
"""Read all metadata from the fabio file and store it into this
object."""
for frame in range(fabio_file.nframes):
if fabio_file.nframes == 1:
header = fabio_file.header
else:
header = fabio_file.getframe(frame).header
self._read_frame(frame, header)
def _read_frame(self, frame_id, header):
"""Read all metadata from a frame and store it into this
object."""
for key, value in header.items():
self._read_key(frame_id, key, value)
def _read_key(self, frame_id, name, value):
"""Read a key from the metadata and cache it into this object."""
self._set_measurement_value(frame_id, name, value)
def _convert_metadata_vector(self, values):
"""Convert a list of numpy data into a numpy array with the better
fitting type."""
converted = []
types = set([])
has_none = False
for v in values:
if v is None:
converted.append(None)
has_none = True
else:
c = self._convert_value(v)
converted.append(c)
types.add(c.dtype)
if has_none and len(types) == 0:
# That's a list of none values
return numpy.array([0] * len(values), numpy.int8)
result_type = numpy.result_type(*types)
if issubclass(result_type.type, numpy.string_):
# use the raw data to create the array
result = values
elif issubclass(result_type.type, numpy.unicode_):
# use the raw data to create the array
result = values
else:
result = converted
if has_none:
# Fix missing data according to the array type
if result_type.kind in ["S", "U"]:
none_value = ""
elif result_type.kind == "f":
none_value = numpy.float("NaN")
elif result_type.kind == "i":
                none_value = numpy.int(0)
from pprint import pprint
from imgaug import augmenters as iaa
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from script.data_handler.Base.BaseDataset import BaseDataset
from script.data_handler.ImgMaskAug import ActivatorMask, ImgMaskAug
from script.data_handler.TGS_salt import collect_images, TRAIN_MASK_PATH, TGS_salt, \
TRAIN_IMAGE_PATH, TEST_IMAGE_PATH, RLE_mask_encoding, make_submission_csv
from script.model.sklearn_like_model.BaseModel import BaseDatasetCallback
from script.model.sklearn_like_model.TFSummary import TFSummaryParams
from script.util.PlotTools import PlotTools
from script.util.misc_util import path_join, lazy_property, load_pickle
from script.util.numpy_utils import *
import tensorflow as tf
from script.workbench.TGS_salt.post_process_AE import post_process_AE
class Metrics:
@staticmethod
def miou(trues, predicts):
return np.mean(Metrics.iou_vector(trues, predicts))
@staticmethod
def iou_vector(trues, predicts):
return [
Metrics.iou(gt, predict)
for gt, predict in zip(trues, predicts)
]
@staticmethod
def iou(true, predict):
true = true.astype(np.int32)
predict = predict.astype(np.int32)
        # empty-vs-empty masks still count: the 1e-10 terms below make IoU == 1
intersection = np.logical_and(true, predict)
union = np.logical_or(true, predict)
iou = (np.sum(intersection > 0) + 1e-10) / (np.sum(union > 0) + 1e-10)
return iou
@staticmethod
def TGS_salt_score(mask_true, mask_predict):
def _metric(mask_true, mask_predict):
iou_score = Metrics.iou(mask_true, mask_predict)
threshold = np.arange(0.5, 1, 0.05)
score = np.sum(threshold <= iou_score) / 10.0
return score
if mask_true.shape != mask_predict.shape:
            raise ValueError(f'mask shape does not match, true={mask_true.shape}, predict={mask_predict.shape}')
if mask_true.ndim in (3, 4):
ret = np.mean([_metric(m_true, m_predict) for m_true, m_predict in zip(mask_true, mask_predict)])
else:
ret = _metric(mask_true, mask_predict)
return ret
@staticmethod
def miou_non_empty(true, predict):
non_empty = np.mean(true, axis=(1, 2, 3))
idx = non_empty > 0
return Metrics.miou(true[idx], predict[idx])
@staticmethod
def TGS_salt_score_non_empty(true, predict):
non_empty = np.mean(true, axis=(1, 2, 3))
idx = non_empty > 0
return Metrics.TGS_salt_score(true[idx], predict[idx])
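# Editorial sanity check (not from the original workbench): identical non-empty
# masks clear every IoU threshold in np.arange(0.5, 1, 0.05), so the score is 1.
_demo_masks = np.ones((2, 101, 101, 1))
assert Metrics.TGS_salt_score(_demo_masks, _demo_masks) == 1.0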
def masks_rate(masks):
size = masks.shape[0]
mask = masks.reshape([size, -1])
return np.mean(mask, axis=1)
def save_tf_summary_params(path, params):
with tf.Session() as sess:
run_id = params['run_id']
path = path_join(path, run_id)
summary_params = TFSummaryParams(path, 'params')
summary_params.update(sess, params)
summary_params.flush()
summary_params.close()
print(f'TFSummaryParams save at {path}')
def is_empty_mask(mask):
return np.mean(mask) == 0
def depth_to_image(depths):
# normalize
max_val = np.max(depths)
min_val = np.min(depths)
depths = (depths - min_val) / (max_val - min_val)
# gen depth images
base = [
np.ones([1, 101, 101]) * depth * 255
for depth in depths
]
base = np.concatenate(base, axis=0)
base = base.astype(np.uint8)
return base
class TGS_salt_DataHelper:
def __init__(self, data_pack_path='./data/TGS_salt', sample_offset=10, sample_size=10):
self.data_pack_path = data_pack_path
self.sample_offset = sample_offset
self.sample_size = sample_size
self._data_pack = None
self._train_set = None
self._test_set = None
self._sample_xs = None
self._sample_ys = None
self._train_set_non_empty_mask = None
self._train_set_empty_mask = None
self._train_depth_image = None
@lazy_property
def data_pack(self):
self._data_pack = TGS_salt()
self._data_pack.load(self.data_pack_path)
return self._data_pack
@lazy_property
def train_set(self):
return self.data_pack['train']
@lazy_property
def test_set(self):
return self.data_pack['test']
@lazy_property
def sample_xs(self):
x_full, _ = self.train_set.full_batch()
sample_x = x_full[self.sample_offset:self.sample_offset + self.sample_size]
return sample_x
@lazy_property
def sample_ys(self):
_, ys_full = self.train_set.full_batch()
self._sample_ys = ys_full[self.sample_offset:self.sample_offset + self.sample_size]
return self._sample_ys
@staticmethod
def get_non_empty_mask_idxs(dataset):
ys = dataset.full_batch(['mask'])['mask']
idxs = [
i
for i, y in enumerate(ys)
if not is_empty_mask(y)
]
return idxs
def get_non_empty_mask(self, dataset):
idxs = self.get_non_empty_mask_idxs(dataset)
return dataset.query_by_idxs(idxs)
@staticmethod
def get_empty_mask_idxs(dataset):
ys = dataset.full_batch(['mask'])['mask']
idxs = [
i
for i, y in enumerate(ys)
if is_empty_mask(y)
]
return idxs
def get_empty_mask(self, dataset):
idxs = self.get_empty_mask_idxs(dataset)
return dataset.query_by_idxs(idxs)
@staticmethod
def add_depth_image_channel(dataset):
np_dict = dataset.full_batch(['image', 'depth_image'])
x = np_dict['image']
depth_image = np_dict['depth_image']
x_with_depth = np.concatenate((x, depth_image), axis=3)
dataset.add_data('x_with_depth', x_with_depth)
return dataset
@staticmethod
def mask_rate_under_n_percent(dataset, n):
mask_rate = dataset.full_batch(['mask_rate'])['mask_rate']
idx = mask_rate < n
return dataset.query_by_idxs(idx)
@staticmethod
def mask_rate_upper_n_percent(dataset, n):
mask_rate = dataset.full_batch(['mask_rate'])['mask_rate']
idx = mask_rate > n
return dataset.query_by_idxs(idx)
@staticmethod
def lr_flip(dataset, x_key='image', y_key='mask'):
flip_lr_set = dataset.copy()
x, y = flip_lr_set.full_batch()
x = np.fliplr(x)
flip_lr_set.data[x_key] = x
y = np.fliplr(y)
flip_lr_set.data[y_key] = y
dataset = dataset.merge(dataset, flip_lr_set)
return dataset
@staticmethod
def split_hold_out(dataset, random_state=1234, ratio=(9, 1)):
return dataset.split(ratio, shuffle=False, random_state=random_state)
@staticmethod
def k_fold_split(dataset, k=5, shuffle=False, random_state=1234):
return dataset.k_fold_split(k, shuffle=shuffle, random_state=random_state)
@staticmethod
def crop_dataset(dataset, size=(64, 64), k=30, with_edge=True):
xs, ys = dataset.full_batch()
w, h = size
new_x = []
new_y = []
size = len(xs)
# edge
if with_edge:
for i in range(size):
x = xs[i]
y = ys[i]
new_x += [x[:w, :h, :].reshape([1, h, w, 1])]
new_y += [y[:w, :h, :].reshape([1, h, w, 1])]
new_x += [x[101 - w:101, :h, :].reshape([1, h, w, 1])]
new_y += [y[101 - w:101, :h, :].reshape([1, h, w, 1])]
new_x += [x[:w, 101 - h:101, :].reshape([1, h, w, 1])]
new_y += [y[:w, 101 - h:101, :].reshape([1, h, w, 1])]
new_x += [x[101 - w:101, 101 - h:101, :].reshape([1, h, w, 1])]
new_y += [y[101 - w:101, 101 - h:101, :].reshape([1, h, w, 1])]
# non_edge
for i in range(size):
for _ in range(k):
x = xs[i]
y = ys[i]
a = np.random.randint(1, 101 - 64 - 1)
b = np.random.randint(1, 101 - 64 - 1)
new_x += [x[a:a + w, b:b + h, :].reshape([1, h, w, 1])]
new_y += [y[a:a + w, b:b + h, :].reshape([1, h, w, 1])]
new_x = np.concatenate(new_x)
new_y = np.concatenate(new_y)
print(new_x.shape)
print(new_y.shape)
return BaseDataset(x=new_x, y=new_y)
@staticmethod
def crop_dataset_stride(dataset, size=(64, 64), stride=10, with_edge=True):
xs, ys = dataset.full_batch()
w, h = size
new_x = []
new_y = []
size = len(xs)
# edge
if with_edge:
for i in range(size):
x = xs[i]
y = ys[i]
new_x += [x[:w, :h, :].reshape([1, h, w, 1])]
new_y += [y[:w, :h, :].reshape([1, h, w, 1])]
new_x += [x[101 - w:101, :h, :].reshape([1, h, w, 1])]
new_y += [y[101 - w:101, :h, :].reshape([1, h, w, 1])]
new_x += [x[:w, 101 - h:101, :].reshape([1, h, w, 1])]
new_y += [y[:w, 101 - h:101, :].reshape([1, h, w, 1])]
new_x += [x[101 - w:101, 101 - h:101, :].reshape([1, h, w, 1])]
new_y += [y[101 - w:101, 101 - h:101, :].reshape([1, h, w, 1])]
# non_edge
for i in range(size):
for a in range(0, 101 - 64, stride):
for b in range(0, 101 - 64, stride):
x = xs[i]
y = ys[i]
new_x += [x[a:a + w, b:b + h, :].reshape([1, h, w, 1])]
new_y += [y[a:a + w, b:b + h, :].reshape([1, h, w, 1])]
new_x = np.concatenate(new_x)
new_y = np.concatenate(new_y)
print(new_x.shape)
print(new_y.shape)
return BaseDataset(x=new_x, y=new_y)
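# Editorial note: with size=(64, 64) and stride=10, range(0, 101 - 64, 10)
# yields 4 offsets per axis, i.e. 16 interior crops per image, plus the 4
# corner crops when with_edge is True -- 20 patches per 101 x 101 tile.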
class TGS_salt_aug_callback(BaseDatasetCallback):
def __init__(self, x, y, batch_size, n_job=2, q_size=100):
super().__init__(x, y, batch_size)
self.seq = iaa.Sequential([
# iaa.OneOf([
# iaa.PiecewiseAffine((0.002, 0.1), name='PiecewiseAffine'),
# iaa.Affine(rotate=(-20, 20)),
# iaa.Affine(shear=(-45, 45)),
# iaa.Affine(translate_percent=(0, 0.3), mode='symmetric'),
# iaa.Affine(translate_percent=(0, 0.3), mode='wrap'),
# # iaa.PerspectiveTransform((0.0, 0.3))
# ], name='affine'),
iaa.Fliplr(0.5, name="horizontal flip"),
# iaa.Crop(percent=(0, 0.3), name='crop'),
# image only
# iaa.OneOf([
# iaa.Add((-45, 45), name='bright'),
# iaa.Multiply((0.5, 1.5), name='contrast')]
# ),
# iaa.OneOf([
# iaa.AverageBlur((1, 5), name='AverageBlur'),
# # iaa.BilateralBlur(),
# iaa.GaussianBlur((0.1, 2), name='GaussianBlur'),
# # iaa.MedianBlur((1, 7), name='MedianBlur'),
# ], name='blur'),
# scale to 128 * 128
# iaa.Scale((128, 128), name='to 128 * 128'),
])
self.activator = ActivatorMask(['bright', 'contrast', 'AverageBlur', 'GaussianBlur', 'MedianBlur'])
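        # (presumably ActivatorMask switches the intensity-only augmenters
        # named above off when the sequence is replayed on masks, so masks
        # receive only the geometric transforms)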
self.aug = ImgMaskAug(self.x, self.y, self.seq, self.activator, self.batch_size, n_jobs=n_job, q_size=q_size)
def __str__(self):
return self.__class__.__name__
def __repr__(self):
return self.__class__.__name__
@property
def size(self):
return len(self.x)
def shuffle(self):
pass
def next_batch(self, batch_size, batch_keys=None, update_cursor=True, balanced_class=False, out_type='concat'):
x, y = self.aug.get_batch()
# try:
# plot.plot_image_tile(np.concatenate([x, y]), title='aug')
# except BaseException:
# pass
return x[:batch_size], y[:batch_size]
class data_helper:
@staticmethod
def is_empty_mask(mask):
return np.mean(mask) == 0
@staticmethod
def is_white_image(image):
if np.mean(image) == 255:
return True
else:
return False
@staticmethod
def is_black_image(image):
        if np.mean(image) == 0:
            return True
        else:
            return False
# Digital Signal Processing - Lab 1 - Part 4 (BONUS)
# <NAME> - 03117037
# <NAME> - 03117165
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import librosa
import sounddevice as sd
plt.close('all')
counter = 0
# Part 4 (Bonus)
#4.1 Open .wav file of salsa music signal 1
salsa1, fs = librosa.load('salsa_excerpt1.mp3')
sd.play(salsa1, fs)  # what a track! :)
Ts = 1/fs # fs = 22050Hz sampling frequency
segment = salsa1[10000:75536] #segment of 2^16=65536 samples
t = np.arange(0,np.size(segment)*Ts, Ts) #time index
counter = counter+1
plt.figure(counter)
plt.plot(t,segment, 'b', label = 'Samples L=2^16')
plt.xlabel('Time [sec]')
plt.ylabel('Amplitude')
plt.title('Segment of "salsa_excerpt1.mp3"')
plt.legend()
#4.2 Discrete Wavelet Transform
from pywt import wavedec
coeffs = [c / np.sqrt(2) for c in wavedec(segment, 'db1', level=7)]
ya7, yd7, yd6, yd5, yd4, yd3, yd2, yd1 = coeffs
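# Editorial note: with L = 2^16 samples and 7 Haar ('db1') levels, the subband
# lengths halve per level: yd1 = 2^15, yd2 = 2^14, ..., yd7 = 2^9, ya7 = 2^9.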
#4.3 Envelope Detection
#(a) Absolute Value
absolutes = [np.abs(c) for c in coeffs]
za7 = absolutes[0]
zd7 = absolutes[1]
zd6 = absolutes[2]
zd5 = absolutes[3]
zd4 = absolutes[4]
zd3 = absolutes[5]
zd2 = absolutes[6]
zd1 = absolutes[7]
#(b) Lowpass Filter
a0 = 0.006
a = np.zeros(7)
for i in range(1,8):
a[i-1] = a0*(2**(i+1))
def envelope(signal, absolute, a):
x = np.zeros(np.size(signal))
x[0] = a*absolute[0]
for i in range(1,np.size(x)):
x[i] = (1-a)*x[i-1] + a*absolute[i]
x = x - np.mean(x)
return x
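# Editorial note: the recursion in envelope() is a one-pole IIR lowpass (an
# exponential moving average) with transfer function a / (1 - (1 - a) z^-1);
# larger a tracks the rectified detail faster, smaller a smooths more. The
# mean is removed so only the beat modulation feeds the autocorrelation step.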
xa7 = envelope(ya7, za7, a[6])
xd7 = envelope(yd7, zd7, a[6])
xd6 = envelope(yd6, zd6, a[5])
xd5 = envelope(yd5, zd5, a[4])
xd4 = envelope(yd4, zd4, a[3])
xd3 = envelope(yd3, zd3, a[2])
xd2 = envelope(yd2, zd2, a[1])
xd1 = envelope(yd1, zd1, a[0])
n = np.arange(0,np.size(yd3),1) #number of samples
counter=counter+1
plt.figure(counter)
plt.plot(n, yd3, 'b', label = 'Detal yd3[n]')
plt.plot(n, xd3, 'r', label = 'Envelope xd3[n]')
plt.xlabel('Samples (2^13 = 8192)')
plt.ylabel('Amplitude')
plt.title('Envelope Detection of Detail yd3')
plt.show()
plt.legend()
counter=counter+1
plt.figure(counter)
n = np.arange(0,np.size(yd6),1) #number of samples
plt.plot(n, yd6, 'b', label = 'Detail yd6[n]')
plt.plot(n, xd6, 'r', label = 'Envelope xd6[n]')
plt.xlabel('Samples (2^10 = 1024)')
plt.ylabel('Amplitude')
plt.title('Envelope Detection of Detail yd6')
plt.show()
plt.legend()
#4.4 Sum of Envelopes and Autocorrelation
nvalues = np.arange(0, 32768, 1)
n = np.arange(0, 32768, 1)
xd1 = np.interp(nvalues, n, xd1)
n = np.arange(0, 16384, 1)
xd2 = np.interp(nvalues, n, xd2)
n = np.arange(0, 8192, 1)
xd3 = np.interp(nvalues, n, xd3)
import pandas as pd
import numpy as np
import os
# Generate the risk distribution parameters from the risk_distribution.py script
from risk_distribution import *
# Import parameters from parameters.py script
from parameters import *
# Set path for saving dataframes
base_path = '...'
sims = 10000
# Functions to return probabilistic variables in suitable format
def gamma(alpha, beta):
alpha = np.array([alpha] * sims)
beta = np.array([beta] * sims)
samples = np.random.gamma(alpha, beta)
return samples
def gamma_specified(min, multiplier, alpha, beta):
min = np.array([min] * sims).T
alpha = np.array([alpha] * sims)
beta = np.array([beta] * sims)
samples = min + np.random.gamma(alpha, beta) * multiplier
samples = samples.T
return samples
def normal(parameter, sd):
samples = np.random.normal(parameter, sd, sims)
samples = np.array([samples] * 45).T
return samples
def lognormal(parameter, sd):
samples = np.random.lognormal(parameter, sd, sims)
samples = np.array([samples] * 45).T
return samples
def beta(parameter, se):
alpha = np.array([parameter * ((parameter*(1-parameter))/(se**2)-1)] * sims)
beta = (alpha/parameter) - alpha
samples = np.random.beta(alpha, beta)
samples = samples.T
return samples
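# Editorial note: alpha and beta above are method-of-moments estimates for a
# Beta distribution with mean `parameter` and standard error `se`, i.e.
# alpha = p * (p * (1 - p) / se**2 - 1) and beta = alpha * (1 - p) / p.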
# Function to deliver PSA simulation matrix for variables not being varied
def psa_function(var):
return np.array([var] * sims)
# Function to generate outcomes
def outcomes(parameter):
# Simulations - one total value per simulation
sims = np.sum(parameter, axis=1)
# Mean value across all simulations
mean = np.mean(parameter, axis=0)
# Total value (mean and sum across all simulations)
total = np.sum(mean)
return sims, mean, total
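# Editorial sanity check: a sims x 45 matrix of ones reduces to per-simulation
# totals of 45, per-year means of 1, and a grand total of 45.0.
assert outcomes(np.ones((sims, 45)))[2] == 45.0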
##############
# Parameters #
##############
# Costs
cost_psa = gamma(33.9,0.3)
cost_psa = np.tile(cost_psa, (45,1)).T # Extend cost_psa to be a matrix of length 45 x sims
cost_prs = gamma(33.9,0.7)
cost_biopsy = gamma(33.9,11.5)
cost_biopsy = np.tile(cost_biopsy, (45,1)).T
cost_refuse_biopsy = gamma(33.9,3.1)
cost_refuse_biopsy = np.tile(cost_refuse_biopsy, (45,1)).T
cost_assessment = gamma(33.9,22.7)
cost_as = gamma(33.9,128.1)
cost_rp = gamma(33.9,241.2)
cost_rt = gamma(33.9,158.9)
cost_brachytherapy = gamma(33.9,45.1)
cost_adt = gamma(33.9,16.5)
cost_chemo = gamma(33.9,219.2)
cost_rt_chemo = cost_rt + cost_chemo
cost_rp_rt = cost_rp + cost_rt
cost_rp_chemo = cost_rp + cost_chemo
cost_rp_rt_chemo = cost_rp + cost_rt + cost_chemo
costs_local = np.stack((cost_chemo, cost_rp,
cost_rt, cost_rt_chemo,
cost_rp_chemo, cost_rp_rt,
cost_rp_rt_chemo, cost_as,
cost_adt, cost_brachytherapy), axis=-1)
costs_adv = np.array(costs_local, copy=True)
# Incident costs / treatment dataframe
tx_costs_local = costs_local * tx_local
tx_costs_adv = costs_adv * tx_adv
pca_death_costs = gamma(1.8,3854.9)
# Utilities
pca_incidence_utility_psa = gamma_specified((pca_incidence_utility-0.05), 0.2, 5, 0.05)
utility_background_psa = gamma_specified((utility_background-0.03), 0.167, 4, 0.06)
# Relative risk of death in screened cohort
rr_death_screening = lognormal(-0.2357, 0.0724)
# Proportion of cancers at risk of overdiagnosis
p_overdiagnosis_psa = beta(p_overdiagnosis, 0.001)
additional_years = psa_function(np.repeat(0,20))
p_overdiagnosis_psa = np.concatenate((p_overdiagnosis_psa, additional_years.T))
p_overdiagnosis_psa[0:10,:] = 0
# Relative risk incidence of advanced cancer (stages III and IV)
rr_adv_screening = lognormal(-0.1625, 0.0829)
rr_adv_screening[:,0:10] = 0
rr_adv_screening[:,25:] = 0
# The relative increase in cancers detected if screened
p_increase_df = pd.read_csv('data/p_increase_df.csv', index_col='age')
[RR_INCIDENCE_SC_55, RR_INCIDENCE_SC_56,
RR_INCIDENCE_SC_57, RR_INCIDENCE_SC_58,
RR_INCIDENCE_SC_59, RR_INCIDENCE_SC_60,
RR_INCIDENCE_SC_61, RR_INCIDENCE_SC_62,
RR_INCIDENCE_SC_63, RR_INCIDENCE_SC_64,
RR_INCIDENCE_SC_65, RR_INCIDENCE_SC_66,
RR_INCIDENCE_SC_67, RR_INCIDENCE_SC_68,
RR_INCIDENCE_SC_69] = [np.random.lognormal(p_increase_df.loc[i, '1.23_log'],
p_increase_df.loc[i, 'se'],
sims)
for i in np.arange(55,70,1)]
rr_incidence = np.vstack((np.array([np.repeat(1,sims)]*10),
RR_INCIDENCE_SC_55, RR_INCIDENCE_SC_56, RR_INCIDENCE_SC_57,
RR_INCIDENCE_SC_58, RR_INCIDENCE_SC_59, RR_INCIDENCE_SC_60,
RR_INCIDENCE_SC_61, RR_INCIDENCE_SC_62, RR_INCIDENCE_SC_63,
RR_INCIDENCE_SC_64, RR_INCIDENCE_SC_65, RR_INCIDENCE_SC_66,
RR_INCIDENCE_SC_67, RR_INCIDENCE_SC_68, RR_INCIDENCE_SC_69))
rr_incidence[rr_incidence < 1] = 1.03 # truncate
# Drop in incidence in the year after screening stops
post_sc_incidence_drop = 0.9
# Number of biopsies per cancer detected
# Proportion having biopsy (screened arms)
p_suspected = normal(0.24,0.05)
p_suspected_refuse_biopsy = normal(0.24,0.05)
# Proportion having biopsy (non-screened arms)
# (201/567) - Ahmed et al. 2017, Table S6 (doi: 10.1016/S0140-6736(16)32401-1)
p_suspected_ns = normal((201/567),0.05)
p_suspected_refuse_biopsy_ns = normal((201/567),0.05)
n_psa_tests = normal(1.2,0.05)
# Relative cost increase if clinically detected
# Source: Pharoah et al. 2013
relative_cost_clinically_detected = normal(1.1,0.04)
# Create a function to append the results to the relevant lists
def gen_list_outcomes(parameter_list, parameter):
parameter_list.append(parameter)
return parameter_list
# Run through each AR threshold in turn:
reference_absolute_risk = np.round(np.arange(0.02,0.105,0.005),3)
for reference_value in reference_absolute_risk:
a_risk = pd.read_csv(base_path+(str(np.round(reference_value*100,2)))+'/a_risk_'+(str(np.round(reference_value*100,2)))+'.csv').set_index('age')
# Generate lists to store the variables
(s_qalys_discount_ns_list, s_cost_discount_ns_list, s_pca_deaths_ns_list,
ns_cohort_list, outcomes_ns_psa_list,
s_qalys_discount_age_list, s_cost_discount_age_list,
s_pca_deaths_age_list, s_overdiagnosis_age_list,
age_cohort_list, outcomes_age_psa_list,
s_qalys_discount_prs_list, s_cost_discount_prs_list,
s_pca_deaths_prs_list, s_overdiagnosis_prs_list,
prs_cohort_list, outcomes_prs_psa_list) = [[] for _ in range(17)]
parameter_list_ns = [s_qalys_discount_ns_list, s_cost_discount_ns_list, s_pca_deaths_ns_list,
ns_cohort_list, outcomes_ns_psa_list]
parameter_list_age = [s_qalys_discount_age_list, s_cost_discount_age_list,
s_pca_deaths_age_list, s_overdiagnosis_age_list,
age_cohort_list, outcomes_age_psa_list]
parameter_list_prs = [s_qalys_discount_prs_list, s_cost_discount_prs_list,
s_pca_deaths_prs_list, s_overdiagnosis_prs_list,
prs_cohort_list, outcomes_prs_psa_list]
# Loop through years 45-69 to build cohorts
for year in (a_risk.index[0:25]):
################################################
# Non-screening Cohort #
################################################
#################################
# Transition rates - no screening
#################################
tr_incidence = psa_function(pca_incidence[year-45:])
tr_pca_death_baseline = psa_function(pca_death_baseline[year-45:])
tr_death_other_causes = psa_function(death_other_causes[year-45:])
psa_stage_local = psa_function(stage_local[year-45:])
psa_stage_adv = psa_function(stage_adv[year-45:])
# Year 1 in the model
#####################
age = np.arange(year,90)
length_df = len(age)
# Cohorts, numbers 'healthy', and incident cases
cohort = np.array([np.repeat(pop[year], length_df)] * sims)
pca_alive = np.array([np.zeros(length_df)] * sims)
healthy = cohort - pca_alive
pca_incidence_ns_cohort = healthy * tr_incidence
# Deaths
pca_death = ((pca_alive * tr_pca_death_baseline)
+ (healthy * tr_pca_death_baseline))
pca_death_other = ((pca_incidence_ns_cohort
+ pca_alive
- pca_death)
* tr_death_other_causes)
healthy_death_other = ((healthy - pca_incidence_ns_cohort)
* tr_death_other_causes)
total_death = (pca_death
+ pca_death_other
+ healthy_death_other)
# Prevalent cases & life-years
pca_prevalence_ns = (pca_incidence_ns_cohort
- pca_death
- pca_death_other)
lyrs_pca_nodiscount = pca_prevalence_ns * 0.5
# Treatment costs
costs_tx = np.array([np.zeros(length_df)] * sims)
costs_tx[:,0] = ((pca_incidence_ns_cohort[:,0]
* psa_stage_local[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_ns_cohort[:,0]
* psa_stage_adv[:,0].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,0]) # this variable is tiled to reach 45 - each level is the same
# Year 2 onwards
################
total_cycles = length_df
for i in range(1, total_cycles):
# Cohorts, numbers 'healthy', and incident cases
cohort[:,i] = cohort[:,i-1] - total_death[:,i-1]
pca_alive[:,i] = (pca_alive[:,i-1]
+ pca_incidence_ns_cohort[:,i-1]
- pca_death[:,i-1]
- pca_death_other[:,i-1]) # PCa alive at the beginning of the year
healthy[:,i] = (cohort[:,i] - pca_alive[:,i])
pca_incidence_ns_cohort[:,i] = healthy[:,i] * tr_incidence[:,i]
# Deaths
pca_death[:,i] = ((pca_alive[:,i] * tr_pca_death_baseline[:,i])
+ (healthy[:,i] * tr_pca_death_baseline[:,i]))
pca_death_other[:,i] = ((pca_incidence_ns_cohort[:,i]
+ pca_alive[:,i]
- pca_death[:,i])
* tr_death_other_causes[:,i])
healthy_death_other[:,i] = ((healthy[:,i] - pca_incidence_ns_cohort[:,i])
* tr_death_other_causes[:,i])
total_death[:,i] = (pca_death[:,i]
+ pca_death_other[:,i]
+ healthy_death_other[:,i])
# Prevalent cases & life-years
pca_prevalence_ns[:,i] = (pca_incidence_ns_cohort[:,i]
+ pca_alive[:,i]
- pca_death[:,i]
- pca_death_other[:,i])
lyrs_pca_nodiscount[:,i] = ((pca_prevalence_ns[:,i-1]
+ pca_prevalence_ns[:,i])
* 0.5)
# Costs
costs_tx[:,i] = ((pca_incidence_ns_cohort[:,i]
* psa_stage_local[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_ns_cohort[:,i]
* psa_stage_adv[:,i].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,i])
############
# Outcomes #
############
# INDEX:
# s_ = sim (this is the sum across the simulations i.e. one total value per simulation)
# m_ = mean (this is the mean across the simulations i.e. one value for each year of the model)
# t_ = total
# nodiscount = not discounted
# discount = discounted
# _ns = outcomes for the no screening cohort
# Total incident cases
######################
s_cases_ns, m_cases_ns, t_cases_ns = outcomes(pca_incidence_ns_cohort)
# PCa alive
s_pca_alive_ns, m_pca_alive_ns, t_pca_alive_ns = outcomes(pca_alive)
# Healthy
s_healthy_ns, m_healthy_ns, t_healthy_ns = outcomes(healthy)
# Deaths from other causes amongst prostate cancer cases
s_pca_deaths_other_ns, m_pca_deaths_other_ns, t_pca_deaths_other_ns = outcomes(pca_death_other)
# Deaths from other causes amongst the healthy
(s_healthy_deaths_other_ns,
m_healthy_deaths_other_ns,
t_healthy_deaths_other_ns) = outcomes(healthy_death_other)
# Total deaths from other causes
################################
deaths_other_ns = pca_death_other + healthy_death_other
s_deaths_other_ns, m_deaths_other_ns, t_deaths_other_ns = outcomes(deaths_other_ns)
# Total deaths from prostate cancer
###################################
s_deaths_pca_ns, m_deaths_pca_ns, t_deaths_pca_ns = outcomes(pca_death)
# Life-years ('healthy')
lyrs_healthy_nodiscount_ns = healthy-(0.5 * (healthy_death_other + pca_incidence_ns_cohort))
(s_lyrs_healthy_nodiscount_ns,
m_lyrs_healthy_nodiscount_ns,
t_lyrs_healthy_nodiscount_ns) = outcomes(lyrs_healthy_nodiscount_ns)
lyrs_healthy_discount_ns = lyrs_healthy_nodiscount_ns * discount_factor[:total_cycles]
(s_lyrs_healthy_discount_ns,
m_lyrs_healthy_discount_ns,
t_lyrs_healthy_discount_ns) = outcomes(lyrs_healthy_discount_ns)
# Life-years with prostate cancer
lyrs_pca_discount_ns = lyrs_pca_nodiscount * discount_factor[:total_cycles]
(s_lyrs_pca_discount_ns,
m_lyrs_pca_discount_ns,
t_lyrs_pca_discount_ns) = outcomes(lyrs_pca_discount_ns)
# Total life-years
##################
lyrs_nodiscount_ns = lyrs_healthy_nodiscount_ns + lyrs_pca_nodiscount
(s_lyrs_nodiscount_ns,
m_lyrs_nodiscount_ns,
t_lyrs_nodiscount_ns) = outcomes(lyrs_nodiscount_ns)
lyrs_discount_ns = lyrs_healthy_discount_ns + lyrs_pca_discount_ns
(s_lyrs_discount_ns,
m_lyrs_discount_ns,
t_lyrs_discount_ns) = outcomes(lyrs_discount_ns)
# QALYs in the healthy
qalys_healthy_nodiscount_ns = lyrs_healthy_nodiscount_ns * utility_background_psa[:,year-45:]
qalys_healthy_discount_ns = lyrs_healthy_discount_ns * utility_background_psa[:,year-45:]
(s_qalys_healthy_discount_ns,
m_qalys_healthy_discount_ns,
t_qalys_healthy_discount_ns) = outcomes(qalys_healthy_discount_ns)
# QALYs with prostate cancer
qalys_pca_nodiscount_ns = lyrs_pca_nodiscount * pca_incidence_utility_psa[:,year-45:]
qalys_pca_discount_ns = lyrs_pca_discount_ns * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_discount_ns,
m_qalys_pca_discount_ns,
t_qalys_pca_discount_ns) = outcomes(qalys_pca_discount_ns)
# Total QALYs
#############
qalys_nodiscount_ns = qalys_healthy_nodiscount_ns + qalys_pca_nodiscount_ns
(s_qalys_nodiscount_ns,
m_qalys_nodiscount_ns,
t_qalys_nodiscount_ns) = outcomes(qalys_nodiscount_ns)
qalys_discount_ns = qalys_healthy_discount_ns + qalys_pca_discount_ns
(s_qalys_discount_ns,
m_qalys_discount_ns,
t_qalys_discount_ns) = outcomes(qalys_discount_ns)
# Cost of PSA testing
n_psa_tests_ns = ((pca_incidence_ns_cohort / p_suspected_ns[:,year-45:])
+ ((pca_incidence_ns_cohort * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])) * n_psa_tests[:,year-45:]
(s_n_psa_tests_ns,
m_n_psa_tests_ns,
total_n_psa_tests_ns) = outcomes(n_psa_tests_ns)
cost_psa_testing_nodiscount_ns = n_psa_tests_ns * cost_psa[:,year-45:] * relative_cost_clinically_detected[:,year-45:]
(s_cost_psa_testing_nodiscount_ns,
m_cost_psa_testing_nodiscount_ns,
t_cost_psa_testing_nodiscount_ns) = outcomes(cost_psa_testing_nodiscount_ns)
cost_psa_testing_discount_ns = cost_psa_testing_nodiscount_ns * discount_factor[:total_cycles]
(s_cost_psa_testing_discount_ns,
m_cost_psa_testing_discount_ns,
t_cost_psa_testing_discount_ns) = outcomes(cost_psa_testing_discount_ns)
# Cost of suspected cancer - biopsies
n_biopsies_ns = pca_incidence_ns_cohort / p_suspected_ns[:,year-45:]
(s_n_biopsies_ns,
m_n_biopsies_ns,
total_n_biopsies_ns) = outcomes(n_biopsies_ns)
cost_biopsy_nodiscount_ns = (((pca_incidence_ns_cohort / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_ns_cohort * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
(s_cost_biopsy_nodiscount_ns,
m_cost_biopsy_nodiscount_ns,
t_cost_biopsy_nodiscount_ns) = outcomes(cost_biopsy_nodiscount_ns)
cost_biopsy_discount_ns = cost_biopsy_nodiscount_ns * discount_factor[:total_cycles]
(s_cost_biopsy_discount_ns,
m_cost_biopsy_discount_ns,
t_cost_biopsy_discount_ns) = outcomes(cost_biopsy_discount_ns)
# Cost of staging
cost_staging_nodiscount_ns = (cost_assessment
* psa_stage_adv.T
* pca_incidence_ns_cohort.T
* relative_cost_clinically_detected[:,year-45:].T).T
(s_cost_staging_nodiscount_ns,
m_cost_staging_nodiscount_ns,
t_cost_staging_nodiscount_ns) = outcomes(cost_staging_nodiscount_ns)
cost_staging_discount_ns = cost_staging_nodiscount_ns * discount_factor[:total_cycles]
(s_cost_staging_discount_ns,
m_cost_staging_discount_ns,
t_cost_staging_discount_ns) = outcomes(cost_staging_discount_ns)
# Cost in last 12 months of life
cost_eol_nodiscount_ns = (pca_death_costs * pca_death.T).T
(s_cost_eol_nodiscount_ns,
m_cost_eol_nodiscount_ns,
t_cost_eol_nodiscount_ns) = outcomes(cost_eol_nodiscount_ns)
cost_eol_discount_ns = cost_eol_nodiscount_ns * discount_factor[:total_cycles]
(s_cost_eol_discount_ns,
m_cost_eol_discount_ns,
t_cost_eol_discount_ns) = outcomes(cost_eol_discount_ns)
# Costs of treatment
(s_cost_tx_nodiscount_ns,
m_cost_tx_nodiscount_ns,
t_cost_tx_nodiscount_ns) = outcomes(costs_tx)
cost_tx_discount_ns = costs_tx * discount_factor[:total_cycles]
(s_cost_tx_discount_ns,
m_cost_tx_discount_ns,
t_cost_tx_discount_ns) = outcomes(cost_tx_discount_ns)
# Amalgamated costs
cost_nodiscount_ns = (cost_psa_testing_nodiscount_ns
+ cost_biopsy_nodiscount_ns
+ cost_staging_nodiscount_ns
+ costs_tx
+ cost_eol_nodiscount_ns)
(s_cost_nodiscount_ns,
m_cost_nodiscount_ns,
t_cost_nodiscount_ns) = outcomes(cost_nodiscount_ns)
cost_discount_ns = (cost_psa_testing_discount_ns
+ cost_biopsy_discount_ns
+ cost_staging_discount_ns
+ cost_tx_discount_ns
+ cost_eol_discount_ns)
(s_cost_discount_ns,
m_cost_discount_ns,
t_cost_discount_ns) = outcomes(cost_discount_ns)
# Generate a mean dataframe
ns_matrix = [age, m_cases_ns, m_deaths_other_ns, m_deaths_pca_ns,
m_pca_alive_ns, m_healthy_ns, m_lyrs_healthy_nodiscount_ns,
m_lyrs_healthy_discount_ns, m_lyrs_pca_discount_ns, m_lyrs_discount_ns,
m_qalys_healthy_discount_ns, m_qalys_pca_discount_ns, m_qalys_discount_ns,
m_cost_psa_testing_discount_ns, m_cost_biopsy_discount_ns, m_cost_staging_discount_ns,
m_cost_tx_discount_ns, m_cost_eol_discount_ns, m_cost_discount_ns]
ns_columns = ['age', 'pca_cases', 'deaths_other', 'deaths_pca',
'pca_alive', 'healthy', 'lyrs_healthy_nodiscount', 'lyrs_healthy_discount',
'lyrs_pca_discount', 'total_lyrs_discount',
'qalys_healthy_discount', 'qalys_pca_discount', 'total_qalys_discount',
'cost_psa_testing_discount', 'cost_biopsy_discount', 'cost_staging_discount',
'cost_treatment_discount', 'costs_eol_discount', 'total_cost_discount']
ns_cohort = pd.DataFrame(ns_matrix, index = ns_columns).T
t_parameters_ns = [year, t_cases_ns, t_deaths_pca_ns,
t_deaths_other_ns,
t_lyrs_healthy_discount_ns, t_lyrs_pca_discount_ns,
t_lyrs_nodiscount_ns, t_lyrs_discount_ns,
t_qalys_healthy_discount_ns, t_qalys_pca_discount_ns,
t_qalys_nodiscount_ns, t_qalys_discount_ns,
t_cost_psa_testing_nodiscount_ns, t_cost_psa_testing_discount_ns,
t_cost_biopsy_nodiscount_ns, t_cost_biopsy_discount_ns,
t_cost_staging_nodiscount_ns, t_cost_staging_discount_ns,
t_cost_eol_nodiscount_ns, t_cost_eol_discount_ns,
t_cost_tx_nodiscount_ns, t_cost_tx_discount_ns,
t_cost_nodiscount_ns, t_cost_discount_ns,
total_n_psa_tests_ns, total_n_biopsies_ns]
columns_ns = ['cohort_age_at_start', 'pca_cases',
'pca_deaths', 'deaths_other_causes', 'lyrs_healthy_discounted',
'lyrs_pca_discounted', 'lyrs_undiscounted', 'lyrs_discounted',
'qalys_healthy_discounted', 'qalys_pca_discounted',
'qalys_undiscounted', 'qalys_discounted',
'cost_psa_testing_undiscounted', 'cost_psa_testing_discounted',
'cost_biopsy_undiscounted', 'cost_biopsy_discounted',
'cost_staging_undiscounted', 'cost_staging_discounted',
'cost_eol_undiscounted', 'cost_eol_discounted',
'cost_treatment_undiscounted', 'cost_treatment_discounted',
'costs_undiscounted', 'costs_discounted', 'n_psa_tests', 'n_biopsies']
outcomes_ns_psa = pd.DataFrame(t_parameters_ns, index = columns_ns).T
outcomes_ns_psa['overdiagnosis'] = 0
parameters_ns = [s_qalys_discount_ns, s_cost_discount_ns, s_deaths_pca_ns,
ns_cohort, outcomes_ns_psa]
for index, parameter in enumerate(parameter_list_ns):
parameter = gen_list_outcomes(parameter_list_ns[index], parameters_ns[index])
#######################
# Age-based screening #
#######################
###################################
# Specific transition probabilities
###################################
if year < 55:
# Yearly probability of PCa incidence
smoothed_pca_incidence_age = psa_function(pca_incidence[year-45:])
# Yearly probability of death from PCa - smoothed entry and exit
smoothed_pca_mortality_age = psa_function(pca_death_baseline[year-45:])
# Proportion of cancers detected by screening at an advanced stage
stage_screened_adv = psa_function(stage_adv)
psa_stage_screened_adv = stage_screened_adv[:,year-45:]
# Proportion of cancers detected by screening at a localised stage
stage_screened_local = 1-stage_screened_adv
psa_stage_screened_local = stage_screened_local[:,year-45:]
if year > 54:
# Yearly probability of PCa incidence
smoothed_pca_incidence = psa_function(pca_incidence)
smoothed_pca_incidence[:,10:25] = (smoothed_pca_incidence[:,10:25].T * rr_incidence[year-45,:]).T
smoothed_pca_incidence[:,25:35] = (smoothed_pca_incidence[:,25:35] * np.linspace(post_sc_incidence_drop,1,10))
smoothed_pca_incidence_age = smoothed_pca_incidence[:,year-45:]
# Yearly probability of death from PCa - smoothed entry and exit
smoothed_pca_mortality = psa_function(pca_death_baseline)
smoothed_pca_mortality[:,10:15] = smoothed_pca_mortality[:,10:15] * np.linspace(1,0.79,5)
smoothed_pca_mortality[:,15:] = smoothed_pca_mortality[:,15:] * rr_death_screening[:,15:]
smoothed_pca_mortality_age = smoothed_pca_mortality[:,year-45:]
# Proportion of cancers detected by screening at a localised / advanced stage
stage_screened_adv = stage_adv * rr_adv_screening
stage_screened_local = 1-stage_screened_adv
psa_stage_screened_local = stage_screened_local[:,year-45:]
psa_stage_screened_adv = stage_screened_adv[:,year-45:]
#######################
# Year 1 in the model #
#######################
age = np.arange(year,90)
length_df = len(age)
length_screen = len(np.arange(year,70)) # number of screening years depending on age cohort starting
# Cohorts, numbers healthy, and incident cases
cohort_sc = np.array([np.repeat(pop[year], length_df)] * sims) * uptake_psa
cohort_ns = np.array([np.repeat(pop[year], length_df)] * sims) * (1-uptake_psa)
pca_alive_sc = np.array([np.zeros(length_df)] * sims)
pca_alive_ns = np.array([np.zeros(length_df)] * sims)
healthy_sc = cohort_sc - pca_alive_sc
healthy_ns = cohort_ns - pca_alive_ns
pca_incidence_sc = healthy_sc * smoothed_pca_incidence_age # Total incidence in screened arm
if year > 54:
pca_incidence_screened = pca_incidence_sc.copy()
pca_incidence_post_screening = np.array([np.zeros(length_df)] * sims) # Post-screening cancers - 0 until model reaches age 70.
elif year < 55:
pca_incidence_screened = np.array([np.zeros(length_df)] * sims)
pca_incidence_post_screening = np.array([np.zeros(length_df)] * sims) # post-screening cancers 0 as no screening (needed for later code to run smoothly)
pca_incidence_ns = healthy_ns * tr_incidence # Incidence in non-screened
# Deaths
pca_death_sc = ((pca_alive_sc * smoothed_pca_mortality_age)
+ (healthy_sc * smoothed_pca_mortality_age))
pca_death_ns = ((pca_alive_ns * tr_pca_death_baseline)
+ (healthy_ns * tr_pca_death_baseline))
pca_death_other_sc = ((pca_incidence_sc
+ pca_alive_sc
- pca_death_sc)
* tr_death_other_causes)
pca_death_other_ns = ((pca_incidence_ns
+ pca_alive_ns
- pca_death_ns)
* tr_death_other_causes)
healthy_death_other_sc = ((healthy_sc - pca_incidence_sc)
* tr_death_other_causes)
healthy_death_other_ns = ((healthy_ns - pca_incidence_ns)
* tr_death_other_causes)
t_death_sc = (pca_death_sc
+ pca_death_other_sc
+ healthy_death_other_sc) # Total deaths screened arm
t_death_ns = (pca_death_ns
+ pca_death_other_ns
+ healthy_death_other_ns) # Total deaths non-screened arm
t_death = t_death_sc + t_death_ns # Total deaths
# Prevalent cases & life-years
pca_prevalence_sc = (pca_incidence_sc
- pca_death_sc
- pca_death_other_sc)
pca_prevalence_ns = (pca_incidence_ns
- pca_death_ns
- pca_death_other_ns)
lyrs_pca_sc_nodiscount = pca_prevalence_sc * 0.5
lyrs_pca_ns_nodiscount = pca_prevalence_ns * 0.5
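# Half-cycle correction: cases in the first model year are assumed to
# contribute half a life-year; from year 2 onwards consecutive prevalence
# estimates are averaged instead (trapezoidal rule).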
# Costs
if year > 54:
costs_tx_screened = np.array([np.zeros(length_df)] * sims)
costs_tx_post_screening = np.array([np.zeros(length_df)] * sims)
costs_tx_screened[:,0] = ((pca_incidence_screened[:,0]
* psa_stage_screened_local[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_screened[:,0]
* psa_stage_screened_adv[:,0].T
* tx_costs_adv.T).sum(axis=0)) # cost of screen-detected cancers
costs_tx_post_screening[:,0] = ((pca_incidence_post_screening[:,0]
* psa_stage_local[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_post_screening[:,0]
* psa_stage_adv[:,0].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,0])
costs_tx_sc = np.array([np.zeros(length_df)] * sims)
costs_tx_sc[:,0] = (costs_tx_screened[:,0] + costs_tx_post_screening[:,0]) # total cost in screened arms
elif year < 55:
costs_tx_sc = np.array([np.zeros(length_df)] * sims)
costs_tx_sc[:,0] = ((pca_incidence_sc[:,0]
* psa_stage_local[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_sc[:,0]
* psa_stage_adv[:,0].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,0])
costs_tx_ns = np.array([np.zeros(length_df)] * sims)
costs_tx_ns[:,0] = ((pca_incidence_ns[:,0]
* psa_stage_local[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_ns[:,0]
* psa_stage_adv[:,0].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,0])
##################
# Year 2 onwards #
##################
total_cycles = length_df
for i in range(1, total_cycles):
# Cohorts, numbers healthy, incident & prevalent cases
cohort_sc[:,i] = cohort_sc[:,i-1] - t_death_sc[:,i-1]
cohort_ns[:,i] = cohort_ns[:,i-1] - t_death_ns[:,i-1]
pca_alive_sc[:,i] = (pca_alive_sc[:,i-1]
+ pca_incidence_sc[:,i-1]
- pca_death_sc[:,i-1]
- pca_death_other_sc[:,i-1])
pca_alive_ns[:,i] = (pca_alive_ns[:,i-1]
+ pca_incidence_ns[:,i-1]
- pca_death_ns[:,i-1]
- pca_death_other_ns[:,i-1])
healthy_sc[:,i] = (cohort_sc[:,i] - pca_alive_sc[:,i])
healthy_ns[:,i] = (cohort_ns[:,i] - pca_alive_ns[:,i])
pca_incidence_sc[:,i] = healthy_sc[:,i] * smoothed_pca_incidence_age[:,i]
if year > 54:
if i < length_screen:
pca_incidence_screened[:,i] = pca_incidence_sc[:,i].copy() # Screen-detected cancers
pca_incidence_post_screening[:,i] = 0
else:
pca_incidence_screened[:,i] = 0 # Screen-detected cancers
pca_incidence_post_screening[:,i] = pca_incidence_sc[:,i].copy()
elif year < 55:
pca_incidence_screened[:,i] = 0 # Screen-detected cancers
pca_incidence_post_screening[:,i] = 0 # post-screening cancers 0 as no screening (needed for later code to run smoothly)
pca_incidence_ns[:,i] = healthy_ns[:,i] * tr_incidence[:,i]
# Deaths
pca_death_sc[:,i] = ((pca_alive_sc[:,i] * smoothed_pca_mortality_age[:,i])
+ (healthy_sc[:,i] * smoothed_pca_mortality_age[:,i]))
pca_death_ns[:,i] = ((pca_alive_ns[:,i] * tr_pca_death_baseline[:,i])
+ (healthy_ns[:,i] * tr_pca_death_baseline[:,i]))
pca_death_other_sc[:,i] = ((pca_incidence_sc[:,i]
+ pca_alive_sc[:,i]
- pca_death_sc[:,i])
* tr_death_other_causes[:,i])
pca_death_other_ns[:,i] = ((pca_incidence_ns[:,i]
+ pca_alive_ns[:,i]
- pca_death_ns[:,i])
* tr_death_other_causes[:,i])
healthy_death_other_sc[:,i] = ((healthy_sc[:,i] - pca_incidence_sc[:,i])
* tr_death_other_causes[:,i])
healthy_death_other_ns[:,i] = ((healthy_ns[:,i] - pca_incidence_ns[:,i])
* tr_death_other_causes[:,i])
t_death_sc[:,i] = (pca_death_sc[:,i]
+ pca_death_other_sc[:,i]
+ healthy_death_other_sc[:,i])
t_death_ns[:,i] = (pca_death_ns[:,i]
+ pca_death_other_ns[:,i]
+ healthy_death_other_ns[:,i])
t_death[:,i] = t_death_sc[:,i] + t_death_ns[:,i]
# Prevalent cases & life-years
pca_prevalence_sc[:,i] = (pca_incidence_sc[:,i]
+ pca_alive_sc[:,i]
- pca_death_sc[:,i]
- pca_death_other_sc[:,i])
pca_prevalence_ns[:,i] = (pca_incidence_ns [:,i]
+ pca_alive_ns[:,i]
- pca_death_ns[:,i]
- pca_death_other_ns[:,i])
lyrs_pca_sc_nodiscount[:,i] = ((pca_prevalence_sc[:,i-1]
+ pca_prevalence_sc[:,i])
* 0.5)
lyrs_pca_ns_nodiscount[:,i] = ((pca_prevalence_ns[:,i-1]
+ pca_prevalence_ns[:,i])
* 0.5)
# Costs
if year > 54:
costs_tx_screened[:,i] = ((pca_incidence_screened[:,i]
* psa_stage_screened_local[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_screened[:,i]
* psa_stage_screened_adv[:,i].T
* tx_costs_adv.T).sum(axis=0)) # cost of screen-detected cancers
costs_tx_post_screening[:,i] = ((pca_incidence_post_screening[:,i]
* psa_stage_local[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_post_screening[:,i]
* psa_stage_adv[:,i].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,i])
costs_tx_sc[:,i] = (costs_tx_screened[:,i]
+ costs_tx_post_screening[:,i]) # total cost in screened arms
elif year < 55:
costs_tx_sc[:,i] = ((pca_incidence_sc[:,i]
* psa_stage_local[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_sc[:,i]
* psa_stage_adv[:,i].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,i])
costs_tx_ns[:,i] = ((pca_incidence_ns[:,i]
* psa_stage_local[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_ns[:,i]
* psa_stage_adv[:,i].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,i])
############
# Outcomes #
############
# INDEX:
# s_ = sim (one total value per simulation, i.e. the sum across the model years)
# m_ = mean (this is the mean across the simulations i.e. one value for each year of the model)
# t_ = total
# nodiscount = not discounted
# discount = discounted
# _age = outcomes for the age-based screening cohort
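# A minimal sketch of the assumed outcomes() helper (defined earlier in this
# script, not shown here): for a (sims x years) array it returns the three
# summaries named above, e.g.
#   def outcomes(arr):
#       return arr.sum(axis=1), arr.mean(axis=0), arr.sum()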
# Total incident cases (screened arm)
s_cases_sc_age, m_cases_sc_age, t_cases_sc_age = outcomes(pca_incidence_sc)
# Total screen-detected cancers (screened arm)
s_cases_sc_detected_age, m_cases_sc_detected_age, t_cases_sc_detected_age = outcomes(pca_incidence_screened)
# Total cancers detected after screening stops (screened arm)
s_cases_post_screening_age, m_cases_post_screening_age, t_cases_post_screening_age = outcomes(pca_incidence_post_screening)
# Incident cases (non-screened arm)
s_cases_ns_age, m_cases_ns_age, t_cases_ns_age = outcomes(pca_incidence_ns)
# Incident cases (total)
########################
s_cases_age = s_cases_sc_age + s_cases_ns_age
m_cases_age = m_cases_sc_age + m_cases_ns_age
t_cases_age = t_cases_sc_age + t_cases_ns_age
# PCa alive
s_pca_alive_age, m_pca_alive_age, t_pca_alive_age = outcomes((pca_alive_sc + pca_alive_ns))
# Healthy
s_healthy_age, m_healthy_age, t_healthy_age = outcomes((healthy_sc + healthy_ns))
# Overdiagnosed cases
overdiagnosis_age = pca_incidence_screened * p_overdiagnosis_psa.T[:,year-45:]
s_overdiagnosis_age, m_overdiagnosis_age, t_overdiagnosis_age = outcomes(overdiagnosis_age)
# Deaths from other causes (screened arm)
deaths_sc_other_age = pca_death_other_sc + healthy_death_other_sc
s_deaths_sc_other_age, m_deaths_sc_other_age, t_deaths_sc_other_age = outcomes(deaths_sc_other_age)
# Deaths from other causes (non-screened arm)
deaths_ns_other_age = pca_death_other_ns + healthy_death_other_ns
s_deaths_ns_other_age, m_deaths_ns_other_age, t_deaths_ns_other_age = outcomes(deaths_ns_other_age)
# Deaths from other causes (total)
s_deaths_other_age = s_deaths_sc_other_age + s_deaths_ns_other_age
m_deaths_other_age = m_deaths_sc_other_age + m_deaths_ns_other_age
t_deaths_other_age = t_deaths_sc_other_age + t_deaths_ns_other_age
# Deaths from prostate cancer (screened arm)
s_deaths_sc_pca_age, m_deaths_sc_pca_age, t_deaths_sc_pca_age = outcomes(pca_death_sc)
# Deaths from prostate cancer (non-screened arm)
s_deaths_ns_pca_age, m_deaths_ns_pca_age, t_deaths_ns_pca_age = outcomes(pca_death_ns)
# Deaths from prostate cancer (total)
####################################
s_deaths_pca_age = s_deaths_sc_pca_age + s_deaths_ns_pca_age
m_deaths_pca_age = m_deaths_sc_pca_age + m_deaths_ns_pca_age
t_deaths_pca_age = t_deaths_sc_pca_age + t_deaths_ns_pca_age
# Healthy life-years (screened arm)
lyrs_healthy_sc_nodiscount_age = (healthy_sc
- (0.5 * (healthy_death_other_sc+pca_incidence_sc)))
lyrs_healthy_sc_discount_age = lyrs_healthy_sc_nodiscount_age * discount_factor[:total_cycles]
(s_lyrs_healthy_sc_discount_age,
m_lyrs_healthy_sc_discount_age,
t_lyrs_healthy_sc_discount_age) = outcomes(lyrs_healthy_sc_discount_age)
# Healthy life-years (non-screened arm)
lyrs_healthy_ns_nodiscount_age = (healthy_ns
- (0.5 * (healthy_death_other_ns+pca_incidence_ns)))
lyrs_healthy_ns_discount_age = lyrs_healthy_ns_nodiscount_age * discount_factor[:total_cycles]
(s_lyrs_healthy_ns_discount_age,
m_lyrs_healthy_ns_discount_age,
t_lyrs_healthy_ns_discount_age) = outcomes(lyrs_healthy_ns_discount_age)
# Total healthy life-years
lyrs_healthy_nodiscount_age = lyrs_healthy_sc_nodiscount_age + lyrs_healthy_ns_nodiscount_age
(s_lyrs_healthy_nodiscount_age,
m_lyrs_healthy_nodiscount_age,
t_lyrs_healthy_nodiscount_age) = outcomes(lyrs_healthy_nodiscount_age)
lyrs_healthy_discount_age = lyrs_healthy_nodiscount_age * discount_factor[:total_cycles]
(s_lyrs_healthy_discount_age,
m_lyrs_healthy_discount_age,
t_lyrs_healthy_discount_age) = outcomes(lyrs_healthy_discount_age)
# Life-years with prostate cancer in screened arm
lyrs_pca_sc_discount = lyrs_pca_sc_nodiscount * discount_factor[:total_cycles]
(s_lyrs_pca_sc_discount_age,
m_lyrs_pca_sc_discount_age,
t_lyrs_pca_sc_discount_age) = outcomes(lyrs_pca_sc_discount)
# Life-years with prostate cancer in non-screened arm
lyrs_pca_ns_discount = lyrs_pca_ns_nodiscount * discount_factor[:total_cycles]
(s_lyrs_pca_ns_discount_age,
m_lyrs_pca_ns_discount_age,
t_lyrs_pca_ns_age) = outcomes(lyrs_pca_ns_discount)
# Life-years with prostate cancer in both arms
lyrs_pca_nodiscount_age = lyrs_pca_sc_nodiscount + lyrs_pca_ns_nodiscount
lyrs_pca_discount_age = lyrs_pca_sc_discount + lyrs_pca_ns_discount
(s_lyrs_pca_discount_age,
m_lyrs_pca_discount_age,
t_lyrs_pca_discount_age) = outcomes(lyrs_pca_discount_age)
# Total life-years
##################
lyrs_nodiscount_age = lyrs_healthy_nodiscount_age + lyrs_pca_nodiscount_age
(s_lyrs_nodiscount_age,
m_lyrs_nodiscount_age,
t_lyrs_nodiscount_age) = outcomes(lyrs_nodiscount_age)
lyrs_discount_age = lyrs_healthy_discount_age + lyrs_pca_discount_age
(s_lyrs_discount_age,
m_lyrs_discount_age,
t_lyrs_discount_age) = outcomes(lyrs_discount_age)
# QALYs (healthy life) - screened arm
qalys_healthy_sc_nodiscount_age = lyrs_healthy_sc_nodiscount_age * utility_background_psa[:,year-45:]
qalys_healthy_sc_discount_age = lyrs_healthy_sc_discount_age * utility_background_psa[:,year-45:]
(s_qalys_healthy_sc_discount_age,
m_qalys_healthy_sc_discount_age,
t_qalys_healthy_sc_discount_age) = outcomes(qalys_healthy_sc_discount_age)
# QALYs (healthy life) - non-screened arm
qalys_healthy_ns_nodiscount_age = lyrs_healthy_ns_nodiscount_age * utility_background_psa[:,year-45:]
qalys_healthy_ns_discount_age = lyrs_healthy_ns_discount_age * utility_background_psa[:,year-45:]
(s_qalys_healthy_ns_discount_age,
m_qalys_healthy_ns_discount_age,
t_qalys_healthy_ns_discount_age) = outcomes(qalys_healthy_ns_discount_age)
# Total QALYs (healthy life)
qalys_healthy_nodiscount_age = lyrs_healthy_nodiscount_age * utility_background_psa[:,year-45:]
qalys_healthy_discount_age = lyrs_healthy_discount_age * utility_background_psa[:,year-45:]
(s_qalys_healthy_discount_age,
m_qalys_healthy_discount_age,
t_qalys_healthy_discount_age) = outcomes(qalys_healthy_discount_age)
# QALYS with prostate cancer - screened arm
qalys_pca_sc_nodiscount_age = lyrs_pca_sc_nodiscount * pca_incidence_utility_psa[:,year-45:]
qalys_pca_sc_discount_age = lyrs_pca_sc_discount * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_sc_discount_age,
m_qalys_pca_sc_discount_age,
t_qalys_pca_sc_discount_age) = outcomes(qalys_pca_sc_discount_age)
# QALYS with prostate cancer - non-screened arm
qalys_pca_ns_nodiscount_age = lyrs_pca_ns_nodiscount * pca_incidence_utility_psa[:,year-45:]
qalys_pca_ns_discount_age = lyrs_pca_ns_discount * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_ns_discount_age,
m_qalys_pca_ns_discount_age,
t_qalys_pca_ns_discount_age) = outcomes(qalys_pca_ns_discount_age)
# Total QALYS with prostate cancer
qalys_pca_nodiscount_age = lyrs_pca_nodiscount_age * pca_incidence_utility_psa[:,year-45:]
qalys_pca_discount_age = lyrs_pca_discount_age * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_discount_age,
m_qalys_pca_discount_age,
t_qalys_pca_discount_age) = outcomes(qalys_pca_discount_age)
# Total QALYs
#############
qalys_nodiscount_age = qalys_healthy_nodiscount_age + qalys_pca_nodiscount_age
(s_qalys_nodiscount_age,
m_qalys_nodiscount_age,
t_qalys_nodiscount_age) = outcomes(qalys_nodiscount_age)
qalys_discount_age = qalys_healthy_discount_age + qalys_pca_discount_age
(s_qalys_discount_age,
m_qalys_discount_age,
t_qalys_discount_age) = outcomes(qalys_discount_age)
# Costs of PSA testing in non-screened arm
n_psa_tests_ns_age = ((pca_incidence_ns / p_suspected_ns[:,year-45:])
+ ((pca_incidence_ns * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])) * n_psa_tests[:,year-45:]
cost_psa_testing_ns_nodiscount_age = n_psa_tests_ns_age * cost_psa[:,year-45:] * relative_cost_clinically_detected[:,year-45:]
(s_cost_psa_testing_ns_nodiscount_age,
m_cost_psa_testing_ns_nodiscount_age,
t_cost_psa_testing_ns_nodiscount_age) = outcomes(cost_psa_testing_ns_nodiscount_age)
cost_psa_testing_ns_discount_age = cost_psa_testing_ns_nodiscount_age * discount_factor[:total_cycles]
(s_cost_psa_testing_ns_discount_age,
m_cost_psa_testing_ns_discount_age,
t_cost_psa_testing_ns_discount_age) = outcomes(cost_psa_testing_ns_discount_age)
# Costs of PSA testing in screened arm (PSA screening every four years)
# PSA tests during screened and non-screened period
if year < 55:
# Assuming all cancers are clinically detected as these cohorts
# are not eligible for screening (hence p_suspected_ns)
# This uses 1-uptake biopsy as the original part of the equation works out
# the number of biopsies which is then multiplied by n_psa_tests to get the number of PSA tests
n_psa_tests_sc_age = (((pca_incidence_sc / p_suspected_ns[:,year-45:])
+ ((pca_incidence_sc * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:]))
* uptake_psa
* n_psa_tests[:,year-45:])
cost_psa_testing_sc_nodiscount_age = (n_psa_tests_sc_age
* cost_psa[:,year-45:]
* relative_cost_clinically_detected[:,year-45:])
if year > 54:
# Get the screened years
lyrs_healthy_screened_nodiscount_age = np.array([np.zeros(length_df)] * sims)
lyrs_healthy_screened_nodiscount_age[:,:length_screen] = lyrs_healthy_sc_nodiscount_age[:,:length_screen].copy()
lyrs_healthy_screened_nodiscount_age[:,length_screen:] = 0
# Population-level PSA testing during screening phase
n_psa_tests_screened_age = lyrs_healthy_screened_nodiscount_age * uptake_psa / 4
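# (the division by 4 reflects the assumed four-yearly PSA screening interval)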
# Assuming all cancers are clinically detected in the post-screening phase
n_psa_tests_post_screening_age = (((pca_incidence_post_screening / p_suspected_ns[:,year-45:])
+ ((pca_incidence_post_screening * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:]))
* uptake_psa
* n_psa_tests[:,year-45:])
# Total PSA tests
n_psa_tests_sc_age = (n_psa_tests_screened_age + n_psa_tests_post_screening_age)
cost_psa_testing_screened_age = n_psa_tests_screened_age * cost_psa[:,year-45:]
cost_psa_testing_post_screening_age = (n_psa_tests_post_screening_age
* cost_psa[:,year-45:]
* relative_cost_clinically_detected[:,year-45:])
cost_psa_testing_sc_nodiscount_age = (cost_psa_testing_screened_age
+ cost_psa_testing_post_screening_age)
(s_cost_psa_testing_sc_nodiscount_age,
m_cost_psa_testing_sc_nodiscount_age,
t_cost_psa_testing_sc_nodiscount_age) = outcomes(cost_psa_testing_sc_nodiscount_age)
cost_psa_testing_sc_discount_age = cost_psa_testing_sc_nodiscount_age * discount_factor[:total_cycles]
(s_cost_psa_testing_sc_discount_age,
m_cost_psa_testing_sc_discount_age,
t_cost_psa_testing_sc_discount_age) = outcomes(cost_psa_testing_sc_discount_age)
# Total costs of PSA testing
############################
n_psa_tests_age = n_psa_tests_ns_age + n_psa_tests_sc_age
(s_n_psa_tests_age,
m_n_psa_tests_age,
total_n_psa_tests_age) = outcomes(n_psa_tests_age)
cost_psa_testing_nodiscount_age = cost_psa_testing_ns_nodiscount_age + cost_psa_testing_sc_nodiscount_age
(s_cost_psa_testing_nodiscount_age,
m_cost_psa_testing_nodiscount_age,
t_cost_psa_testing_nodiscount_age) = outcomes(cost_psa_testing_nodiscount_age)
cost_psa_testing_discount_age = cost_psa_testing_ns_discount_age + cost_psa_testing_sc_discount_age
(s_cost_psa_testing_discount_age,
m_cost_psa_testing_discount_age,
t_cost_psa_testing_discount_age) = outcomes(cost_psa_testing_discount_age)
# Costs of biopsy - screened arm
if year < 55:
# Assuming all cancers are clinically detected as these cohorts
# are not eligible for screening (hence p_suspected_ns)
n_biopsies_sc_age = pca_incidence_sc / p_suspected_ns[:,year-45:]
# Costs include the costs of those who turn down biopsy
cost_biopsy_sc_nodiscount_age = (((pca_incidence_sc / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_sc * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
if year > 54:
# Screen-detected cancers
n_biopsies_screened_age = pca_incidence_screened / p_suspected[:,year-45:]
cost_biopsy_screened_nodiscount_age = (((pca_incidence_screened / p_suspected[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_screened * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy[:,year-45:])
* cost_refuse_biopsy[:,year-45:]))
# Assuming all cancers are clinically detected in the post-screening phase
n_biopsies_post_screening_age = pca_incidence_post_screening / p_suspected_ns[:,year-45:]
cost_biopsies_post_screening_nodiscount_age = (((pca_incidence_post_screening / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_post_screening * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
# Total biopsies
n_biopsies_sc_age = (n_biopsies_screened_age + n_biopsies_post_screening_age)
# Total cost of biopsies
cost_biopsy_sc_nodiscount_age = (cost_biopsy_screened_nodiscount_age
+ cost_biopsies_post_screening_nodiscount_age)
(s_cost_biopsy_sc_nodiscount_age,
m_cost_biopsy_sc_nodiscount_age,
t_cost_biopsy_sc_nodiscount_age) = outcomes(cost_biopsy_sc_nodiscount_age)
cost_biopsy_sc_discount_age = cost_biopsy_sc_nodiscount_age * discount_factor[:total_cycles]
(s_cost_biopsy_sc_discount_age,
m_cost_biopsy_sc_discount_age,
t_cost_biopsy_sc_discount_age) = outcomes(cost_biopsy_sc_discount_age)
# Costs of biopsy - non-screened arm
n_biopsies_ns_age = pca_incidence_ns / p_suspected_ns[:,year-45:]
cost_biopsy_ns_nodiscount_age = (((pca_incidence_ns / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_ns * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
(s_cost_biopsy_ns_nodiscount_age,
m_cost_biopsy_ns_nodiscount_age,
t_cost_biopsy_ns_nodiscount_age) = outcomes(cost_biopsy_ns_nodiscount_age)
cost_biopsy_ns_discount_age = cost_biopsy_ns_nodiscount_age * discount_factor[:total_cycles]
(s_cost_biopsy_ns_discount_age,
m_cost_biopsy_ns_discount_age,
t_cost_biopsy_ns_discount_age) = outcomes(cost_biopsy_ns_discount_age)
# Total costs of biopsy
#######################
n_biopsies_age = n_biopsies_sc_age + n_biopsies_ns_age
(s_n_biopsies_age,
m_n_biopsies_age,
total_n_biopsies_age) = outcomes(n_biopsies_age)
cost_biopsy_nodiscount_age = cost_biopsy_sc_nodiscount_age + cost_biopsy_ns_nodiscount_age
(s_cost_biopsy_nodiscount_age,
m_cost_biopsy_nodiscount_age,
t_cost_biopsy_nodiscount_age) = outcomes(cost_biopsy_nodiscount_age)
cost_biopsy_discount_age = cost_biopsy_sc_discount_age + cost_biopsy_ns_discount_age
(s_cost_biopsy_discount_age,
m_cost_biopsy_discount_age,
t_cost_biopsy_discount_age) = outcomes(cost_biopsy_discount_age)
# Cost of staging in the screened arm
if year < 55:
cost_staging_sc_nodiscount_age = (cost_assessment
* psa_stage_adv.T
* pca_incidence_sc.T
* relative_cost_clinically_detected[:,year-45:].T).T
if year > 54:
cost_staging_screened_nodiscount_age = (cost_assessment
* psa_stage_screened_adv.T
* pca_incidence_screened.T).T
cost_staging_post_screening_nodiscount_age = (cost_assessment
* psa_stage_adv.T
* pca_incidence_post_screening.T
* relative_cost_clinically_detected[:,year-45:].T).T
cost_staging_sc_nodiscount_age = (cost_staging_screened_nodiscount_age
+ cost_staging_post_screening_nodiscount_age)
(s_cost_staging_sc_nodiscount_age,
m_cost_staging_sc_nodiscount_age,
t_cost_staging_sc_nodiscount_age) = outcomes(cost_staging_sc_nodiscount_age)
cost_staging_sc_discount_age = cost_staging_sc_nodiscount_age * discount_factor[:total_cycles]
(s_cost_staging_sc_discount_age,
m_cost_staging_sc_discount_age,
t_cost_staging_sc_discount_age) = outcomes(cost_staging_sc_discount_age)
# Cost of staging in the non-screened arm
cost_staging_ns_nodiscount_age = (cost_assessment
* psa_stage_adv.T
* pca_incidence_ns.T
* relative_cost_clinically_detected[:,year-45:].T).T
(s_cost_staging_ns_nodiscount_age,
m_cost_staging_ns_nodiscount_age,
t_cost_staging_ns_nodiscount_age) = outcomes(cost_staging_ns_nodiscount_age)
cost_staging_ns_discount_age = cost_staging_ns_nodiscount_age * discount_factor[:total_cycles]
(s_cost_staging_ns_discount_age,
m_cost_staging_ns_discount_age,
t_cost_staging_ns_discount_age) = outcomes(cost_staging_ns_discount_age)
# Total costs of staging
########################
cost_staging_nodiscount_age = cost_staging_sc_nodiscount_age + cost_staging_ns_nodiscount_age
(s_cost_staging_nodiscount_age,
m_cost_staging_nodiscount_age,
t_cost_staging_nodiscount_age) = outcomes(cost_staging_nodiscount_age)
cost_staging_discount_age = cost_staging_sc_discount_age + cost_staging_ns_discount_age
(s_cost_staging_discount_age,
m_cost_staging_discount_age,
t_cost_staging_discount_age) = outcomes(cost_staging_discount_age)
# Cost of treatment in screened arm
(s_cost_tx_sc_nodiscount_age,
m_cost_tx_sc_nodiscount_age,
t_cost_tx_sc_nodiscount_age) = outcomes(costs_tx_sc)
cost_tx_sc_discount_age = costs_tx_sc * discount_factor[:total_cycles]
(s_cost_tx_sc_discount_age,
m_cost_tx_sc_discount_age,
t_cost_tx_sc_discount_age) = outcomes(cost_tx_sc_discount_age)
# Cost of treatment in non-screened arm
(s_cost_tx_ns_nodiscount_age,
m_cost_tx_ns_nodiscount_age,
t_cost_tx_ns_nodiscount_age) = outcomes(costs_tx_ns)
cost_tx_ns_discount_age = costs_tx_ns * discount_factor[:total_cycles]
(s_cost_tx_ns_discount_age,
m_cost_tx_ns_discount_age,
t_cost_tx_ns_discount_age) = outcomes(cost_tx_ns_discount_age)
# Total costs of treatment
##########################
cost_tx_nodiscount_age = costs_tx_sc + costs_tx_ns
(s_cost_tx_nodiscount_age,
m_cost_tx_nodiscount_age,
t_cost_tx_nodiscount_age) = outcomes(cost_tx_nodiscount_age)
cost_tx_discount_age = cost_tx_nodiscount_age * discount_factor[:total_cycles]
(s_cost_tx_discount_age,
m_cost_tx_discount_age,
t_cost_tx_discount_age) = outcomes(cost_tx_discount_age)
# Costs of palliation and death in screened arm
cost_eol_sc_nodiscount_age = (pca_death_costs * pca_death_sc.T).T
(s_cost_eol_sc_nodiscount_age,
m_cost_eol_sc_nodiscount_age,
t_cost_eol_sc_nodiscount_age) = outcomes(cost_eol_sc_nodiscount_age)
cost_eol_sc_discount_age = cost_eol_sc_nodiscount_age * discount_factor[:total_cycles]
(s_cost_eol_sc_discount_age,
m_cost_eol_sc_discount_age,
t_cost_eol_sc_discount_age) = outcomes(cost_eol_sc_discount_age)
# Costs of palliation and death in non-screened arm
cost_eol_ns_nodiscount_age = (pca_death_costs * pca_death_ns.T).T
(s_cost_eol_ns_nodiscount_age,
m_cost_eol_ns_nodiscount_age,
t_cost_eol_ns_nodiscount_age) = outcomes(cost_eol_ns_nodiscount_age)
cost_eol_ns_discount_age = cost_eol_ns_nodiscount_age * discount_factor[:total_cycles]
(s_cost_eol_ns_discount_age,
m_cost_eol_ns_discount_age,
t_cost_eol_ns_discount_age) = outcomes(cost_eol_ns_discount_age)
# Total costs of palliation and death
cost_eol_nodiscount_age = cost_eol_sc_nodiscount_age + cost_eol_ns_nodiscount_age
(s_cost_eol_nodiscount_age,
m_cost_eol_nodiscount_age,
t_cost_eol_nodiscount_age) = outcomes(cost_eol_nodiscount_age)
cost_eol_discount_age = cost_eol_sc_discount_age + cost_eol_ns_discount_age
(s_cost_eol_discount_age,
m_cost_eol_discount_age,
t_cost_eol_discount_age) = outcomes(cost_eol_discount_age)
# TOTAL COSTS AGE-BASED SCREENING
#################################
cost_nodiscount_age = (cost_psa_testing_nodiscount_age
+ cost_biopsy_nodiscount_age
+ cost_staging_nodiscount_age
+ cost_tx_nodiscount_age
+ cost_eol_nodiscount_age)
s_cost_nodiscount_age, m_cost_nodiscount_age, t_cost_nodiscount_age = outcomes(cost_nodiscount_age)
cost_discount_age = (cost_psa_testing_discount_age
+ cost_biopsy_discount_age
+ cost_staging_discount_age
+ cost_tx_discount_age
+ cost_eol_discount_age)
s_cost_discount_age, m_cost_discount_age, t_cost_discount_age = outcomes(cost_discount_age)
# Generate a mean dataframe
age_matrix = [age, m_cases_age, m_cases_sc_detected_age,
m_cases_post_screening_age, m_overdiagnosis_age, m_deaths_other_age, m_deaths_pca_age,
m_pca_alive_age, m_healthy_age, m_lyrs_healthy_nodiscount_age,
m_lyrs_healthy_discount_age, m_lyrs_pca_discount_age, m_lyrs_discount_age,
m_qalys_healthy_discount_age, m_qalys_pca_discount_age, m_qalys_discount_age,
m_cost_psa_testing_discount_age, m_cost_biopsy_discount_age, m_cost_staging_discount_age,
m_cost_tx_discount_age, m_cost_eol_discount_age, m_cost_discount_age]
age_columns = ['age', 'pca_cases', 'screen-detected cases',
'post-screening cases', 'overdiagnosis', 'deaths_other', 'deaths_pca',
'pca_alive', 'healthy','lyrs_healthy_nodiscount', 'lyrs_healthy_discount',
'lyrs_pca_discount', 'total_lyrs_discount',
'qalys_healthy_discount', 'qalys_pca_discount', 'total_qalys_discount',
'cost_psa_testing_discount', 'cost_biopsy_discount', 'cost_staging_discount',
'cost_treatment_discount', 'costs_eol_discount', 'total_cost_discount']
age_cohort = pd.DataFrame(age_matrix, index = age_columns).T
t_parameters_age = [year, t_cases_age, t_overdiagnosis_age,
t_deaths_pca_age, t_deaths_other_age,
t_lyrs_healthy_discount_age, t_lyrs_pca_discount_age,
t_lyrs_nodiscount_age, t_lyrs_discount_age, t_qalys_healthy_discount_age,
t_qalys_pca_discount_age, t_qalys_nodiscount_age, t_qalys_discount_age,
t_cost_psa_testing_nodiscount_age, t_cost_psa_testing_discount_age,
t_cost_biopsy_nodiscount_age, t_cost_biopsy_discount_age,
t_cost_staging_nodiscount_age, t_cost_staging_discount_age,
t_cost_tx_nodiscount_age, t_cost_tx_discount_age,
t_cost_eol_nodiscount_age, t_cost_eol_discount_age,
t_cost_nodiscount_age, t_cost_discount_age,
total_n_psa_tests_age, total_n_biopsies_age]
columns_age = ['cohort_age_at_start', 'pca_cases', 'overdiagnosis',
'pca_deaths', 'deaths_other_causes',
'lyrs_healthy_discounted', 'lyrs_pca_discounted',
'lyrs_undiscounted', 'lyrs_discounted','qalys_healthy_discounted',
'qalys_pca_discounted', 'qalys_undiscounted', 'qalys_discounted',
'cost_psa_testing_undiscounted', 'cost_psa_testing_discounted',
'cost_biopsy_undiscounted', 'cost_biopsy_discounted',
'cost_staging_undiscounted', 'cost_staging_discounted',
'cost_treatment_undiscounted', 'cost_treatment_discounted',
'cost_eol_undiscounted', 'cost_eol_discounted',
'costs_undiscounted', 'costs_discounted', 'n_psa_tests', 'n_biopsies']
outcomes_age_psa = pd.DataFrame(t_parameters_age, index = columns_age).T
s_qalys_discount_age_df = pd.DataFrame(s_qalys_discount_age)
s_cost_discount_age_df = pd.DataFrame(s_cost_discount_age)
parameters_age = [s_qalys_discount_age, s_cost_discount_age,
s_deaths_pca_age, s_overdiagnosis_age,
age_cohort, outcomes_age_psa]
for index, parameter in enumerate(parameter_list_age):
parameter = gen_list_outcomes(parameter_list_age[index], parameters_age[index])
#################################################
# Polygenic risk tailored screening from age 55 #
#################################################
# Yearly probability of PCa incidence
smoothed_pca_incidence_prs = psa_function(pca_incidence)
smoothed_pca_incidence_prs[:,10:25] = (smoothed_pca_incidence_prs[:,10:25].T * rr_incidence[year-45,:]).T
smoothed_pca_incidence_prs[:,25:35] = smoothed_pca_incidence_prs[:,25:35] * np.linspace(post_sc_incidence_drop,1,10)
smoothed_pca_incidence_prs = smoothed_pca_incidence_prs[:,year-45:]
# Yearly probability of death from PCa - smoothed entry and exit
smoothed_pca_mortality_prs = psa_function(pca_death_baseline)
smoothed_pca_mortality_prs[:,10:15] = smoothed_pca_mortality_prs[:,10:15] * np.linspace(1,0.79,5)
smoothed_pca_mortality_prs[:,15:] = smoothed_pca_mortality_prs[:,15:] * rr_death_screening[:,15:]
smoothed_pca_mortality_prs = smoothed_pca_mortality_prs[:,year-45:]
# Probability of being screened
p_screened = np.array(uptake_prs * a_risk.loc[year,'p_above_threshold'])
p_ns = np.array((1-uptake_prs) * a_risk.loc[year,'p_above_threshold'])
p_nos = np.array(compliance * (1-a_risk.loc[year,'p_above_threshold']))
p_nos_screened = np.array((1-compliance) * (1-a_risk.loc[year,'p_above_threshold']))
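# The four probabilities above partition the cohort: screened / declining
# men above the PRS risk threshold, and compliant (unscreened) /
# non-compliant (screened anyway) men below it; they sum to one.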
if year < 55:
# Yearly probability of PCa incidence
p_pca_screened = tr_incidence
p_pca_ns = tr_incidence
p_pca_nos = tr_incidence
p_pca_nos_screened = tr_incidence
# Yearly probability of death from PCa
p_pca_death_screened = tr_pca_death_baseline
p_pca_death_ns = tr_pca_death_baseline
p_pca_death_nos = tr_pca_death_baseline
p_pca_death_nos_screened = tr_pca_death_baseline
# Proportion of cancers detected by screening at a localised / advanced stage
psa_stage_adv_sc = psa_function(stage_adv[year-45:])
psa_stage_adv_ns = psa_function(stage_adv[year-45:])
psa_stage_adv_nos_sc = psa_function(stage_adv[year-45:])
psa_stage_adv_nos = psa_function(stage_adv[year-45:])
psa_stage_local_sc = psa_function(stage_local[year-45:])
psa_stage_local_ns = psa_function(stage_local[year-45:])
psa_stage_local_nos_sc = psa_function(stage_local[year-45:])
psa_stage_local_nos = psa_function(stage_local[year-45:])
elif year > 54:
# Yearly probability of PCa incidence
p_pca_screened = smoothed_pca_incidence_prs * a_risk.loc[year, 'rr_high']
p_pca_ns = tr_incidence * a_risk.loc[year,'rr_high']
p_pca_nos = tr_incidence * a_risk.loc[year,'rr_low']
p_pca_nos_screened = smoothed_pca_incidence_prs * a_risk.loc[year,'rr_low']
# Yearly probability of death from PCa
p_pca_death_screened = smoothed_pca_mortality_prs * a_risk.loc[year,'rr_high']
p_pca_death_ns = tr_pca_death_baseline * a_risk.loc[year,'rr_high']
p_pca_death_nos = tr_pca_death_baseline * a_risk.loc[year,'rr_low']
p_pca_death_nos_screened = smoothed_pca_mortality_prs * a_risk.loc[year,'rr_low']
# Proportion of cancers detected by screening at a localised / advanced stage
stage_screened_adv_sc = (stage_adv
* rr_adv_screening
* a_risk.loc[year, 'rr_high'])
psa_stage_adv_sc = stage_screened_adv_sc[:,year-45:]
stage_clinical_adv_ns = stage_adv * a_risk.loc[year, 'rr_high']
psa_stage_adv_ns = psa_function(stage_clinical_adv_ns[year-45:])
stage_screened_adv_nos_sc = (stage_adv
* rr_adv_screening
* a_risk.loc[year, 'rr_low'])
psa_stage_adv_nos_sc = stage_screened_adv_nos_sc[:,year-45:]
stage_clinical_adv_nos = stage_adv * a_risk.loc[year, 'rr_low']
psa_stage_adv_nos = psa_function(stage_clinical_adv_nos[year-45:])
stage_screened_local_sc = 1-stage_screened_adv_sc
psa_stage_local_sc = stage_screened_local_sc[:,year-45:]
stage_clinical_local_ns = 1-stage_clinical_adv_ns
psa_stage_local_ns = psa_function(stage_clinical_local_ns[year-45:])
stage_screened_local_nos_sc = 1-stage_screened_adv_nos_sc
psa_stage_local_nos_sc = stage_screened_local_nos_sc[:, year-45:]
stage_clinical_local_nos = 1-stage_clinical_adv_nos
psa_stage_local_nos = psa_function(stage_clinical_local_nos[year-45:])
#####################
# Year 1 in the model
#####################
age = np.arange(year,90)
length_df = len(age)
length_screen = len(np.arange(year,70)) # number of screening years depending on age cohort starting
# Cohorts, numbers 'healthy', and incident cases
cohort_sc = np.array([np.repeat(pop[year], length_df)] * sims) * p_screened
cohort_ns = np.array([np.repeat(pop[year], length_df)] * sims) * p_ns
cohort_nos = np.array([np.repeat(pop[year], length_df)] * sims) * p_nos
cohort_nos_sc = np.array([np.repeat(pop[year], length_df)] * sims) * p_nos_screened
pca_alive_sc = np.array([np.zeros(length_df)] * sims)
pca_alive_ns = np.array([np.zeros(length_df)] * sims)
"""
RRRobot robot class definition
"""
import pickle
from pathlib import Path
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
from robots.robot import Robot
from utils.robo_math import SymbolicTransformation as st
from utils.plot_utils import TransformationPlotter
class RRRobot(Robot):
"""
RRRobot manipulator Forward Kinematics (FK) and Inverse Kinematics(IK)
calculator class
Attributes:
d (float): Length parameter from TxTz substitution
dq (float): Angle parameter from TxTz substitution
fk_data_path (pathlib.Path): Path to pickle forward kinematics data
Used to speedup FK calculation
ik_data_path (pathlib.Path): Path to pickle inverse kinematics data
Used to speedup FK calculation
ls (tuple): Lengths of links
qs_lim_deg (tuple of tuples): Joint limits in degrees
qs_lim_rad (tuple of tuples): Joint limits in radians
T_base (4x4 array like): Transformation from world frame to the base
T_tool (4x4 array like): Transformation from end-effector frame to the
tool frame
"""
qs_lim_deg = ((-360.0, 360.0), (-360.0, 360.0))
def __init__(self, T_base=None, T_tool=None, lengths=None, save=True):
"""
Prepares all necessary values and loads pickled matrices
Args:
T_base (None, optional): Transformation from the world frame
to the base frame
T_tool (None, optional): Transformation from the end-effector
frame to the tool frame
"""
self.set_transforms(T_base, T_tool)
self.set_lengths(lengths)
self._generate_value_pairs()
self._calculate_limits_radians()
self._save = save
if self._save:
self.fk_data_path = Path("robots/data/rr_forward_kinematics.pkl")
self._precalculate_data()
self._tp = TransformationPlotter()
def set_lengths(self, lengths):
if lengths is None:
self._ls = (0.8, 0.8)
else:
self._ls = lengths
def _generate_value_pairs(self):
"""
Generates name-value tuples for sympy substitution
"""
value_pairs = []
for i in range(len(self._ls)):
value_pairs.append((f"l_{i}", self._ls[i]))
self._value_pairs = value_pairs
def _calculate_limits_radians(self):
"""
Converts joint limits from degrees to radians
"""
self.qs_lim_rad = tuple(
(np.deg2rad(x[0]), np.deg2rad(x[1])) for x in self.qs_lim_deg)
def _precalculate_data(self):
"""
Precalculates and pickles constant matrices
"""
if self._save and self.fk_data_path.is_file():
with open(self.fk_data_path, 'rb') as f:
self._Ts = pickle.load(f)
else:
self._Ts = st("RzTxRzTx",
['q_0', 'l_0', 'q_1', 'l_1'])
self._Ts.substitute(self._value_pairs)
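# "RzTxRzTx" is assumed to encode the planar RR chain symbolically: rotate
# about z by q_0, translate l_0 along x, rotate by q_1, translate l_1 along
# x; the link lengths are substituted once here so that forward kinematics
# later only substitutes joint values.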
# Save data
if self._save and not self.fk_data_path.is_file():
with open(self.fk_data_path, 'wb') as output:
pickle.dump(self._Ts, output, pickle.HIGHEST_PROTOCOL)
def forward_kinematics(self, q_values, plot=True):
"""
Calculates forward kinematics of the tool pose given values of joints
Args:
q_values (list of float): Values of joints
plot (bool, optional): Flag to plot the result
Returns:
4x4 np.ndarray: Homogeneous tool pose
"""
qs_dict = {}
for i in range(len(q_values)):
qs_dict[sp.symbols(f"q_{i}")] = q_values[i]
self._numeric_frames = []
for frame in self._Ts.frames:
self._numeric_frames.append(frame.evalf(subs=qs_dict))
T = self.T_base * self._numeric_frames[-1] * self.T_tool
if plot:
self._show_fk()
return np.array(T, dtype=float)  # np.float was removed in modern NumPy
def _show_fk(self):
"""
Plots current pose of the robot
"""
frames = [self.T_base]
for frame, var in zip(self._numeric_frames, self._Ts.variables):
if var[0] == 'q':
frames.append(self.T_base * frame)
frames.append(self.T_base * self._numeric_frames[-1])
frames.append(frames[-1] * self.T_tool)
self._tp.plot_numeric_frames(frames,
axis_len=self._ls[0] / 8,
margin=2,
center=0,
fixed_scale=True)
def inverse_kinematics(self, T, m=1):
"""
Calculates inverse kinematics joint values qs from pose T
Args:
T (4x4 array like): Homogeneous pose matrix
m (int, optional): Elbow up flag. Should be -1 or 1
k (int, optional): Square root sign flag. Should be -1 or 1
Returns:
np.ndarray: Joint values, corresponding to T
or zeros in case of failure
"""
if abs(m) != 1:
print("[WARNING] m can only be -1 or 1. Defaulting to 1")
m = 1
T = sp.Matrix(T)
T_0 = self.T_base.inv() * T * self.T_tool.inv()
x, y = float(T_0[0, 3]), float(T_0[1, 3])
arccos_numerator = x**2 + y**2 - self._ls[0]**2 - self._ls[1]**2
arccos_denominator = 2.0 * self._ls[0] * self._ls[1]
arccos = arccos_numerator / arccos_denominator
# Check if the given position is reachable
if abs(arccos) > 1:
print("[INFO] The configuration is not reachable")
return np.array([0.0, 0.0])
q_1 = m * np.arccos(arccos)
beta = np.arctan2(self._ls[1] * np.sin(m * q_1),
self._ls[0] + self._ls[1] * np.cos(q_1))
q_0 = np.arctan2(y, x) - beta
return np.array([q_0, q_1])
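# Illustrative round trip (not in the original file; the `- beta` term and
# the return statement above are reconstructed from the standard planar-RR
# inverse-kinematics solution):
#   robot = RRRobot()
#   T = robot.forward_kinematics([0.3, -0.5], plot=False)
#   qs = robot.inverse_kinematics(T, m=1)  # expect approximately [0.3, -0.5]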
"""
Hidden Markov Tree model
"""
from abc import ABCMeta
from collections import namedtuple
import os
import scipy
from config import RES_DIR, CHROM_SIZES
from data_provider import SeqLoader
from hmm.HMMModel import _ContinuousEmission
from hmm.bwiter import bw_iter, IteratorCondition
__author__ = 'eranroz'
import numpy as np
class HMTModel(object):
"""
base model for HMT
see Crouse 1997 and Durand 2013
"""
__metaclass__ = ABCMeta
MIN_STD = 0.1
def __init__(self, state_transition, mean_vars, emission_density=scipy.stats.norm):
"""
Initializes a new HMT model.
@param state_transition: state transition matrix.
with rows - source state, cols - target state.
0 state assumed to be the begin state (pi - distrbution for root of the tree)
@param mean_vars: matrix with rows=num of states and cols =2,
where the first column is mean and second is variance
"""
self.state_transition = state_transition
self.mean_vars = mean_vars
self.emission_density = emission_density
self.emission = _ContinuousEmission(mean_vars, emission_density)
self.min_alpha = None
def num_states(self):
"""
Get number of states in the model
"""
return self.state_transition.shape[0]
def level_emission(self, level):
"""
Emission for level. override it to assign different emissions for different levels
@param level: level where 0 is the root
@return: a emission matrix (indexable object) with rows as states and columns as values for emission
"""
return self.emission
def maximize(self, sequence_tree, ud_output):
"""
Maximization step for in Upward-Downward algorithm (EM)
@param sequence_tree symbol sequence
@param ud_output results of upward downward (scaling version)
"""
self._maximize_emission(sequence_tree, ud_output.state_p)
self.state_transition[0, 1:] = ud_output.state_p[-1]
self.state_transition[1:, 1:] *= ud_output.transition_stat
#normalize
self.state_transition /= np.sum(self.state_transition, 1)[:, None]
if self.min_alpha is not None:
n_states = self.state_transition.shape[0]-1 # minus begin/root state
diagonal_selector = np.eye(n_states, dtype='bool')
self_transitions = self.state_transition[1:, 1:][diagonal_selector]
n_self_transitions = np.maximum(self.min_alpha, self_transitions)
# reduce the diff from the rest of transitions equally
self.state_transition[1:, 1:][~diagonal_selector] -= (n_self_transitions-self_transitions)/(n_states-1)
self.state_transition[1:, 1:][diagonal_selector] = n_self_transitions
print('State transition')
print(self.state_transition)
def _maximize_emission(self, sequence_tree, gammas):
n_states = self.num_states() - 1
n_levels = len(sequence_tree)
means_levels = np.zeros((n_levels, n_states))
vars_levels = np.zeros((n_levels, n_states))
state_norm_levels = np.zeros((n_levels, n_states))
scale_level = 0
for gamma, seq in zip(gammas, sequence_tree):
state_norm = np.sum(gamma, 0)
mu = np.sum(gamma * seq[:, None], 0) / state_norm
sym_min_mu = np.power(seq[:, None] - mu, 2)
std = np.sum(gamma * sym_min_mu, 0) / state_norm
state_norm_levels[scale_level, :] = state_norm
vars_levels[scale_level, :] = np.sqrt(std)
###################################################################################
## Main sampler
## Resumes from the saved MCMC state produced by the first run.
if __name__ == "__main__":
import nonstat_model_noXs.model_sim as utils
import nonstat_model_noXs.generic_samplers as sampler
import nonstat_model_noXs.priors as priors
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from pickle import load
from pickle import dump
from scipy.linalg import lapack
# Check whether the 'mpi4py' is installed
test_mpi = os.system("python -c 'from mpi4py import *' &> /dev/null")
if test_mpi != 0:
import sys
sys.exit("mpi4py import is failing, aborting...")
# get rank and size
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
thinning = 10; echo_interval = 20; n_updates = 50001
# Filename for storing the intermediate results
input_file='./nonstat_progress_'+str(rank)+'.pkl'
# Load data input
if rank==0:
with open(input_file, 'rb') as f:
Y = load(f)
cen = load(f)
cen_above = load(f)
initial_values = load(f)
sigma_m = load(f)
prop_sigma = load(f)
iter_current = load(f)
phi_trace = load(f)
tau_sqd_trace = load(f)
theta_c_trace = load(f)
beta_loc0_trace = load(f)
beta_loc1_trace = load(f)
beta_scale_trace = load(f)
beta_shape_trace = load(f)
Z_1t_trace = load(f)
R_1t_trace = load(f)
Y_onetime = load(f)
X_onetime = load(f)
X_s_onetime = load(f)
R_onetime = load(f)
Z_onetime = load(f)
f.close()
else:
with open(input_file, 'rb') as f:
Y = load(f)
cen = load(f)
cen_above = load(f)
initial_values = load(f)
sigma_m = load(f)
iter_current = load(f)
Z_1t_trace = load(f)
R_1t_trace = load(f)
Y_onetime = load(f)
X_onetime = load(f)
X_s_onetime = load(f)
R_onetime = load(f)
Z_onetime = load(f)
f.close()
# Bookkeeping
n_s = Y.shape[0]
n_t = Y.shape[1]
if n_t != size:
import sys
sys.exit("Make sure the number of cpus (N) = number of time replicates (n_t), i.e.\n srun -N python nonstat_sampler.py")
wh_to_plot_Xs = n_s*np.array([0.25,0.5,0.75])
wh_to_plot_Xs = wh_to_plot_Xs.astype(int)
# Filename for storing the intermediate results
filename='./nonstat_progress_'+str(rank)+'.pkl'
# Generate multiple independent random streams
random_generator = np.random.RandomState()
# Constants to control adaptation of the Metropolis sampler
c_0 = 10
c_1 = 0.8
offset = 3 # the iteration offset
r_opt_1d = .41
r_opt_2d = .35
eps = 1e-6 # a small number
# Hyper parameters for the priors of the mixing distribution parameters and the GEV coefficients
hyper_params_phi = np.array([0.5,0.7])
hyper_params_tau_sqd = np.array([0.1,0.1])
hyper_params_theta_c = np.array([0, 20])
hyper_params_theta_gev = 25
# hyper_params_range = np.array([0.5,1.5]) # in case where roughness is not updated
# Load latest values
initial_values = comm.bcast(initial_values,root=0) # Latest values are mostly in initial_values
phi = initial_values['phi']
gamma = initial_values['gamma']
tau_sqd = initial_values['tau_sqd']
prob_below = initial_values['prob_below']
prob_above = initial_values['prob_above']
Dist = initial_values['Dist']
theta_c = initial_values['theta_c']
Design_mat = initial_values['Design_mat']
beta_loc0 = initial_values['beta_loc0']
beta_loc1 = initial_values['beta_loc1']
Time = initial_values['Time']
beta_scale = initial_values['beta_scale']
beta_shape = initial_values['beta_shape']
n_covariates = len(beta_loc0)
if rank == 0:
X = np.empty((n_s,n_t))
X_s = np.empty((n_s,n_t))
Z = np.empty((n_s,n_t))
R = np.empty((n_t,))
# Correlation matrix of the latent Gaussian field (the eigendecomposition below is kept commented out for reference)
tmp_vec = np.ones(n_s)
Cor = utils.corr_fn(Dist, theta_c)
# eig_Cor = np.linalg.eigh(Cor) #For symmetric matrices
# V = eig_Cor[1]
# d = eig_Cor[0]
cholesky_inv = lapack.dposv(Cor,tmp_vec)
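# lapack.dposv solves Cor @ x = 1 via a Cholesky factorisation and is
# assumed to return (cholesky_factor, solution, info); the whole tuple is
# handed on to the Z update inside the MCMC loop.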
# For current values of phi and gamma, obtain grids of survival probs and densities
grid = utils.density_interp_grid(phi, gamma, grid_size=800)
xp = grid[0]; den_p = grid[1]; surv_p = grid[2]
thresh_X = utils.qRW_me_interp(prob_below, xp, surv_p, tau_sqd, phi, gamma)
thresh_X_above = utils.qRW_me_interp(prob_above, xp, surv_p, tau_sqd, phi, gamma)
# Marginal GEV parameters: per location x time
loc0 = Design_mat @beta_loc0
loc1 = Design_mat @beta_loc1
Loc = np.tile(loc0, n_t) + np.tile(loc1, n_t)*np.repeat(Time,n_s)
Loc = Loc.reshape((n_s,n_t),order='F')
scale = Design_mat @beta_scale
Scale = np.tile(scale, n_t)
Scale = Scale.reshape((n_s,n_t),order='F')
Design_mat1 = np.c_[np.repeat(1,n_s), np.log(Design_mat[:,1])]
shape = Design_mat1 @beta_shape
Shape = np.tile(shape, n_t)
Shape = Shape.reshape((n_s,n_t),order='F')
# Initial trace objects
Z_1t_accept = np.zeros(n_s)
R_accept = 0
if rank == 0:
print("Number of time replicates = %d"%size)
theta_c_trace_within_thinning = np.empty((2,thinning)); theta_c_trace_within_thinning[:] = np.nan
beta_loc0_trace_within_thinning = np.empty((n_covariates,thinning)); beta_loc0_trace_within_thinning[:] = np.nan
beta_loc1_trace_within_thinning = np.empty((n_covariates,thinning)); beta_loc1_trace_within_thinning[:] = np.nan
beta_scale_trace_within_thinning = np.empty((n_covariates,thinning)); beta_scale_trace_within_thinning[:] = np.nan
beta_shape_trace_within_thinning = np.empty((n_covariates,thinning)); beta_shape_trace_within_thinning[:] = np.nan
phi_accept = 0
tau_sqd_accept = 0
theta_c_accept = 0
beta_loc0_accept = 0
beta_loc1_accept = 0
beta_scale_accept = 0
beta_shape_accept = 0
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# --------------------------- Start Metropolis Updates ------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
for iter in np.arange(iter_current+1,n_updates):
# Update X
# print(str(rank)+" "+str(iter)+" Gathered? "+str(np.where(~cen)))
X_onetime = utils.X_update(Y_onetime, cen[:,rank], cen_above[:,rank], xp, surv_p, tau_sqd, phi, gamma, Loc[:,rank], Scale[:,rank], Shape[:,rank])
# Update Z
tmp = utils.Z_update_onetime(Y_onetime, X_onetime, R_onetime, Z_onetime, cen[:,rank], cen_above[:,rank], prob_below, prob_above,
tau_sqd, phi, gamma, Loc[:,rank], Scale[:,rank], Shape[:,rank], xp, surv_p, den_p,
thresh_X, thresh_X_above, Cor, cholesky_inv, sigma_m['Z_onetime'], random_generator)
Z_1t_accept = Z_1t_accept + tmp
# Update R
Metr_R = sampler.static_metr(Y_onetime, R_onetime, utils.Rt_update_mixture_me_likelihood,
priors.R_prior, gamma, 2,
random_generator,
np.nan, sigma_m['R_1t'], False,
X_onetime, Z_onetime, cen[:,rank], cen_above[:,rank],
prob_below, prob_above, Loc[:,rank], Scale[:,rank], Shape[:,rank], tau_sqd, phi, gamma,
xp, surv_p, den_p, thresh_X, thresh_X_above)
R_accept = R_accept + Metr_R['acc_prob']
R_onetime = Metr_R['trace'][0,1]
X_s_onetime = (R_onetime**phi)*utils.norm_to_Pareto(Z_onetime)
# *** Gather items ***
X_s_recv = comm.gather(X_s_onetime,root=0)
X_recv = comm.gather(X_onetime, root=0)
Z_recv = comm.gather(Z_onetime, root=0)
R_recv = comm.gather(R_onetime, root=0)
if rank==0:
X_s[:] = np.vstack(X_s_recv).T
X[:] = np.vstack(X_recv).T
# Check whether X is negative
if np.any(X[~cen & ~cen_above] < 0):
import sys
sys.exit("X value abnormality "+str(phi)+" "+str(tau_sqd))
Z[:] = np.vstack(Z_recv).T
R[:] = R_recv
index_within = (iter-1)%thinning
# print('beta_shape_accept=',beta_shape_accept, ', iter=', iter)
# Update phi
Metr_phi = sampler.static_metr(Y, phi, utils.phi_update_mixture_me_likelihood, priors.interval_unif,
hyper_params_phi, 2,
random_generator,
np.nan, sigma_m['phi'], False,
R, Z, cen, cen_above,
prob_below, prob_above, Loc, Scale, Shape, tau_sqd, gamma)
phi_accept = phi_accept + Metr_phi['acc_prob']
phi = Metr_phi['trace'][0,1]
# Update gamma (TBD)
#
grid = utils.density_interp_grid(phi, gamma, grid_size=800)
xp = grid[0]; den_p = grid[1]; surv_p = grid[2]
X_s = (R**phi)*utils.norm_to_Pareto(Z)
# Update tau_sqd
Metr_tau_sqd = sampler.static_metr(Y, tau_sqd, utils.tau_update_mixture_me_likelihood, priors.invGamma_prior,
hyper_params_tau_sqd, 2,
random_generator,
np.nan, sigma_m['tau_sqd'], False,
X_s, cen, cen_above,
prob_below, prob_above, Loc, Scale, Shape,
phi, gamma, xp, surv_p, den_p)
tau_sqd_accept = tau_sqd_accept + Metr_tau_sqd['acc_prob']
tau_sqd = Metr_tau_sqd['trace'][0,1]
thresh_X = utils.qRW_me_interp(prob_below, xp, surv_p, tau_sqd, phi, gamma)
thresh_X_above = utils.qRW_me_interp(prob_above, xp, surv_p, tau_sqd, phi, gamma)
# Update theta_c
Metr_theta_c = sampler.static_metr(Z, theta_c, utils.theta_c_update_mixture_me_likelihood,
priors.interval_unif_multi, hyper_params_theta_c, 2,
random_generator,
prop_sigma['theta_c'], sigma_m['theta_c'], False,
Dist)
theta_c_accept = theta_c_accept + Metr_theta_c['acc_prob']
theta_c = Metr_theta_c['trace'][:,1]
theta_c_trace_within_thinning[:,index_within] = theta_c
if Metr_theta_c['acc_prob']>0:
Cor = utils.corr_fn(Dist, theta_c)
# eig_Cor = np.linalg.eigh(Cor) #For symmetric matrices
# V = eig_Cor[1]
# d = eig_Cor[0]
cholesky_inv = lapack.dposv(Cor,tmp_vec)
# Update beta_loc0
Metr_beta_loc0 = sampler.static_metr(Design_mat, beta_loc0, utils.loc0_gev_update_mixture_me_likelihood,
priors.unif_prior, hyper_params_theta_gev, 2,
random_generator,
prop_sigma['beta_loc0'], sigma_m['beta_loc0'], False,
Y, X_s, cen, cen_above, prob_below, prob_above,
tau_sqd, phi, gamma, loc1, Scale, Shape, Time, xp, surv_p, den_p,
thresh_X, thresh_X_above)
beta_loc0_accept = beta_loc0_accept + Metr_beta_loc0['acc_prob']
beta_loc0 = Metr_beta_loc0['trace'][:,1]
beta_loc0_trace_within_thinning[:,index_within] = beta_loc0
loc0 = Design_mat @beta_loc0
# Update beta_loc1
Metr_beta_loc1 = sampler.static_metr(Design_mat, beta_loc1, utils.loc1_gev_update_mixture_me_likelihood,
priors.unif_prior, hyper_params_theta_gev, 2,
random_generator,
prop_sigma['beta_loc1'], sigma_m['beta_loc1'], False,
Y, X_s, cen, cen_above, prob_below, prob_above,
tau_sqd, phi, gamma, loc0, Scale, Shape, Time, xp, surv_p, den_p,
thresh_X, thresh_X_above)
beta_loc1_accept = beta_loc1_accept + Metr_beta_loc1['acc_prob']
beta_loc1 = Metr_beta_loc1['trace'][:,1]
beta_loc1_trace_within_thinning[:,index_within] = beta_loc1
loc1 = Design_mat @beta_loc1
Loc = np.tile(loc0, n_t) + np.tile(loc1, n_t)*np.repeat(Time,n_s)
Loc = Loc.reshape((n_s,n_t),order='F')
# Update beta_scale
Metr_beta_scale = sampler.static_metr(Design_mat, beta_scale, utils.scale_gev_update_mixture_me_likelihood,
priors.unif_prior, hyper_params_theta_gev, 2,
random_generator,
prop_sigma['beta_scale'], sigma_m['beta_scale'], False,
Y, X_s, cen, cen_above, prob_below, prob_above,
tau_sqd, phi, gamma, Loc, Shape, Time, xp, surv_p, den_p,
thresh_X, thresh_X_above)
beta_scale_accept = beta_scale_accept + Metr_beta_scale['acc_prob']
beta_scale = Metr_beta_scale['trace'][:,1]
beta_scale_trace_within_thinning[:,index_within] = beta_scale
scale = Design_mat @beta_scale
Scale = np.tile(scale, n_t)
Scale = Scale.reshape((n_s,n_t),order='F')
# # Update beta_shape
# Metr_beta_shape = sampler.static_metr(Design_mat, beta_shape, utils.shape_gev_update_mixture_me_likelihood,
# priors.unif_prior, hyper_params_theta_gev, 2,
# random_generator,
# prop_sigma['beta_shape'], sigma_m['beta_shape'], False,
# Y, X_s, cen, cen_above, prob_below, prob_above,
# tau_sqd, phi, gamma, Loc, Scale, Time, xp, surv_p, den_p,
# thresh_X, thresh_X_above)
# beta_shape_accept = beta_shape_accept + Metr_beta_shape['acc_prob']
# beta_shape = Metr_beta_shape['trace'][:,1]
# beta_shape_trace_within_thinning[:,index_within] = beta_shape
# shape = Design_mat1 @beta_shape
# Shape = np.tile(shape, n_t)
# Shape = Shape.reshape((n_s,n_t),order='F')
# cen[:] = utils.which_censored(Y, Loc, Scale, Shape, prob_below)
# cen_above[:] = ~utils.which_censored(Y, Loc, Scale, Shape, prob_above)
# *** Broadcast items ***
phi = comm.bcast(phi,root=0)
xp = comm.bcast(xp,root=0)
den_p = comm.bcast(den_p,root=0)
surv_p = comm.bcast(surv_p,root=0)
tau_sqd = comm.bcast(tau_sqd,root=0)
thresh_X = comm.bcast(thresh_X,root=0)
thresh_X_above = comm.bcast(thresh_X_above,root=0)
theta_c = comm.bcast(theta_c,root=0)
# V = comm.bcast(V,root=0)
# d = comm.bcast(d,root=0)
Cor = comm.bcast(Cor,root=0)
cholesky_inv = comm.bcast(cholesky_inv,root=0)
Loc = comm.bcast(Loc,root=0)
Scale = comm.bcast(Scale,root=0)
Shape = comm.bcast(Shape,root=0)
# cen = comm.bcast(cen,root=0)
# cen_above = comm.bcast(cen_above,root=0)
# ----------------------------------------------------------------------------------------
# --------------------------- Summarize every 'thinning' steps ---------------------------
# ----------------------------------------------------------------------------------------
if (iter % thinning) == 0:
index = int(iter / thinning)  # np.int was removed in modern NumPy
# Fill in trace objects
Z_1t_trace[:,index] = Z_onetime
R_1t_trace[index] = R_onetime
if rank == 0:
phi_trace[index] = phi
tau_sqd_trace[index] = tau_sqd
theta_c_trace[:,index] = theta_c
beta_loc0_trace[:,index] = beta_loc0
beta_loc1_trace[:,index] = beta_loc1
beta_scale_trace[:,index] = beta_scale
beta_shape_trace[:,index] = beta_shape
# Adapt via Shaby and Wells (2010)
gamma2 = 1 / (index + offset)**(c_1)
gamma1 = c_0*gamma2
sigma_m['Z_onetime'] = np.exp(np.log(sigma_m['Z_onetime']) + gamma1*(Z_1t_accept/thinning - r_opt_1d))
Z_1t_accept[:] = 0
sigma_m['R_1t'] = np.exp(np.log(sigma_m['R_1t']) + gamma1*(R_accept/thinning - r_opt_1d))
R_accept = 0
if rank == 0:
sigma_m['phi'] = np.exp(np.log(sigma_m['phi']) + gamma1*(phi_accept/thinning - r_opt_1d))
phi_accept = 0
sigma_m['tau_sqd'] = np.exp(np.log(sigma_m['tau_sqd']) + gamma1*(tau_sqd_accept/thinning - r_opt_1d))
tau_sqd_accept = 0
sigma_m['theta_c'] = np.exp(np.log(sigma_m['theta_c']) + gamma1*(theta_c_accept/thinning - r_opt_2d))
theta_c_accept = 0
prop_sigma['theta_c'] = prop_sigma['theta_c'] + gamma2*(np.cov(theta_c_trace_within_thinning) - prop_sigma['theta_c'])
check_chol_cont = True
while check_chol_cont:
try:
# Initialize prop_C
np.linalg.cholesky(prop_sigma['theta_c'])
check_chol_cont = False
except np.linalg.LinAlgError:
prop_sigma['theta_c'] = prop_sigma['theta_c'] + eps*np.eye(2)
print("Oops. Proposal covariance matrix is now:\n")
print(prop_sigma['theta_c'])
sigma_m['beta_loc0'] = np.exp(np.log(sigma_m['beta_loc0']) + gamma1*(beta_loc0_accept/thinning - r_opt_2d))
beta_loc0_accept = 0
prop_sigma['beta_loc0'] = prop_sigma['beta_loc0'] + gamma2*(np.cov(beta_loc0_trace_within_thinning) - prop_sigma['beta_loc0'])
check_chol_cont = True
while check_chol_cont:
try:
# Initialize prop_C
np.linalg.cholesky(prop_sigma['beta_loc0'])
check_chol_cont = False
except np.linalg.LinAlgError:
prop_sigma['beta_loc0'] = prop_sigma['beta_loc0'] + eps*np.eye(n_covariates)
print("Oops. Proposal covariance matrix is now:\n")
print(prop_sigma['beta_loc0'])
sigma_m['beta_loc1'] = np.exp(np.log(sigma_m['beta_loc1']) + gamma1*(beta_loc1_accept/thinning - r_opt_2d))
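# Hedged sketch of the Shaby & Wells (2010) log-scale adaptation used in the
# block above: every `thinning` iterations, each proposal scale is nudged so
# the empirical acceptance rate drifts toward a target. The name and default
# values below (target, c_0, c_1, offset) are illustrative assumptions, not
# taken from the sampler above.
import numpy as np

def adapt_proposal_scale(sigma, accepted, thinning, index,
                         target=0.44, c_0=10.0, c_1=0.8, offset=2):
    """One Robbins-Monro update of a Metropolis proposal scale."""
    gamma2 = 1.0 / (index + offset) ** c_1   # decaying adaptation step
    gamma1 = c_0 * gamma2
    return np.exp(np.log(sigma) + gamma1 * (accepted / thinning - target))

# e.g. adapt_proposal_scale(0.5, accepted=3, thinning=10, index=7) shrinks the
# scale, because an acceptance rate of 0.3 is below the assumed 0.44 target.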
import pytest
import numpy as np
import numpy.testing as npt
import scipy.stats as st
from scipy.special import expit
from scipy import linalg
import numpy.random as nr
import theano
import pymc3 as pm
from pymc3.distributions.distribution import (draw_values,
_DrawValuesContext,
_DrawValuesContextBlocker)
from .helpers import SeededTest
from .test_distributions import (
build_model, Domain, product, R, Rplus, Rplusbig, Runif, Rplusdunif,
Unit, Nat, NatSmall, I, Simplex, Vector, PdMatrix,
PdMatrixChol, PdMatrixCholUpper, RealMatrix, RandomPdMatrix
)
def pymc3_random(dist, paramdomains, ref_rand, valuedomain=Domain([0]),
size=10000, alpha=0.05, fails=10, extra_args=None,
model_args=None):
if model_args is None:
model_args = {}
model = build_model(dist, valuedomain, paramdomains, extra_args)
domains = paramdomains.copy()
for pt in product(domains, n_samples=100):
pt = pm.Point(pt, model=model)
pt.update(model_args)
p = alpha
# Allow KS test to fail (i.e., the samples be different)
# a certain number of times. Crude, but necessary.
f = fails
while p <= alpha and f > 0:
s0 = model.named_vars['value'].random(size=size, point=pt)
s1 = ref_rand(size=size, **pt)
_, p = st.ks_2samp(np.atleast_1d(s0).flatten(),
np.atleast_1d(s1).flatten())
f -= 1
assert p > alpha, str(pt)
def pymc3_random_discrete(dist, paramdomains,
valuedomain=Domain([0]), ref_rand=None,
size=100000, alpha=0.05, fails=20):
model = build_model(dist, valuedomain, paramdomains)
domains = paramdomains.copy()
for pt in product(domains, n_samples=100):
pt = pm.Point(pt, model=model)
p = alpha
# Allow Chisq test to fail (i.e., the samples be different)
# a certain number of times.
f = fails
while p <= alpha and f > 0:
o = model.named_vars['value'].random(size=size, point=pt)
e = ref_rand(size=size, **pt)
o = np.atleast_1d(o).flatten()
e = np.atleast_1d(e).flatten()
observed = dict(zip(*np.unique(o, return_counts=True)))
expected = dict(zip(*np.unique(e, return_counts=True)))
for e in expected.keys():
expected[e] = (observed.get(e, 0), expected[e])
k = np.array([v for v in expected.values()])
if np.all(k[:, 0] == k[:, 1]):
p = 1.
else:
_, p = st.chisquare(k[:, 0], k[:, 1])
f -= 1
assert p > alpha, str(pt)
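# Hedged sketch (hypothetical helper, not part of the test suite) of the
# count-merging step above: observed and reference draws are tallied per
# support point, aligned on the reference support, then compared with a
# chi-square test; the p-value is forced to 1 when the tables agree exactly.
def _demo_chisq_pvalue(o, e):
    observed = dict(zip(*np.unique(o, return_counts=True)))
    expected = dict(zip(*np.unique(e, return_counts=True)))
    k = np.array([(observed.get(v, 0), c) for v, c in expected.items()])
    if np.all(k[:, 0] == k[:, 1]):
        return 1.
    return st.chisquare(k[:, 0], k[:, 1])[1]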
class TestDrawValues(SeededTest):
def test_draw_scalar_parameters(self):
with pm.Model():
y = pm.Normal('y1', mu=0., sigma=1.)
mu, tau = draw_values([y.distribution.mu, y.distribution.tau])
npt.assert_almost_equal(mu, 0)
npt.assert_almost_equal(tau, 1)
def test_draw_dependencies(self):
with pm.Model():
x = pm.Normal('x', mu=0., sigma=1.)
exp_x = pm.Deterministic('exp_x', pm.math.exp(x))
x, exp_x = draw_values([x, exp_x])
npt.assert_almost_equal(np.exp(x), exp_x)
def test_draw_order(self):
with pm.Model():
x = pm.Normal('x', mu=0., sigma=1.)
exp_x = pm.Deterministic('exp_x', pm.math.exp(x))
# Need to draw x before drawing log_x
exp_x, x = draw_values([exp_x, x])
npt.assert_almost_equal(np.exp(x), exp_x)
def test_draw_point_replacement(self):
with pm.Model():
mu = pm.Normal('mu', mu=0., tau=1e-3)
sigma = pm.Gamma('sigma', alpha=1., beta=1., transform=None)
y = pm.Normal('y', mu=mu, sigma=sigma)
mu2, tau2 = draw_values([y.distribution.mu, y.distribution.tau],
point={'mu': 5., 'sigma': 2.})
npt.assert_almost_equal(mu2, 5)
npt.assert_almost_equal(tau2, 1 / 2.**2)
def test_random_sample_returns_nd_array(self):
with pm.Model():
mu = pm.Normal('mu', mu=0., tau=1e-3)
sigma = pm.Gamma('sigma', alpha=1., beta=1., transform=None)
y = pm.Normal('y', mu=mu, sigma=sigma)
mu, tau = draw_values([y.distribution.mu, y.distribution.tau])
assert isinstance(mu, np.ndarray)
assert isinstance(tau, np.ndarray)
class TestDrawValuesContext:
def test_normal_context(self):
with _DrawValuesContext() as context0:
assert context0.parent is None
context0.drawn_vars['root_test'] = 1
with _DrawValuesContext() as context1:
assert id(context1.drawn_vars) == id(context0.drawn_vars)
assert context1.parent == context0
with _DrawValuesContext() as context2:
assert id(context2.drawn_vars) == id(context0.drawn_vars)
assert context2.parent == context1
context2.drawn_vars['leaf_test'] = 2
assert context1.drawn_vars['leaf_test'] == 2
context1.drawn_vars['root_test'] = 3
assert context0.drawn_vars['root_test'] == 3
assert context0.drawn_vars['leaf_test'] == 2
def test_blocking_context(self):
with _DrawValuesContext() as context0:
assert context0.parent is None
context0.drawn_vars['root_test'] = 1
with _DrawValuesContext() as context1:
assert id(context1.drawn_vars) == id(context0.drawn_vars)
assert context1.parent == context0
with _DrawValuesContextBlocker() as blocker:
assert id(blocker.drawn_vars) != id(context0.drawn_vars)
assert blocker.parent is None
blocker.drawn_vars['root_test'] = 2
with _DrawValuesContext() as context2:
assert id(context2.drawn_vars) == id(blocker.drawn_vars)
assert context2.parent == blocker
context2.drawn_vars['root_test'] = 3
context2.drawn_vars['leaf_test'] = 4
assert blocker.drawn_vars['root_test'] == 3
assert 'leaf_test' not in context1.drawn_vars
assert context0.drawn_vars['root_test'] == 1
class BaseTestCases:
class BaseTestCase(SeededTest):
shape = 5
def setup_method(self, *args, **kwargs):
super().setup_method(*args, **kwargs)
self.model = pm.Model()
def get_random_variable(self, shape, with_vector_params=False, name=None):
if with_vector_params:
params = {key: value * np.ones(self.shape, dtype=np.dtype(type(value))) for
key, value in self.params.items()}
else:
params = self.params
if name is None:
name = self.distribution.__name__
with self.model:
if shape is None:
return self.distribution(name, transform=None, **params)
else:
return self.distribution(name, shape=shape, transform=None, **params)
@staticmethod
def sample_random_variable(random_variable, size):
try:
return random_variable.random(size=size)
except AttributeError:
return random_variable.distribution.random(size=size)
@pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)
def test_scalar_parameter_shape(self, size):
rv = self.get_random_variable(None)
if size is None:
expected = 1,
else:
expected = np.atleast_1d(size).tolist()
actual = np.atleast_1d(self.sample_random_variable(rv, size)).shape
assert tuple(expected) == actual
@pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)
def test_scalar_shape(self, size):
shape = 10
rv = self.get_random_variable(shape)
if size is None:
expected = []
else:
expected = np.atleast_1d(size).tolist()
expected.append(shape)
actual = np.atleast_1d(self.sample_random_variable(rv, size)).shape
assert tuple(expected) == actual
@pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)
def test_parameters_1d_shape(self, size):
rv = self.get_random_variable(self.shape, with_vector_params=True)
if size is None:
expected = []
else:
expected = np.atleast_1d(size).tolist()
expected.append(self.shape)
actual = self.sample_random_variable(rv, size).shape
assert tuple(expected) == actual
@pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)
def test_broadcast_shape(self, size):
broadcast_shape = (2 * self.shape, self.shape)
rv = self.get_random_variable(broadcast_shape, with_vector_params=True)
if size is None:
expected = []
else:
expected = np.atleast_1d(size).tolist()
expected.extend(broadcast_shape)
actual = np.atleast_1d(self.sample_random_variable(rv, size)).shape
assert tuple(expected) == actual
@pytest.mark.parametrize('shape', [(), (1,), (1, 1), (1, 2), (10, 10, 1), (10, 10, 2)], ids=str)
def test_different_shapes_and_sample_sizes(self, shape):
prefix = self.distribution.__name__
rv = self.get_random_variable(shape, name='%s_%s' % (prefix, shape))
for size in (None, 1, 5, (4, 5)):
if size is None:
s = []
else:
try:
s = list(size)
except TypeError:
s = [size]
if s == [1]:
s = []
if shape not in ((), (1,)):
s.extend(shape)
e = tuple(s)
a = self.sample_random_variable(rv, size).shape
assert e == a
class TestNormal(BaseTestCases.BaseTestCase):
distribution = pm.Normal
params = {'mu': 0., 'tau': 1.}
class TestTruncatedNormal(BaseTestCases.BaseTestCase):
distribution = pm.TruncatedNormal
params = {'mu': 0., 'tau': 1., 'lower':-0.5, 'upper':0.5}
class TestSkewNormal(BaseTestCases.BaseTestCase):
distribution = pm.SkewNormal
params = {'mu': 0., 'sigma': 1., 'alpha': 5.}
class TestHalfNormal(BaseTestCases.BaseTestCase):
distribution = pm.HalfNormal
params = {'tau': 1.}
class TestUniform(BaseTestCases.BaseTestCase):
distribution = pm.Uniform
params = {'lower': 0., 'upper': 1.}
class TestTriangular(BaseTestCases.BaseTestCase):
distribution = pm.Triangular
params = {'c': 0.5, 'lower': 0., 'upper': 1.}
class TestWald(BaseTestCases.BaseTestCase):
distribution = pm.Wald
params = {'mu': 1., 'lam': 1., 'alpha': 0.}
class TestBeta(BaseTestCases.BaseTestCase):
distribution = pm.Beta
params = {'alpha': 1., 'beta': 1.}
class TestKumaraswamy(BaseTestCases.BaseTestCase):
distribution = pm.Kumaraswamy
params = {'a': 1., 'b': 1.}
class TestExponential(BaseTestCases.BaseTestCase):
distribution = pm.Exponential
params = {'lam': 1.}
class TestLaplace(BaseTestCases.BaseTestCase):
distribution = pm.Laplace
params = {'mu': 1., 'b': 1.}
class TestLognormal(BaseTestCases.BaseTestCase):
distribution = pm.Lognormal
params = {'mu': 1., 'tau': 1.}
class TestStudentT(BaseTestCases.BaseTestCase):
distribution = pm.StudentT
params = {'nu': 5., 'mu': 0., 'lam': 1.}
class TestPareto(BaseTestCases.BaseTestCase):
distribution = pm.Pareto
params = {'alpha': 0.5, 'm': 1.}
class TestCauchy(BaseTestCases.BaseTestCase):
distribution = pm.Cauchy
params = {'alpha': 1., 'beta': 1.}
class TestHalfCauchy(BaseTestCases.BaseTestCase):
distribution = pm.HalfCauchy
params = {'beta': 1.}
class TestGamma(BaseTestCases.BaseTestCase):
distribution = pm.Gamma
params = {'alpha': 1., 'beta': 1.}
class TestInverseGamma(BaseTestCases.BaseTestCase):
distribution = pm.InverseGamma
params = {'alpha': 0.5, 'beta': 0.5}
class TestChiSquared(BaseTestCases.BaseTestCase):
distribution = pm.ChiSquared
params = {'nu': 2.}
class TestWeibull(BaseTestCases.BaseTestCase):
distribution = pm.Weibull
params = {'alpha': 1., 'beta': 1.}
class TestExGaussian(BaseTestCases.BaseTestCase):
distribution = pm.ExGaussian
params = {'mu': 0., 'sigma': 1., 'nu': 1.}
class TestVonMises(BaseTestCases.BaseTestCase):
distribution = pm.VonMises
params = {'mu': 0., 'kappa': 1.}
class TestGumbel(BaseTestCases.BaseTestCase):
distribution = pm.Gumbel
params = {'mu': 0., 'beta': 1.}
class TestLogistic(BaseTestCases.BaseTestCase):
distribution = pm.Logistic
params = {'mu': 0., 's': 1.}
class TestLogitNormal(BaseTestCases.BaseTestCase):
distribution = pm.LogitNormal
params = {'mu': 0., 'sigma': 1.}
class TestBinomial(BaseTestCases.BaseTestCase):
distribution = pm.Binomial
params = {'n': 5, 'p': 0.5}
class TestBetaBinomial(BaseTestCases.BaseTestCase):
distribution = pm.BetaBinomial
params = {'n': 5, 'alpha': 1., 'beta': 1.}
class TestBernoulli(BaseTestCases.BaseTestCase):
distribution = pm.Bernoulli
params = {'p': 0.5}
class TestDiscreteWeibull(BaseTestCases.BaseTestCase):
distribution = pm.DiscreteWeibull
params = {'q': 0.25, 'beta': 2.}
class TestPoisson(BaseTestCases.BaseTestCase):
distribution = pm.Poisson
params = {'mu': 1.}
class TestNegativeBinomial(BaseTestCases.BaseTestCase):
distribution = pm.NegativeBinomial
params = {'mu': 1., 'alpha': 1.}
class TestConstant(BaseTestCases.BaseTestCase):
distribution = pm.Constant
params = {'c': 3}
class TestZeroInflatedPoisson(BaseTestCases.BaseTestCase):
distribution = pm.ZeroInflatedPoisson
params = {'theta': 1., 'psi': 0.3}
class TestZeroInflatedNegativeBinomial(BaseTestCases.BaseTestCase):
distribution = pm.ZeroInflatedNegativeBinomial
params = {'mu': 1., 'alpha': 1., 'psi': 0.3}
class TestZeroInflatedBinomial(BaseTestCases.BaseTestCase):
distribution = pm.ZeroInflatedBinomial
params = {'n': 10, 'p': 0.6, 'psi': 0.3}
class TestDiscreteUniform(BaseTestCases.BaseTestCase):
distribution = pm.DiscreteUniform
params = {'lower': 0., 'upper': 10.}
class TestGeometric(BaseTestCases.BaseTestCase):
distribution = pm.Geometric
params = {'p': 0.5}
class TestCategorical(BaseTestCases.BaseTestCase):
distribution = pm.Categorical
params = {'p': np.ones(BaseTestCases.BaseTestCase.shape)}
def get_random_variable(self, shape, with_vector_params=False, **kwargs): # don't transform categories
return super().get_random_variable(shape, with_vector_params=False, **kwargs)
def test_probability_vector_shape(self):
"""Check that if a 2d array of probabilities are passed to categorical correct shape is returned"""
p = np.ones((10, 5))
assert pm.Categorical.dist(p=p).random().shape == (10,)
class TestScalarParameterSamples(SeededTest):
def test_bounded(self):
# A bit crude...
BoundedNormal = pm.Bound(pm.Normal, upper=0)
def ref_rand(size, tau):
return -st.halfnorm.rvs(size=size, loc=0, scale=tau ** -0.5)
pymc3_random(BoundedNormal, {'tau': Rplus}, ref_rand=ref_rand)
def test_uniform(self):
def ref_rand(size, lower, upper):
return st.uniform.rvs(size=size, loc=lower, scale=upper - lower)
pymc3_random(pm.Uniform, {'lower': -Rplus, 'upper': Rplus}, ref_rand=ref_rand)
def test_normal(self):
def ref_rand(size, mu, sigma):
return st.norm.rvs(size=size, loc=mu, scale=sigma)
pymc3_random(pm.Normal, {'mu': R, 'sigma': Rplus}, ref_rand=ref_rand)
def test_truncated_normal(self):
def ref_rand(size, mu, sigma, lower, upper):
return st.truncnorm.rvs((lower-mu)/sigma, (upper-mu)/sigma, size=size, loc=mu, scale=sigma)
pymc3_random(pm.TruncatedNormal, {'mu': R, 'sigma': Rplusbig, 'lower':-Rplusbig, 'upper':Rplusbig},
ref_rand=ref_rand)
def test_skew_normal(self):
def ref_rand(size, alpha, mu, sigma):
return st.skewnorm.rvs(size=size, a=alpha, loc=mu, scale=sigma)
pymc3_random(pm.SkewNormal, {'mu': R, 'sigma': Rplus, 'alpha': R}, ref_rand=ref_rand)
def test_half_normal(self):
def ref_rand(size, tau):
return st.halfnorm.rvs(size=size, loc=0, scale=tau ** -0.5)
pymc3_random(pm.HalfNormal, {'tau': Rplus}, ref_rand=ref_rand)
def test_wald(self):
# Cannot do anything too exciting as scipy wald is a
# location-scale model of the *standard* wald with mu=1 and lam=1
def ref_rand(size, mu, lam, alpha):
return st.wald.rvs(size=size, loc=alpha)
pymc3_random(pm.Wald,
{'mu': Domain([1., 1., 1.]), 'lam': Domain(
[1., 1., 1.]), 'alpha': Rplus},
ref_rand=ref_rand)
def test_beta(self):
def ref_rand(size, alpha, beta):
return st.beta.rvs(a=alpha, b=beta, size=size)
pymc3_random(pm.Beta, {'alpha': Rplus, 'beta': Rplus}, ref_rand=ref_rand)
def test_exponential(self):
def ref_rand(size, lam):
return nr.exponential(scale=1. / lam, size=size)
pymc3_random(pm.Exponential, {'lam': Rplus}, ref_rand=ref_rand)
def test_laplace(self):
def ref_rand(size, mu, b):
return st.laplace.rvs(mu, b, size=size)
pymc3_random(pm.Laplace, {'mu': R, 'b': Rplus}, ref_rand=ref_rand)
def test_lognormal(self):
def ref_rand(size, mu, tau):
return np.exp(mu + (tau ** -0.5) * st.norm.rvs(loc=0., scale=1., size=size))
pymc3_random(pm.Lognormal, {'mu': R, 'tau': Rplusbig}, ref_rand=ref_rand)
def test_student_t(self):
def ref_rand(size, nu, mu, lam):
return st.t.rvs(nu, mu, lam**-.5, size=size)
pymc3_random(pm.StudentT, {'nu': Rplus, 'mu': R, 'lam': Rplus}, ref_rand=ref_rand)
def test_cauchy(self):
def ref_rand(size, alpha, beta):
return st.cauchy.rvs(alpha, beta, size=size)
pymc3_random(pm.Cauchy, {'alpha': R, 'beta': Rplusbig}, ref_rand=ref_rand)
def test_half_cauchy(self):
def ref_rand(size, beta):
return st.halfcauchy.rvs(scale=beta, size=size)
pymc3_random(pm.HalfCauchy, {'beta': Rplusbig}, ref_rand=ref_rand)
def test_gamma_alpha_beta(self):
def ref_rand(size, alpha, beta):
return st.gamma.rvs(alpha, scale=1. / beta, size=size)
pymc3_random(pm.Gamma, {'alpha': Rplusbig, 'beta': Rplusbig}, ref_rand=ref_rand)
def test_gamma_mu_sigma(self):
def ref_rand(size, mu, sigma):
return st.gamma.rvs(mu**2 / sigma**2, scale=sigma ** 2 / mu, size=size)
pymc3_random(pm.Gamma, {'mu': Rplusbig, 'sigma': Rplusbig}, ref_rand=ref_rand)
def test_inverse_gamma(self):
def ref_rand(size, alpha, beta):
return st.invgamma.rvs(a=alpha, scale=beta, size=size)
pymc3_random(pm.InverseGamma, {'alpha': Rplus, 'beta': Rplus}, ref_rand=ref_rand)
def test_pareto(self):
def ref_rand(size, alpha, m):
return st.pareto.rvs(alpha, scale=m, size=size)
pymc3_random(pm.Pareto, {'alpha': Rplusbig, 'm': Rplusbig}, ref_rand=ref_rand)
def test_ex_gaussian(self):
def ref_rand(size, mu, sigma, nu):
return nr.normal(mu, sigma, size=size) + nr.exponential(scale=nu, size=size)
pymc3_random(pm.ExGaussian, {'mu': R, 'sigma': Rplus, 'nu': Rplus}, ref_rand=ref_rand)
def test_vonmises(self):
def ref_rand(size, mu, kappa):
return st.vonmises.rvs(size=size, loc=mu, kappa=kappa)
pymc3_random(pm.VonMises, {'mu': R, 'kappa': Rplus}, ref_rand=ref_rand)
def test_triangular(self):
def ref_rand(size, lower, upper, c):
scale = upper - lower
c_ = (c - lower) / scale
return st.triang.rvs(size=size, loc=lower, scale=scale, c=c_)
pymc3_random(pm.Triangular, {'lower': Runif, 'upper': Runif + 3, 'c': Runif + 1}, ref_rand=ref_rand)
def test_flat(self):
with pm.Model():
f = pm.Flat('f')
with pytest.raises(ValueError):
f.random(1)
def test_half_flat(self):
with pm.Model():
f = pm.HalfFlat('f')
with pytest.raises(ValueError):
f.random(1)
def test_binomial(self):
pymc3_random_discrete(pm.Binomial, {'n': Nat, 'p': Unit}, ref_rand=st.binom.rvs)
def test_beta_binomial(self):
pymc3_random_discrete(pm.BetaBinomial, {'n': Nat, 'alpha': Rplus, 'beta': Rplus},
ref_rand=self._beta_bin)
def _beta_bin(self, n, alpha, beta, size=None):
return st.binom.rvs(n, st.beta.rvs(a=alpha, b=beta, size=size))
def test_bernoulli(self):
pymc3_random_discrete(pm.Bernoulli, {'p': Unit},
ref_rand=lambda size, p=None: st.bernoulli.rvs(p, size=size))
def test_poisson(self):
pymc3_random_discrete(pm.Poisson, {'mu': Rplusbig}, size=500, ref_rand=st.poisson.rvs)
def test_negative_binomial(self):
def ref_rand(size, alpha, mu):
return st.nbinom.rvs(alpha, alpha / (mu + alpha), size=size)
pymc3_random_discrete(pm.NegativeBinomial, {'mu': Rplusbig, 'alpha': Rplusbig},
size=100, fails=50, ref_rand=ref_rand)
def test_geometric(self):
pymc3_random_discrete(pm.Geometric, {'p': Unit}, size=500, fails=50, ref_rand=nr.geometric)
def test_discrete_uniform(self):
def ref_rand(size, lower, upper):
return st.randint.rvs(lower, upper + 1, size=size)
pymc3_random_discrete(pm.DiscreteUniform, {'lower': -NatSmall, 'upper': NatSmall},
ref_rand=ref_rand)
def test_discrete_weibull(self):
def ref_rand(size, q, beta):
u = np.random.uniform(size=size)
return np.ceil(np.power(np.log(1 - u) / np.log(q), 1. / beta)) - 1
pymc3_random_discrete(pm.DiscreteWeibull, {'q': Unit, 'beta': Rplusdunif},
ref_rand=ref_rand)
@pytest.mark.parametrize('s', [2, 3, 4])
def test_categorical_random(self, s):
def ref_rand(size, p):
return nr.choice(np.arange(p.shape[0]), p=p, size=size)
pymc3_random_discrete(pm.Categorical, {'p': Simplex(s)}, ref_rand=ref_rand)
def test_constant_dist(self):
def ref_rand(size, c):
return c * np.ones(size, dtype=int)
pymc3_random_discrete(pm.Constant, {'c': I}, ref_rand=ref_rand)
def test_mv_normal(self):
def ref_rand(size, mu, cov):
return st.multivariate_normal.rvs(mean=mu, cov=cov, size=size)
def ref_rand_tau(size, mu, tau):
return ref_rand(size, mu, linalg.inv(tau))
def ref_rand_chol(size, mu, chol):
return ref_rand(size, mu, np.dot(chol, chol.T))
def ref_rand_uchol(size, mu, chol):
return ref_rand(size, mu, np.dot(chol.T, chol))
for n in [2, 3]:
pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'cov': PdMatrix(n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand)
pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'tau': PdMatrix(n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_tau)
pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'chol': PdMatrixChol(n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_chol)
pymc3_random(
pm.MvNormal,
{'mu': Vector(R, n), 'chol': PdMatrixCholUpper(n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_uchol,
extra_args={'lower': False}
)
def test_matrix_normal(self):
def ref_rand(size, mu, rowcov, colcov):
return st.matrix_normal.rvs(mean=mu, rowcov=rowcov, colcov=colcov, size=size)
# def ref_rand_tau(size, mu, tau):
# return ref_rand(size, mu, linalg.inv(tau))
def ref_rand_chol(size, mu, rowchol, colchol):
return ref_rand(size, mu, rowcov=np.dot(rowchol, rowchol.T),
colcov=np.dot(colchol, colchol.T))
def ref_rand_uchol(size, mu, rowchol, colchol):
return ref_rand(size, mu, rowcov=np.dot(rowchol.T, rowchol),
colcov=np.dot(colchol.T, colchol))
for n in [2, 3]:
pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'rowcov': PdMatrix(n), 'colcov': PdMatrix(n)},
size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand)
# pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'tau': PdMatrix(n)},
# size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_tau)
pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'rowchol': PdMatrixChol(n), 'colchol': PdMatrixChol(n)},
size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_chol)
# pymc3_random(
# pm.MvNormal,
# {'mu': RealMatrix(n, n), 'rowchol': PdMatrixCholUpper(n), 'colchol': PdMatrixCholUpper(n)},
# size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_uchol,
# extra_args={'lower': False}
# )
def test_kronecker_normal(self):
def ref_rand(size, mu, covs, sigma):
cov = pm.math.kronecker(covs[0], covs[1]).eval()
cov += sigma**2 * np.identity(cov.shape[0])
return st.multivariate_normal.rvs(mean=mu, cov=cov, size=size)
def ref_rand_chol(size, mu, chols, sigma):
covs = [np.dot(chol, chol.T) for chol in chols]
return ref_rand(size, mu, covs, sigma)
def ref_rand_evd(size, mu, evds, sigma):
covs = []
for eigs, Q in evds:
covs.append(np.dot(Q, np.dot(np.diag(eigs), Q.T)))
return ref_rand(size, mu, covs, sigma)
sizes = [2, 3]
sigmas = [0, 1]
for n, sigma in zip(sizes, sigmas):
N = n**2
covs = [RandomPdMatrix(n), RandomPdMatrix(n)]
chols = list(map(np.linalg.cholesky, covs))
evds = list(map(np.linalg.eigh, covs))
dom = Domain([np.random.randn(N)*0.1], edges=(None, None), shape=N)
mu = Domain([np.random.randn(N)*0.1], edges=(None, None), shape=N)
std_args = {'mu': mu}
cov_args = {'covs': covs}
chol_args = {'chols': chols}
evd_args = {'evds': evds}
if sigma is not None and sigma != 0:
std_args['sigma'] = Domain([sigma], edges=(None, None))
else:
for args in [cov_args, chol_args, evd_args]:
args['sigma'] = sigma
pymc3_random(
pm.KroneckerNormal, std_args, valuedomain=dom,
ref_rand=ref_rand, extra_args=cov_args, model_args=cov_args)
pymc3_random(
pm.KroneckerNormal, std_args, valuedomain=dom,
ref_rand=ref_rand_chol, extra_args=chol_args,
model_args=chol_args)
pymc3_random(
pm.KroneckerNormal, std_args, valuedomain=dom,
ref_rand=ref_rand_evd, extra_args=evd_args,
model_args=evd_args)
def test_mv_t(self):
def ref_rand(size, nu, Sigma, mu):
normal = st.multivariate_normal.rvs(cov=Sigma, size=size).T
chi2 = st.chi2.rvs(df=nu, size=size)
return mu + np.sqrt(nu) * (normal / chi2).T
for n in [2, 3]:
pymc3_random(pm.MvStudentT,
{'nu': Domain([5, 10, 25, 50]), 'Sigma': PdMatrix(
n), 'mu': Vector(R, n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand)
def test_dirichlet(self):
def ref_rand(size, a):
return st.dirichlet.rvs(a, size=size)
for n in [2, 3]:
pymc3_random(pm.Dirichlet, {'a': Vector(Rplus, n)},
valuedomain=Simplex(n), size=100, ref_rand=ref_rand)
def test_multinomial(self):
def ref_rand(size, p, n):
return nr.multinomial(pvals=p, n=n, size=size)
for n in [2, 3]:
pymc3_random_discrete(pm.Multinomial, {'p': Simplex(n), 'n': Nat},
valuedomain=Vector(Nat, n), size=100, ref_rand=ref_rand)
def test_gumbel(self):
def ref_rand(size, mu, beta):
return st.gumbel_r.rvs(loc=mu, scale=beta, size=size)
pymc3_random(pm.Gumbel, {'mu': R, 'beta': Rplus}, ref_rand=ref_rand)
def test_logistic(self):
def ref_rand(size, mu, s):
return st.logistic.rvs(loc=mu, scale=s, size=size)
pymc3_random(pm.Logistic, {'mu': R, 's': Rplus}, ref_rand=ref_rand)
def test_logitnormal(self):
def ref_rand(size, mu, sigma):
return expit(st.norm.rvs(loc=mu, scale=sigma, size=size))
pymc3_random(pm.LogitNormal, {'mu': R, 'sigma': Rplus}, ref_rand=ref_rand)
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
def test_interpolated(self):
for mu in R.vals:
for sigma in Rplus.vals:
#pylint: disable=cell-var-from-loop
def ref_rand(size):
return st.norm.rvs(loc=mu, scale=sigma, size=size)
class TestedInterpolated (pm.Interpolated):
def __init__(self, **kwargs):
x_points = np.linspace(mu - 5 * sigma, mu + 5 * sigma, 100)
pdf_points = st.norm.pdf(x_points, loc=mu, scale=sigma)
super().__init__(
x_points=x_points,
pdf_points=pdf_points,
**kwargs
)
pymc3_random(TestedInterpolated, {}, ref_rand=ref_rand)
@pytest.mark.skip('Wishart random sampling not implemented.\n'
'See https://github.com/pymc-devs/pymc3/issues/538')
def test_wishart(self):
# Wishart not currently recommended for use:
# https://github.com/pymc-devs/pymc3/issues/538
# for n in [2, 3]:
# pymc3_random_discrete(Wishart,
# {'n': Domain([2, 3, 4, 2000]) , 'V': PdMatrix(n) },
# valuedomain=PdMatrix(n),
# ref_rand=lambda n=None, V=None, size=None: \
# st.wishart(V, df=n, size=size))
pass
def test_lkj(self):
for n in [2, 10, 50]:
#pylint: disable=cell-var-from-loop
shape = n*(n-1)//2
def ref_rand(size, eta):
beta = eta - 1 + n/2
return (st.beta.rvs(size=(size, shape), a=beta, b=beta)-.5)*2
class TestedLKJCorr (pm.LKJCorr):
def __init__(self, **kwargs):
kwargs.pop('shape', None)
super().__init__(n=n, **kwargs)
pymc3_random(TestedLKJCorr,
{'eta': Domain([1., 10., 100.])},
size=10000//n,
ref_rand=ref_rand)
def test_normalmixture(self):
def ref_rand(size, w, mu, sigma):
component = np.random.choice(w.size, size=size, p=w)
return np.random.normal(mu[component], sigma[component], size=size)
pymc3_random(pm.NormalMixture, {'w': Simplex(2),
'mu': Domain([[.05, 2.5], [-5., 1.]], edges=(None, None)),
'sigma': Domain([[1, 1], [1.5, 2.]], edges=(None, None))},
extra_args={'comp_shape': 2},
size=1000,
ref_rand=ref_rand)
pymc3_random(pm.NormalMixture, {'w': Simplex(3),
'mu': Domain([[-5., 1., 2.5]], edges=(None, None)),
'sigma': Domain([[1.5, 2., 3.]], edges=(None, None))},
extra_args={'comp_shape': 3},
size=1000,
ref_rand=ref_rand)
def test_mixture_random_shape():
# test the shape broadcasting in mixture random
y = np.concatenate([nr.poisson(5, size=10),
nr.poisson(9, size=10)])
with pm.Model() as m:
comp0 = pm.Poisson.dist(mu=np.ones(2))
w0 = pm.Dirichlet('w0', a=np.ones(2))
like0 = pm.Mixture('like0',
w=w0,
comp_dists=comp0,
observed=y)
comp1 = pm.Poisson.dist(mu=np.ones((20, 2)),
shape=(20, 2))
w1 = pm.Dirichlet('w1', a=np.ones(2))
like1 = pm.Mixture('like1',
w=w1,
comp_dists=comp1,
observed=y)
comp2 = pm.Poisson.dist(mu=np.ones(2))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2020. Triad National Security, LLC. All rights reserved.
This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
Department of Energy/National Nuclear Security Administration. All rights in the program are
reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
Security Administration. The Government is granted for itself and others acting on its behalf a
nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
others to do so.
LANL software release C19112
Author: <NAME>
"""
import numpy as np
import scipy as sp
from scipy import stats
import matplotlib.pyplot as plt
from itertools import combinations, chain
from scipy.special import comb
from collections import namedtuple
from pathos.multiprocessing import ProcessingPool as Pool
import time
def abline(slope, intercept):
"""Plot a line from slope and intercept"""
axes = plt.gca()
x_vals = np.array(axes.get_xlim())
y_vals = intercept + slope * x_vals
plt.plot(x_vals, y_vals, '--', color='red')
pos = lambda a: (abs(a) + a) / 2 # same as max(0,a)
def const(signs, knots):
"""Get max value of BASS basis function, assuming 0-1 range of inputs"""
cc = np.prod(((signs + 1) / 2 - signs * knots))
if cc == 0:
return 1
return cc
def makeBasis(signs, vs, knots, xdata):
"""Make basis function using continuous variables"""
cc = const(signs, knots)
temp1 = pos(signs * (xdata[:, vs] - knots))
if len(signs) == 1:
return temp1 / cc
temp2 = np.prod(temp1, axis=1) / cc
return temp2
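# Minimal usage sketch on synthetic inputs in [0, 1]: one two-way interaction
# basis function (variables 0 and 2, opposite hinge directions). The const()
# scaling bounds the basis function by 1 on the unit cube.
_x_demo = np.random.rand(50, 3)
_b_demo = makeBasis(signs=np.array([1, -1]), vs=np.array([0, 2]),
                    knots=np.array([0.3, 0.7]), xdata=_x_demo)
assert _b_demo.shape == (50,) and _b_demo.max() <= 1.0 + 1e-12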
def normalize(x, bounds):
"""Normalize to 0-1 scale"""
return (x - bounds[:, 0]) / (bounds[:, 1] - bounds[:, 0])
def unnormalize(z, bounds):
"""Inverse of normalize"""
return z * (bounds[:, 1] - bounds[:, 0]) + bounds[:, 0]
def comb_index(n, k):
"""Get all combinations of indices from 0:n of length k"""
# https://stackoverflow.com/questions/16003217/n-d-version-of-itertools-combinations-in-numpy
count = comb(n, k, exact=True)
index = np.fromiter(chain.from_iterable(combinations(range(n), k)),
int, count=count * k)
return index.reshape(-1, k)
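# Example: all index pairs drawn from 0..2, one combination per row.
assert comb_index(3, 2).tolist() == [[0, 1], [0, 2], [1, 2]]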
def dmwnchBass(z_vec, vars_use):
"""Multivariate Walenius' noncentral hypergeometric density function with some variables fixed"""
alpha = z_vec[vars_use - 1] / sum(np.delete(z_vec, vars_use))
j = len(alpha)
ss = 1 + (-1) ** j * 1 / (sum(alpha) + 1)
for i in range(j - 1):
idx = comb_index(j, i + 1)
temp = alpha[idx]
ss = ss + (-1) ** (i + 1) * sum(1 / (temp.sum(axis=1) + 1))
return ss
Qf = namedtuple('Qf', 'R bhat qf')
def getQf(XtX, Xty):
"""Get the quadratic form y'X solve(X'X) X'y, as well as least squares beta and cholesky of X'X"""
try:
R = sp.linalg.cholesky(XtX, lower=False) # might be a better way to do this with sp.linalg.cho_factor
except np.linalg.LinAlgError as e:
return None
dr = np.diag(R)
if len(dr) > 1:
if max(dr[1:]) / min(dr) > 1e3:
return None
bhat = sp.linalg.solve_triangular(R, sp.linalg.solve_triangular(R, Xty, trans=1))
qf = np.dot(bhat, Xty)
return Qf(R, bhat, qf)
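# Minimal sketch: getQf recovers ordinary least squares through the Cholesky
# factor of X'X. On a well-conditioned synthetic design, bhat matches the true
# coefficients and qf = y'X (X'X)^{-1} X'y enters the marginal likelihood.
# Note getQf returns None when the factorization fails or is ill-conditioned.
_X_demo = np.column_stack([np.ones(20), np.linspace(0., 1., 20)])
_y_demo = 2.0 + 3.0 * _X_demo[:, 1]
_qf_demo = getQf(_X_demo.T @ _X_demo, _X_demo.T @ _y_demo)
assert _qf_demo is not None and np.allclose(_qf_demo.bhat, [2.0, 3.0])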
def logProbChangeMod(n_int, vars_use, I_vec, z_vec, p, maxInt):
"""Get reversibility factor for RJMCMC acceptance ratio, and also prior"""
if n_int == 1:
out = (np.log(I_vec[n_int - 1]) - np.log(2 * p) # proposal
+ np.log(2 * p) + np.log(maxInt))
else:
x = np.zeros(p)
x[vars_use] = 1
lprob_vars_noReplace = np.log(dmwnchBass(z_vec, vars_use))
out = (np.log(I_vec[n_int - 1]) + lprob_vars_noReplace - n_int * np.log(2) # proposal
+ n_int * np.log(2) + np.log(comb(p, n_int)) + np.log(maxInt)) # prior
return out
CandidateBasis = namedtuple('CandidateBasis', 'basis n_int signs vs knots lbmcmp')
def genCandBasis(maxInt, I_vec, z_vec, p, xdata):
"""Generate a candidate basis for birth step, as well as the RJMCMC reversibility factor and prior"""
n_int = int(np.random.choice(range(maxInt), p=I_vec) + 1)
signs = np.random.choice([-1, 1], size=n_int, replace=True)
# knots = np.random.rand(n_int)
knots = np.zeros(n_int)
if n_int == 1:
vs = np.random.choice(p)
knots = np.random.choice(xdata[:, vs], size=1)
else:
vs = np.sort(np.random.choice(p, size=n_int, p=z_vec, replace=False))
for i in range(n_int):
knots[i] = np.random.choice(xdata[:, vs[i]], size=1)
basis = makeBasis(signs, vs, knots, xdata)
lbmcmp = logProbChangeMod(n_int, vs, I_vec, z_vec, p, maxInt)
return CandidateBasis(basis, n_int, signs, vs, knots, lbmcmp)
BasisChange = namedtuple('BasisChange', 'basis signs vs knots')
def genBasisChange(knots, signs, vs, tochange_int, xdata):
"""Generate a condidate basis for change step"""
knots_cand = knots.copy()
signs_cand = signs.copy()
signs_cand[tochange_int] = np.random.choice([-1, 1], size=1)
knots_cand[tochange_int] = np.random.choice(xdata[:, vs[tochange_int]], size=1) # np.random.rand(1)
basis = makeBasis(signs_cand, vs, knots_cand, xdata)
return BasisChange(basis, signs_cand, vs, knots_cand)
class BassPrior:
"""Structure to store prior"""
def __init__(self, maxInt, maxBasis, npart, g1, g2, s2_lower, h1, h2, a_tau, b_tau, w1, w2):
self.maxInt = maxInt
self.maxBasis = maxBasis
self.npart = npart
self.g1 = g1
self.g2 = g2
self.s2_lower = s2_lower
self.h1 = h1
self.h2 = h2
self.a_tau = a_tau
self.b_tau = b_tau
self.w1 = w1
self.w2 = w2
return
class BassData:
"""Structure to store data"""
def __init__(self, xx, y):
self.xx_orig = xx
self.y = y
self.ssy = sum(y * y)
self.n = len(xx)
self.p = len(xx[0])
self.bounds = np.zeros([self.p, 2])
for i in range(self.p):
self.bounds[i, 0] = np.min(xx[:, i])
self.bounds[i, 1] = np.max(xx[:, i])
self.xx = normalize(self.xx_orig, self.bounds)
return
Samples = namedtuple('Samples', 's2 lam tau nbasis nbasis_models n_int signs vs knots beta')
Sample = namedtuple('Sample', 's2 lam tau nbasis nbasis_models n_int signs vs knots beta')
class BassState:
"""The current state of the RJMCMC chain, with methods for getting the log posterior and for updating the state"""
def __init__(self, data, prior):
self.data = data
self.prior = prior
self.s2 = 1.
self.nbasis = 0
self.tau = 1.
self.s2_rate = 1.
self.R = 1
self.lam = 1
self.I_star = np.ones(prior.maxInt) * prior.w1
self.I_vec = self.I_star / np.sum(self.I_star)
self.z_star = np.ones(data.p) * prior.w2
self.z_vec = self.z_star / np.sum(self.z_star)
self.basis = np.ones([data.n, 1])
self.nc = 1
self.knots = np.zeros([prior.maxBasis, prior.maxInt])
import numpy as np
import sys
def same_dist_elems(arr):
"""
Smart little script to check if indices are equidistant.
Found at https://stackoverflow.com/questions/58741961/how-to-check-if-consecutive-elements-of-array-are-evenly-spaced
Parameters
----------
arr : array_like
Input array
Returns
-------
bool
boolean value, True if array is equidistantly spaced, False otherwise
"""
diff = arr[1] - arr[0]
for x in range(1, len(arr) - 1):
if arr[x + 1] - arr[x] != diff:
return False
return True
def progressbar(it, prefix="", size=60, file=sys.stdout):
"""
Function to generate a progress bar. Does not work ideally... Found on stackexchange
"""
count = len(it)
def show(j):
x = int(size*j/count)
file.write("%s[%s%s] %i/%i\r" % (prefix, "#"*x, "."*(size-x), j, count))
file.flush()
show(0)
for i, item in enumerate(it):
yield item
show(i+1)
file.write("\n")
file.flush()
def mult(*args):
# Multiply elements one by one
result = 1
for x in args:
result = result * x
return result
def interp(x, y, wl_ran=(300, 1200), delta_lambda=1, kind='cubic', lowlim=400, uplim=1100):
"""
This function interpolates values between given input table values.
Parameters
----------
x : array_like
Input array of x values, eg. wavelength
Ex: np.array([100, 217, 350])
y : array_like
Input array of y values, eg. quantum efficieny, or mirror reflectance.
Ex: np.array([0.1, 0.7, 0.85])
wl_ran : tuple
wavelength span. Entries must be integers
delta_lambda : float
wavelength resolution, in nm.
kind : string
type of interpolation. Valid options are 'linear', 'quadratic' and 'cubic'.
lowlim : float
lower wavelength limit. Below this value, throughput will be set to 0
uplim : float
upper wavelength limit. Above this value, throughput will be set to 0
Returns
-------
interpolated : array_like
Interpolated values between wl_ran[0] and wl_ran[1], with a sharp cutoff below lowlim and above uplim.
Notes
-----
Check interp1d for more options.
"""
from scipy.interpolate import interp1d #Load neccessary package
import numpy as np
f = interp1d(x, y, kind=kind, fill_value="extrapolate") #interpolates, and extrapolates if the given table does not cover the wavelength range
# xnew = np.linspace(wl_ran[0], wl_ran[1], num=int((wl_ran[1]-wl_ran[0])/delta_lambda), endpoint=True) #Generates new x-values
xnew = np.arange(wl_ran[0], wl_ran[1], delta_lambda)
interp = f(xnew) #"Raw" interpolation
interpol= np.asarray([i if i>0 else 0 for i in interp]) #recast as numpy array for easier handling, and throws away values below 0
interpolated = np.stack((xnew,interpol), axis=-1) #Combine new x-values and interpolated
# To remove values below lower limit
for i in range(interpolated.shape[0]):
if interpolated[i,0]<lowlim:
interpolated[i,1]=0
if interpolated[i,0] > lowlim:
break
#To remove values above upper limit
for i in reversed(range(interpolated.shape[0])): #Start from top and goes down
if interpolated[i,0]>uplim:
interpolated[i,1]=0
if interpolated[i,0] < uplim:
break
return interpolated
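# Minimal usage sketch (made-up table values): interpolate a coarse quantum
# efficiency table onto a 1 nm grid, with the default sharp cutoffs applied
# below 400 nm and above 1100 nm.
_qe_demo = interp(np.array([300., 500., 800., 1100.]),
                  np.array([0.05, 0.70, 0.85, 0.10]),
                  wl_ran=(300, 1200), delta_lambda=1)
# _qe_demo[:, 0] holds the wavelength grid, _qe_demo[:, 1] the clipped curve.
assert _qe_demo.shape == (900, 2)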
def loadfunc(*args, wls, kind='cubic'):
result = 1
for x in args: #takes several input arrays
loaded = np.loadtxt(x)
if not loaded.shape[0] == (wls[1]-wls[0]): #if input is not of the correct length, this will interpolate
temp = interp(loaded[:,0], loaded[:,1], wl_ran=wls, kind=kind, lowlim=wls[0]-50, uplim=wls[1]+50)[:,1]
else:
temp = loaded
result = result * temp
return result
def CCD_maker(CCD_size, subpix=10, var=0.05, var2=0.05, grid_loss=0.6, smooth=5):
"""
This function creates a CCD composed of subpixels, with a separating grid between all full pixels.
The grid will have some loss.
Parameters
----------
CCD_size : array_like
Input size of CCD (in full pixels).
Ex: (10, 10)
subpix : int
Number of subpixels in each full pixel
var : float
Standard deviation of the initial Gaussian noise (the raw CCD is drawn from N(1-var, var))
var2 : float
Standard deviation of the additive Gaussian noise applied after the smoothing step
grid_loss: float
Loss in the grid. 1 = everything gets through, 0 = nothing gets through
smooth : float
Smoothness factor, previously called "stepsize": the number of neighbouring subpixels to correlate over during the smoothing phase. Must be larger than 1.
Returns
-------
CCD : ndarray
Output array of the CCD with the specified subpixel ammount, and size.
Notes
-----
Once used, remember to save the created CCD, as to not run the script again. It can take quite a while to make big arrays.
Examples
-------
>>> new_CCD = CCD_maker((240, 240), 10, 0.3, 0.7, 5)
array([[0.59858663, 0.59919131, 0.59980866, ..., 0.59164421, 0.59224492,
0.59108706],
...,
[0.63641557, 0.88710319, 0.60372464, ..., 0.91472067, 0.65503371,
0.96646196]])
"""
import numpy as np
import sys
gridsize = subpix #"size of pixels" in # of subpixels
x_size = CCD_size[0]*gridsize
y_size = CCD_size[1]*gridsize # number of subpixels
S = smooth #stepsize "smoothness", previously =5
CCD = np.random.normal(1-var, var, [x_size, y_size])#*var #Noise matrix
# noise = np.random.standard_normal((x_size, y_size))*var #Noise matrix
# CCD = np.ones((x_size,y_size)) #"Clean" matrix
# CCD = CCD-noise #Subtracts noise from "clean" CCD matrix
CCD2=np.zeros(CCD.shape)
#Correlate the subpixels
N = 3 # number of times to correlate
for t in np.arange(0,N):
for i in np.arange(0,x_size):
for j in np.arange(0,y_size):
bit = CCD[i:i+S, j:j+S-1] #cuts out a bit to treat
CCD2[i, j] = np.sum(np.sum(bit)/np.size(bit)) #correlates surrounding subpixels
sys.stdout.write('.'); sys.stdout.flush(); #"Progress bar", just for visuals
#Introduces grid, to mimic the actual pixels - seperate the subpixels by a grid with a slight loss defined by grid_loss variable.
grid = np.ones((CCD.shape[0], CCD.shape[1])) #Set up grid
grid[0::gridsize,:]=grid_loss #Sets gridloss for every 'gridsize' row (10)
grid[:,0::gridsize]=grid_loss #sets gridloss for every 'gridsize' coloumn (10)
#to see a visualization of this, use the variable explorer - type: %varexp --imshow grid
# noise2 = np.random.standard_normal((x_size, y_size))*var2
noise2 = np.random.normal(0, var2, [x_size, y_size])#*var2
# CCD2 = CCD2+noise2+1
CCD2 = CCD2-noise2
CCD2 = CCD2/np.mean(CCD2)
# CCD2 = CCD2/np.mean(CCD2)
CCD = CCD2*grid #overlays the grid on the CCD
# CCD = CCD/np.max(CCD)
return CCD
def psf_maker(file_name, wl_endpoints=(350, 1100), f=1, size=101, res=(100, 100)):
"""
Creates a new file containing the full-color PSF, computing one slice per wavelength directly (no interpolation between colors; compare psf_interp)
Parameters
----------
file_name : str
Desired name of the file.
wl_endpoints : tuple, optional
Two values that mark the first and last colors. The default is (350, 1100).
f : float
factor to multiply in the sigma values
size : int, optional
Size of the PSF. The default is 101.
res : tuple, optional
Resolution of the meshgrid used in the 2D Gaussian. Will affect the size of the PSF inversely: larger values mean a smaller PSF. Just a tweakable parameter. The default is (100, 100).
Returns
-------
.npy and .hdf5 files containing the PSF
"""
import os
import numpy as np
import h5py
path = os.getcwd() #Get current working directory
file_path = path + "/" + file_name +".hdf5" #Set up path to save file later
'''
numColors = int( (wl_endpoints[1]-wl_endpoints[0])/step) # Number of colors
x_size = size[0]
y_size = size[1] #Extracts from the size input
z = np.float128(np.zeros((res, res, numColors))) # Setup empty array for PSF-slices
x = np.float128(np.linspace(-x_size, x_size, res)) #Preparation for meshgrid
y = np.float128(np.linspace(-y_size, y_size, res))
xx, yy = np.meshgrid(x, y) #define meshgrid
for i in range(wl_endpoints[0], wl_endpoints[1], step): # for-loop to create one psf for each color
sigma_x = np.float128(np.log(i)+0.5*i/100) # Used in the 2D Gaussian
sigma_y = np.float128(np.log(i)+0.5*i/100)
# 2D Gaussian function, that takes sigma_x and _y as input variables. Also takes in the meshgrid xx and yy
zz = (1/(2*np.pi*sigma_x*sigma_y) * np.exp(-((xx)**2/(2*sigma_x**2)
+ (yy)**2/(2*sigma_y**2))))
zz = zz/np.sum(zz) # Normalizes, so the total value (the sum of the array) =1
z[:,:,i-350] = zz # put psf-"slice" into larger 3D array
'''
step=1
numColors = int( (wl_endpoints[1]-wl_endpoints[0])/step) # Number of colors
x_size = res[0]
y_size = res[1] #Extracts from the size input
z = np.zeros((size, size, numColors)) # Setup empty array for PSF-slices
x = np.linspace(-x_size, x_size, size) #Preparation for meshgrid
y = np.linspace(-y_size, y_size, size)
xx, yy = np.meshgrid(x, y) #define meshgrid
for i in range(wl_endpoints[0], wl_endpoints[1], step): # for-loop to create one psf for each color
# sigma_x = np.log(i)+f*i/100 # Used in the 2D Gaussian, old one
# sigma_y = np.log(i)+f*i/100
sigma_x = f*0.014285714285714285 * i + 20.714285714285715 # empirically determined slope, linear increase
sigma_y = f*0.014285714285714285 * i + 20.714285714285715
# 2D Gaussian function, that takes sigma_x and _y as input variables. Also takes in the meshgrid xx and yy
zz = (1/(2*np.pi*sigma_x*sigma_y) * np.exp(-((xx)**2/(2*sigma_x**2)
+ (yy)**2/(2*sigma_y**2))))
zz = zz/np.sum(zz) # Normalizes, so the total value (the sum of the array) =1
z[:,:,i-wl_endpoints[0]] = zz # put psf-"slice" into larger 3D array
if os.path.exists(file_path) == True: #If file already exists, it will be deleted
os.remove(file_path)
# Saving the psf as a hdf5 file in order to store the large file, using h5py
psf_file = h5py.File(file_path, "a")
psf_file.create_dataset('psf', data=z, dtype='f') # Place dataset in the .hdf5 file
np.save(file_name + "_raw.npy", z) #Save as .npy binary file
return print("New PSF done, saved as", file_name, ".npy")
def psf_interp(input_psf_images, input_psf_wl, wl_endpoints=(350, 1100), delta_lambda=1):
import sys
from scipy.interpolate import interp1d
print('\nInterpolating missing wavelengths in PSF... \n')
ran = range(wl_endpoints[0], wl_endpoints[1], delta_lambda) #set for-loop range
res = input_psf_images.shape[0] # Width of the input psf, so the created psf will have the same size
psf = np.zeros((input_psf_images.shape[0], input_psf_images.shape[1], wl_endpoints[1]-wl_endpoints[0])) #Creates empty array for the new psf
for i in range(res):
for j in range(res):
f_test = interp1d(input_psf_wl, input_psf_images[i,j,:], kind='quadratic', fill_value="extrapolate") #sets up interpolation function
psf[i,j,:] = f_test(ran) # interpolates at the wavelengths specified in the range
sys.stdout.write('.'); sys.stdout.flush(); #"Progress bar", just for visuals
print(' ')
print('Interpolation done')
print(' ')
return psf
def func_jitter (entries, gain, dt):
"""
Generates two jitter arrays, in x- and y.
Parameters
----------
entries : int
Number of entries in the desired jitter arrays
gain : float
Gain of the ADCS.
dt : int
Time delay
Returns
-------
x, y : array-like
Jitter in x- and y-directions
"""
x = np.zeros((entries+dt)) #allocates for arrays
y = np.zeros((entries+dt))
for i in range(entries+dt-1): #set up for loop
x[i+1] = x[i]+np.random.normal()-gain*x[i-dt] #next entry will be previous, plus a Gaussian number,
y[i+1] = y[i]+np.random.normal()-gain*y[i-dt] # and the correction is subtracted from the i-dt'th entry
x = x[dt-1:-1] #Cut off the initial dt entries.
y = y[dt-1:-1]
return x, y
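# Minimal usage sketch: 1000 jitter samples with ADCS gain 0.2 and a 10-step
# control delay (both values are illustrative). The RMS gives a quick feel for
# the pointing error in subpixels.
_jx_demo, _jy_demo = func_jitter(1000, gain=0.2, dt=10)
_rms_demo = np.sqrt(np.mean(_jx_demo**2 + _jy_demo**2))
assert _jx_demo.shape == (1000,)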
def func_slit(slit_size=[10,100], pos=[499, 499], image_size=[1000,1000]):
""" Creates a slit "mask" to overlay images.
Parameters
----------
slit_size : array_like, int
Size of slit: should be two numbers, width and height.
pos : array_like, int
Position of the slit, measured in subpixels.
image_size : array_like, int
Size of mask. Should be identical to size of the image upon which the mask is overlaid.
Returns
-------
mask : array_like
Mask is zero everywhere except in the slit, where the value is 1.
"""
width = slit_size[0] #Loads in size of slit
height = slit_size[1]
x_low = pos[0] - width #Finds boundaries
x_up = pos[0] + width
y_low = pos[1] - height
y_up = pos[1] + height
mask = np.zeros(image_size) #Creates empty mask
mask[y_low:y_up, x_low:x_up] = mask[y_low:y_up, x_low:x_up]+1 #Fills in the slit, so that only the slit has any throughput
return mask
def mag(mag_star, mag_ref=0):
"""Calculates the brightness difference based on magnitudes
Parameters
----------
mag_star : float
Magnitude of input star
mag_ref : float
magnitude of reference star"""
return 10**(0.4*((mag_ref)-(mag_star)))
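# Quick check of the magnitude scale: 5 magnitudes corresponds to a flux
# ratio of exactly 100, so a mag-5 star is 1/100th of the mag-0 reference.
assert abs(mag(5.0) - 0.01) < 1e-12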
# def jitter_im(x, y, psf_size):
# ''' Creates a jitter "image" - a matrix of the same dimensions (x & y) as the psf, used in the folding function
# NOTE: Will round of the position of the jitter to nearest subpixel!
# Parameters
# ----------
# x : array
# Input jitter x-coord.
# y : array
# Input jitter y-coord.
# psf_size : int, two values
# Size of the psf.
# Returns
# -------
# jitter : array
# Jitter image, where each point where the jitter "stops" has a +1 value. All other points are zero.
# '''
# jitter=np.zeros(psf_size) # Setup image
# # jitter2=np.zeros(psf_size)
# for i in range(len(x)):
# jitter[(x[i]+(psf_size[0]/2)).astype(int), (y[i]+(psf_size[1]/2)).astype(int)]= jitter[(x[i]+(psf_size[0]/2)).astype(int), (y[i]+(psf_size[1]/2)).astype(int)]+1
# # jitter2[x[i].astype(int)+int(np.floor(psf_size[0]/2)), y[i].astype(int)+int(np.floor(psf_size[1]/2))]= jitter[x[i].astype(int)+int(np.floor(psf_size[0]/2)), y[i].astype(int)+int(np.floor(psf_size[1]/2))]+1 # Create jitter "image". +1 to every point where the jitter "hits"
# return jitter#, jitter2
def jitter_im(x, y, psf_size):
''' Creates a jitter "image" - a matrix of the same dimensions (x & y) as the psf, used in the folding function
NOTE: Will round off the position of the jitter to the nearest subpixel!
Parameters
----------
x : array
Input jitter x-coord.
y : array
Input jitter y-coord.
psf_size : int, two values
Size of the psf.
Returns
-------
jitter : array
Jitter image, where each point where the jitter "stops" has a +1 value. All other points are zero.
'''
jitter=np.zeros(psf_size) # Setup image
# jitter2=np.zeros(psf_size)
for i in range(len(x)):
rang1 = (x[i]+(psf_size[0]/2)).astype(int)
rang2 = (y[i]+(psf_size[1]/2)).astype(int)
# print(rang1, rang2)
jitter[rang1, rang2] = jitter[rang1, rang2]+1
# Create jitter "image". +1 to every point where the jitter "hits"
return jitter#, jitter2
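# Sketch: turn a simulated jitter series into a pointing histogram the same
# size as a PSF slice, ready for convolution in folding()/spatial_dispersion().
# The clip keeps every hit inside the 101x101 frame (an assumption for this
# demo; real jitter should stay well within the PSF window).
_jx2, _jy2 = func_jitter(500, gain=0.2, dt=5)
_jx2, _jy2 = np.clip(_jx2, -49, 49), np.clip(_jy2, -49, 49)
_jit_demo = jitter_im(_jx2, _jy2, (101, 101))
assert _jit_demo.sum() == 500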
def folding(psf_image, jitter_image, mode='same', boundary='fill'):
#Clutter function, might as well just use signal.convolve2d
from scipy import signal
folded=signal.convolve2d(psf_image, jitter_image, mode=mode, boundary=boundary) #convolves the psf slice and the jitter image
return folded
#The correct disperser::::
def spatial_dispersion(wl_endpoints, jit_img, psf_ends, pos, image_size, dispersion, eff,
mask_img, steps=1, secondary_source='n', plot='n'):
import sys
from scipy import signal
from astropy.convolution import AiryDisk2DKernel
x_pos=pos[0]
y_pos=pos[1] #load in position of "zeroth order"
im_disp = np.zeros((image_size[0],image_size[1])) # empty image
im_disp_lambda = np.zeros((image_size[0],image_size[1]))
x_dispersion = dispersion[0] #load in dispersions
y_dispersion = dispersion[1]
numColors = int( (wl_endpoints[1]-wl_endpoints[0])) #total number of colours to iterate
# print("Number of colors to iterate: " + str(numColors))
# print(' ')
if plot=='y': #this part is not useful atm
import matplotlib.pyplot as plt
plt.figure()
from matplotlib.colors import LinearSegmentedColormap
N = 256 #8-bit value, to fix colours
colspec = plt.cm.get_cmap('Spectral') #Fetches colourmap to use later
vals = np.ones((N,4)) #Setup for colormap
temp = np.linspace(psf_ends[0], psf_ends[1], numColors)
for i in range(0, numColors, steps):
# for i in range(0,101, steps):
im = np.zeros((image_size[0],image_size[1])) #create temp. image
psf = AiryDisk2DKernel(temp[i], x_size=jit_img.shape[0], y_size=jit_img.shape[0]).array #PSF for this colour
if secondary_source == 'y': #To account for the secondary light source perhaps not being fully within the psf
# fold = folding(psf_img[:,:,i], jit_img)
fold = signal.convolve2d(psf[:,:,i], jit_img, mode='same', boundary='fill') #fold psf and jitter
fold = fold[0:jit_img.shape[1], 0:jit_img.shape[0]] #cut down to regular shape
else:
fold = signal.convolve2d(psf[:,:], jit_img, mode='same', boundary='fill') #fold as usual, if no sec. sources
# fold=fold/np.sum(fold)
foo = int(psf.shape[0]/2)
# im[0+x_pos-foo:len(jitter)+x_pos-foo, 0+y_pos-foo:len(jitter)+y_pos-foo] = im[0+x_pos-foo:len(jitter)+x_pos-foo, 0+y_pos-foo:len(jitter)+y_pos-foo] + fold*magni
im[0+y_pos-foo:len(fold)+y_pos-foo, 0+x_pos-foo:len(fold)+x_pos-foo] = fold #im[0+y_pos-foo:len(fold)+y_pos-foo, 0+x_pos-foo:len(fold)+x_pos-foo] + fold#*magni
immask = im*mask_img #mask is "overlaid" by multiplying
roll_x = np.roll(immask, int(np.modf(x_dispersion[i])[1]), axis=1) #move/disperse the light
roll_y = np.roll(roll_x, int(np.modf(y_dispersion[i])[1]), axis=0) #also in the y-direction
dx = abs(np.modf(x_dispersion[i])[0]) #residual amount (decimal amounts are shifted to the next sub-pixel)
dy = abs(np.modf(y_dispersion[i])[0])
foob = roll_y*(eff[i]*(1-dx)*(1-dy)) #multiply by efficiency
im_disp = im_disp + foob # Add the rolled image to the final, and multiply by the "effectivity"
roll_dx = np.roll(roll_y, 1, axis=1) # Roll the residual to the next subpixel
eff_dx = eff[i] * dx * (1-dy) # effectivity of the x-residual
roll_dy = np.roll(roll_y, 1, axis=0) # Roll the residual to the next subpixel, y-wise
eff_dy = eff[i] * dy * (1-dx) # y-residual eff.
roll_dxy = np.roll(roll_dx, 1, axis=0) # roll the image one step in both x- and y-wise.
eff_dxy = eff[i]* dx * dy #and eff.
baar = roll_dx*eff_dx + roll_dy*eff_dy + roll_dxy*eff_dxy
im_disp = im_disp + baar #add all residuals and multiply by their respective effectivities.
im_disp_lambda = im_disp_lambda+((foob+baar)*(i+wl_endpoints[0])) #fill in im_disp, and multiply by wavelength i
# im_disp_lambda = im_disp_lambda+(i+wl_endpoints[0]) #fill in im_disp, and multiply by wavelength i
# sys.stdout.write('/'); sys.stdout.flush(); #"Progress bar", just for visuals
##### Plotting #####
if plot == 'y':
vals[:, 0] = np.linspace(0, colspec(1-i/750)[0], N) #Making new colourmap values
vals[:, 1] = np.linspace(0, colspec(1-i/750)[1], N) #the /750 is to normalize the colormap, so values fall between 0 and 1
vals[:, 2] = np.linspace(0, colspec(1-i/750)[2], N)
vals[:, 3] = np.linspace(0, 1, N) #alpha, for making the cmap transparent
newcmp = LinearSegmentedColormap.from_list(name='Spectral', colors=vals) #Creates new cmp, based on vals
plt.imshow(roll_y, cmap=newcmp) # Show array
if plot=='y':
plt.title('Color dispersion of sample spectrum', size=18)
plt.xlabel('Sub-pixel', size=13)
plt.ylabel('Sub-pixel', size=13)
return im_disp, im_disp_lambda
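# Worked example of the fractional-shift bookkeeping above: a dispersion of,
# say, 2.3 subpixels splits one colour's flux between the pixel 2 steps away
# (weight 1 - 0.3) and the pixel 3 steps away (weight 0.3); the x and y splits
# multiply, which is where the eff_dx/eff_dy/eff_dxy terms come from.
_row_demo = np.zeros(8)
_row_demo[0] = 1.0                 # one colour, all flux in pixel 0
_dx_demo = np.modf(2.3)[0]         # fractional part of the dispersion
_shifted_demo = (np.roll(_row_demo, 2) * (1 - _dx_demo)
                 + np.roll(_row_demo, 3) * _dx_demo)
assert abs(_shifted_demo[2] - 0.7) < 1e-9 and abs(_shifted_demo[3] - 0.3) < 1e-9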
def ccd_interp(inCCD, wls, img, img_wl):
"""
Interpolator used to find the subpixel sensitivity for all wavelengths (not just the ones created by ccd_maker)
Parameters
----------
inCCD : array
Input CCD array, can be made using ccd_maker.
wls : array
Corresponding wavelengths. Must have the same size as the depth of inCCD
img : array
Input image, from disperser2.
img_wl : array
Input image wavelengths.
Returns
-------
new_img : array
Image "multiplied" by the CCD, using the interpolated sensitivities for each subpixel.
"""
import sys
from scipy.interpolate import interp1d
if wls.shape[0] != inCCD.shape[2]:
raise TypeError("Wavelength array and input CCD depth not same size")
if not inCCD.shape[0:2] == img.shape[0:2] == img_wl.shape:
raise TypeError("CCD and image not same size")
new_img = np.zeros((img.shape[0], img.shape[1]))
for i in range(0, inCCD.shape[0]):
for j in range(0, inCCD.shape[1]):
interp = interp1d(wls, inCCD[i,j,:], kind="slinear", fill_value="extrapolate")
new_img[i,j] = img[i,j]*interp(img_wl[i,j])
sys.stdout.write('.'); sys.stdout.flush();
return new_img
def read_out(dispersed):
'''
Will sum up the "photons" in the y-direction of the input dispersed image.
Parameters
----------
dispersed : array, 2 dimensional
Dispersed image-array.
Returns
-------
counts : array
Array of counts in the y-direction.
'''
counts = np.array(())
for i in range(dispersed.shape[1]):
counts = np.append(counts, np.sum(dispersed[:,i]))
return counts
def read_outx(dispersed):
'''
Will sum up the "photons" in the X-direction of the input dispersed image.
'''
counts = np.array(())
for i in range(dispersed.shape[0]):
counts = np.append(counts, np.sum(dispersed[i,:]))
return counts
def bin_sum(inp, bin_size):
"""
Returns a binned version of inp, with each bin being bin_size in each dimension. The bins are summed up.
Parameters
----------
inp : array_like
Input array. Must be 2D.
bin_size : int
Bin size. Division of input shape and bin_size should be a whole number, i.e. no 8.333 etc.
Returns
-------
binned : array
Array of inp.shape/bin_size in shape, with the bins summed up.
"""
# Check if bin_size is whole divisor of inp.shape
if not np.modf(inp.shape[0]/bin_size)[0] == 0 == np.modf(inp.shape[1]/bin_size)[0]:
raise TypeError("Input shape and bin size divided must be a whole number. (mod = 0)")
temp = np.zeros((inp.shape[0], int(inp.shape[1]/bin_size) )) #Create empty matrix for first step
summed = np.zeros((int(inp.shape[0]/bin_size), int(inp.shape[1]/bin_size) )) #Empty matrix for second step
for x in range(0, inp.shape[1], bin_size): #Range for 1st
j = range(0+x, bin_size+x) #Bin range. ex. 20-30 if bin_size is 10
for i in range(0, inp.shape[0]): # over all columns
temp[i, int(j[0]/bin_size)]= sum(inp[i,j]) #sum, and add to temp
for x in range(0, inp.shape[0], bin_size): #2nd step, repeat 1st step, but for rows
i = range(0+x, bin_size+x) #row bin-range.
for j in range(0, summed.shape[1]):
summed[int(i[0]/bin_size), j]= sum(temp[i,j]) #sum and add to result-matrix
return summed
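# Minimal check: collapsing a 6x6 subpixel image with bin_size=3 yields a
# 2x2 full-pixel image and conserves the total signal.
_img_demo = np.arange(36.).reshape(6, 6)
_binned_demo = bin_sum(_img_demo, 3)
assert _binned_demo.shape == (2, 2) and _binned_demo.sum() == _img_demo.sum()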
def noise(size, image, RON=5):
noise = np.zeros((size[0], size[1]))
for i in range(size[0]):
for j in range(size[1]):
noise[i,j] = (np.sqrt(image[i,j])+RON)*np.random.normal(0,1)
return noise
def convert_plate_pix(plate_scale, pix_size):
"""
Plate scale is calculated with the equation:
P = 206265 / (D*f/#)
206265 is the amount of arcsecs in a radian.
D is the diameter of the telescope
f/# is the f-number: Focal length/Diameter
( http://www-supernova.lbl.gov/~sed/telescope/obsguide/platescale.html )
For a telescope of 20 cm, and focal length of 50 cm, the plate scale is 412.53 arcsec/mm
Parameters
----------
plate_scale : float
Must be in arcsec/mm.
pix_size : float
Must be in mm/pixel.
Returns
-------
convert_factor : float
How large a sky area a single pixel width covers.
"""
convert_factor = plate_scale * pix_size # [arcsec per pix] = [arcsec/mm] * [mm/pix]
return convert_factor
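# Worked example of the plate-scale arithmetic from the docstring (illustrative
# values only): D = 200 mm with focal length 500 mm gives f/# = 2.5, so
# P = 206265 / (200 * 2.5) ~= 412.53 arcsec/mm. The 0.013 mm pixel size below
# is a hypothetical detector, not a value from this simulator.
def _demo_convert_plate_pix():
    plate_scale = 206265.0 / (200.0 * 2.5)           # ~412.53 arcsec/mm
    arcsec_per_pix = convert_plate_pix(plate_scale, pix_size=0.013)
    assert abs(arcsec_per_pix - 5.363) < 0.01        # ~5.36 arcsec/pixel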
def convert_slit(unit, size, convert_factor):
if not type(unit) == str:
raise TypeError("unit must be a string")
if not ((unit == 'pix') or (unit == 'ang')):
raise TypeError("unit must be either 'ang' or 'pix'")
if unit == 'ang':
slit_size = np.divide(size, convert_factor)
if unit == 'pix':
slit_size = size
return slit_size
def setup(input_file):
import warnings
in_spec = np.loadtxt(input_file.in_spec) #Input science spectrum
in_spec2 = np.loadtxt(input_file.in_spec2) #Input science spectrum
col_area = input_file.col_area # Collecting area
sub_pixel = input_file.sub_pixel # Amount of sub-pixels per full pixel
img_size = input_file.img_size #Size of the CCD, in pixels
pl_scale = input_file.pl_scale # Plate scale
pix_size = input_file.pix_size # Pixel size
bg_spec = np.loadtxt(input_file.bg_spec) # Background spectrum, i.e. zodiacal light
exp = input_file.exp # Exposure time
wl_ran = input_file.wl_ran # Wavelength range
eta_in = input_file.eta_in # Spectral throughput of the entire system. Requires at minimum the CCD QE
slit = input_file.slit # Slit size. Unit first, then width and height
#psf = np.load(input_file.psf) # Point Spread Function of the optics etc.
#psf_col = input_file.psf_col
#psf_col = np.arange(300, 1000)
disper = np.load(input_file.disper) #Dispersion of the spectrograph
####### Optionals ########
if not input_file.jitter:
jitter = ''
else:
jitter = np.load(input_file.jitter) # Spacecraft jitter
step = input_file.step # Step size. Only needed if jitter is left empty
in_CCD = np.load(input_file.in_CCD) # Input CCD imperfections. Sub-pixel variations
CCD_col = input_file.CCD_col # CCD colours, respective to each slice in in_CCD
img_size[0] = img_size[0]*sub_pixel
img_size[1] = img_size[1]*sub_pixel
pl_arc_mm = convert_plate_pix(pl_scale, pix_size=pix_size)
disper[0] = disper[0]*sub_pixel
# disper[1] = disper[1]*sub_pixel
if not (isinstance(wl_ran[0], int) and isinstance(wl_ran[1], int)):  # both endpoints must be integers
raise TypeError("wl_ran must be a tuple with two integers")
span = wl_ran[1]-wl_ran[0]
foo = 1
args = eta_in
for x in args: #takes several input arrays
loaded = np.loadtxt(x)
if not loaded.shape[0] == span: #if input is not of the correct length, this will interpolate
temp = interp(loaded[:,0], loaded[:,1], wl_ran=wl_ran, kind='cubic', lowlim=wl_ran[0]-50, uplim=wl_ran[1]+50)[:,1]
else:
temp = loaded
foo = foo * temp
eta_in = foo
del foo, args, temp, loaded
#Handling the input spectrum and SEC/TEC
if not in_spec.shape[0] == span:
in_spec = interp(x=in_spec[:,0], y=in_spec[:,1], wl_ran=wl_ran, kind='cubic', lowlim=wl_ran[0]-50, uplim=wl_ran[1]+50)
if not eta_in.shape[0] == span:
raise TypeError("eta_in must cover the range of wavelengths: " + str(span) + " entries, from " + str(wl_ran[0]) +" to " +str(wl_ran[1]))
spec_eff = in_spec[:,1] * col_area * eta_in
if not in_spec2.shape[0] == span:
in_spec2 = interp(x=in_spec2[:,0], y=in_spec2[:,1], wl_ran=wl_ran, kind='cubic', lowlim=wl_ran[0]-50, uplim=wl_ran[1]+50)
if not eta_in.shape[0] == span:
raise TypeError("eta_in must cover the range of wavelengths: " + str(span) + " entries, from " + str(wl_ran[0]) +" to " +str(wl_ran[1]))
spec_eff2 = in_spec2[:,1] * col_area * eta_in
#Slit is created here
slit_size = convert_slit(unit = slit[0], size = slit[1:3], convert_factor = pl_arc_mm) #Convert slit size to pixels
slit_size[0] = slit_size[0]*sub_pixel #Convert to subpixels
slit_size[1] = slit_size[1]*sub_pixel
slitpos = [150, 249] #Slit position on the sub-pixel CCD image. Arbitrary position.
mask = func_slit(slit_size = np.floor(slit_size).astype(int), pos=slitpos, image_size=img_size) #Generate mask used to overlay before actual dispersion later.
#Background spectrum gets handled here. A background image of exp = 1s will be created, and can be scaled and overlaid on the final image
# new_bg = input("Do you wish to generate a new background? (y/n): ")
new_bg = "n"
if new_bg == 'y':
if not bg_spec.shape[0] == span: #interpolate if values are missing
bg_spec = interp(x=bg_spec[:,0], y=bg_spec[:,1], wl_ran=wl_ran, kind='cubic', lowlim=wl_ran[0]-50, uplim=wl_ran[1]+50)
print("\nInterpolated missing values in background spectrum")
detector_area = (pl_arc_mm*img_size[0]/sub_pixel)*(pl_arc_mm*img_size[1]/sub_pixel) #Collecting area of the detector measured in arcsec^2
bg_spec = bg_spec*detector_area #Multiply by detector area
bg_psf = np.ones((101, 101, wl_ran[1]-wl_ran[0]))
x_j, y_j = func_jitter(entries=(exp*step), gain=0.15, dt=5) #This jitter will be a single point at the center of the jitter image
bg_jit = jitter_im(x= x_j, y= y_j, psf_size=(bg_psf[:,:,0].shape[0], bg_psf[:,:,0].shape[0]) ) #Creating jitter "image"
background, background_wl = spatial_dispersion(wl_endpoints=wl_ran, jit_img=bg_jit, psf_img=bg_psf, pos=slitpos, image_size=img_size, dispersion=disper, eff = bg_spec[:,1], mask_img=mask, steps=1, plot='n' )
np.save('background.npy', background) #saving the background image for later use.
del x_j, y_j, bg_jit, background_wl, bg_spec #getting rid of unnecessary variables
else:
background = np.load('../sample_values/background.npy')
try: #If jitter is not defined, new jitter will be generated
jitter
except NameError:
try:
step
except NameError:
raise NameError("Either jitter or step must be specified")
x_j, y_j = func_jitter(entries=(exp*step), gain=0.15, dt=5)
# x_j, y_j = simfun.jitter(entries=(exp*step), gain=0.02, dt=10)
jitter = np.stack((x_j, y_j), axis=-1)
spec_eff = spec_eff/step #If the generated jitter is used, the spectrum must be in step size, not seconds
spec_eff2 = spec_eff2/step
with warnings.catch_warnings(): #This is to suppress the potential "FutureWarning" error message. Comparing np-array to str etc. Might cause errors down the line?
warnings.simplefilter(action='ignore', category=FutureWarning)
if jitter == '': #if jitter is an empty str, it will also be generated.
if step == '': #step must be specified
raise TypeError("If jitter is unspecified, step must be explicitly specified")
x_j, y_j = func_jitter(entries=(exp*step), gain=0.15, dt=5) #New jitter, will have epx*step length
# x_j, y_j = simfun.jitter(entries=(exp*step), gain=0.02, dt=10)
jitter = | np.stack((x_j, y_j), axis=-1) | numpy.stack |
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
import os
class StrikerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
self.ball = np.array([0.5, -0.3]) # -0.3 original -0.175
self.goal = np.array([0, 1])
utils.EzPickle.__init__(self)
self._striked = False
self.strike_threshold = 0.1
mujoco_env.MujocoEnv.__init__(self, os.path.dirname(__file__) + '/assets/striker.xml', 5)
def _step(self, a):
vec_1 = self.get_body_com("object") - self.get_body_com("tips_arm")
vec_2 = self.get_body_com("object") - self.get_body_com("goal")
if np.linalg.norm(vec_1) < self.strike_threshold:
self._striked = True
self._strike_pos = self.get_body_com("tips_arm")
if self._striked:
vec_3 = self.get_body_com("object") - self._strike_pos
reward_near = - np.linalg.norm(vec_3)
else:
reward_near = - np.linalg.norm(vec_1)
reward_dist = - np.linalg.norm(vec_2)
reward_ctrl = - np.square(a).sum()
reward = 3 * reward_dist + 0.1 * reward_ctrl + 0.5 * reward_near
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
done = False
return ob, reward, done, dict(reward_dist=reward_dist,
reward_ctrl=reward_ctrl, reward_near=reward_near)
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0
self.viewer.cam.distance = 4.0
def reset_model(self):
self._min_strike_dist = np.inf
self._striked = False
self._strike_pos = None
qpos = self.init_qpos
# table (-1~1, -0.5~1.5)
# goal range (-0.8~0.8, 0.5~1.3)
# safe ball range (0.3~0.7, -0.4~0)
self.ball = np.array([0.5, -0.3]) # -0.3 original -0.175
self.goal = np.array([0, 1])
qpos[:7] = [-0.2, 0.5, -1.7, -1.5, 1, 0, 0] # a good robot initial condition
qpos[-9:-7] = [self.ball[1], self.ball[0]]
qpos[-7:-5] = self.goal
diff = self.ball - self.goal
angle = - | np.arctan(diff[0] / (diff[1] + 1e-8)) | numpy.arctan |
import argparse, time, random, os
import numpy as np
import torch
import torch.nn as nn
from model.GIN.gin_all_fast import GIN
from model.GCN.gcn_all import GCN
from Temp.dataset import GINDataset
from utils.GIN.data_loader import GraphDataLoader, collate
from utils.scheduler import LinearSchedule
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.random.manual_seed(seed)
if args.gpu >= 0:  # NB: relies on the module-level args parsed in __main__
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def grad_norm(net):
ret = 0
for param in net.parameters():
ret += torch.norm(param.grad.data)**2 if param.grad is not None else 0.0
return torch.sqrt(ret).data.cpu().numpy()
def param_norm(net):
ret = 0
for param in net.parameters():
ret += torch.norm(param.data)**2
return torch.sqrt(ret).data.cpu().numpy()
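# Minimal usage sketch (illustrative only): the two helpers return the global
# L2 norm of all gradients / parameters, as logged by the --log_norm option.
def _demo_norms():
    net = nn.Linear(4, 2)
    net(torch.randn(3, 4)).sum().backward()   # populate gradients
    return grad_norm(net), param_norm(net)    # two numpy scalars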
def evaluate(model, dataloader, loss_fcn):
model.eval()
total = 0
total_loss = 0
total_correct = 0
with torch.no_grad():
for data in dataloader:
graphs, labels = data
feat = graphs.ndata['attr'].cuda()
labels = labels.cuda()
total += len(labels)
outputs = model(graphs, feat)
_, predicted = torch.max(outputs.data, 1)
total_correct += (predicted == labels.data).sum().item()
loss = loss_fcn(outputs, labels)
total_loss += loss * len(labels)
loss, acc = 1.0 * total_loss / total, 1.0 * total_correct / total
return loss, acc
def task_data(args, dataset=None):
# step 0: setting for gpu
if args.gpu >= 0:
torch.cuda.set_device(args.gpu)
# step 1: prepare dataset
if dataset is None:
dataset = GINDataset(args.dataset, args.self_loop, args.degree_as_label)
# step 2: prepare data_loader
train_loader, valid_loader = GraphDataLoader(
dataset, batch_size=args.batch_size, device=args.gpu,
collate_fn=collate, seed=args.seed, shuffle=True,
split_name=args.split_name, fold_idx=args.fold_idx
).train_valid_loader()
return dataset, train_loader, valid_loader
def task_model(args, dataset):
# step 1: prepare model
assert args.model in ['GIN', 'GCN']
if args.model == 'GIN':
model = GIN(
args.n_layers, args.n_mlp_layers,
dataset.dim_nfeats, args.n_hidden, dataset.gclasses,
args.dropout, args.learn_eps,
args.graph_pooling_type, args.neighbor_pooling_type,
args.norm_type
)
elif args.model == 'GCN':
model = GCN(
args.n_layers, dataset.dim_nfeats, args.n_hidden,
dataset.gclasses, args.dropout, args.graph_pooling_type,
norm_type=args.norm_type
)
else:
raise ValueError('Unsupported model: %s' % args.model)  # raising a bare string is a TypeError in Python 3
if args.gpu >= 0:
model = model.cuda()
# step 2: prepare loss
loss_fcn = nn.CrossEntropyLoss()
# step 3: prepare optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
return model, loss_fcn, optimizer
def train(args, train_loader, valid_loader, model, loss_fcn, optimizer):
scheduler = LinearSchedule(optimizer, args.epoch)
dur = []
record = {}
grad_record = {}
param_record = {}
for epoch in range(args.epoch):
model.train()
t0 = time.time()
for graphs, labels in train_loader:
labels = labels.cuda()
features = graphs.ndata['attr'].cuda()
outputs = model(graphs, features)
optimizer.zero_grad()
loss = loss_fcn(outputs, labels)
loss.backward()
optimizer.step()
dur.append(time.time() - t0)
print('Average Epoch Time {:.4f}'.format(float(sum(dur)/len(dur))))
valid_loss, valid_acc = evaluate(model, valid_loader, loss_fcn)
train_loss, train_acc = evaluate(model, train_loader, loss_fcn)
print('Train acc {:.4f}'.format(float(train_acc)))
print('Test acc {:.4f}'.format(float(valid_acc)))
record[epoch] = (np.mean(dur), train_loss.item(), float(train_acc),
valid_loss.item(), float(valid_acc))
if args.log_norm:
grad_n = grad_norm(model)
param_n = param_norm(model)
grad_record[epoch] = grad_n
param_record[epoch] = param_n
scheduler.step()
return record, grad_record, param_record
def main(args):
dataset = None
result_record = {}
grad_record = {}
param_record = {}
set_seed(args.seed)
if args.cross_validation:
for fold_idx in range(10):
args.fold_idx = fold_idx
dataset, train_loader, valid_loader = task_data(args, dataset)
model, loss_fcn, optimizer = task_model(args, dataset)
result_record[args.fold_idx], grad_record[args.fold_idx], param_record[args.fold_idx] = train(args, train_loader, valid_loader, model, loss_fcn, optimizer)
else:
dataset, train_loader, valid_loader = task_data(args, dataset)
model, loss_fcn, optimizer = task_model(args, dataset)
result_record[args.fold_idx], grad_record[args.fold_idx], param_record[args.fold_idx] = train(args, train_loader, valid_loader, model, loss_fcn, optimizer)
return result_record, grad_record, param_record
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='GIN')
# 1) general params
parser.add_argument("--gpu", type=int, default=0,
help="gpu")
parser.add_argument("--seed", type=int, default=9,
help='random seed')
parser.add_argument("--self_loop", action='store_true',
help='add self_loop to graph data')
parser.add_argument("--log_dir", type=str, help='path to the output log file')
parser.add_argument("--data_dir", type=str, help='path to the datas')
parser.add_argument('--exp', type=str, help='experiment name')
parser.add_argument(
'--dataset', type=str, default='MUTAG',
choices=['MUTAG', 'PTC', 'NCI1', 'PROTEINS', 'COLLAB', 'IMDBBINARY', 'IMDBMULTI', 'REDDITBINARY', 'REDDITMULTI5K'],
help='name of dataset (default: MUTAG)'
)
# 2) model params
parser.add_argument("--model", type=str, default='GIN',
help='graph models')
parser.add_argument("--lr", type=float, default=1e-2,
help="learning rate")
parser.add_argument("--dropout", type=float, default=0.5,
help='dropout probability')
parser.add_argument("--epoch", type=int, default=400,
help="number of training epochs")
parser.add_argument("--n_hidden", type=int, default=64,
help='number of hidden gcn layers')
parser.add_argument("--weight_decay", type=float, default=0.0,
help='Weight for L2 Loss')
parser.add_argument("--n_layers", type=int, default=5,
help='num of layers')
parser.add_argument("--n_mlp_layers", type=int, default=2,
help='num of mlp layers')
parser.add_argument('--batch_size', type=int, default=128,
help='batch size for training and validation (default: 128)'
)
parser.add_argument('--fold_idx', type=int, default=0,
help='the index (<10) of the fold in 10-fold validation'
)
parser.add_argument('--graph_pooling_type', type=str,
default="sum", choices=["sum", "mean", "max"],
help='type of graph pooling: sum, mean or max')
parser.add_argument('--neighbor_pooling_type', type=str,
default="sum", choices=["sum", "mean", "max"],
help='type of neighboring pooling: sum, mean or max')
parser.add_argument('--split_name', type=str,
default='fold10', choices=['fold10', 'rand'],
help='cross validation split type')
parser.add_argument('--learn_eps', action="store_true",
help='learn the epsilon weighting')
# 3) specific params
parser.add_argument('--cross_validation', action='store_true',
help='Do 10-fold-Cross validation')
parser.add_argument('--log_norm', action='store_true',
help='log normalization information')
parser.add_argument('--degree_as_label', action='store_true',
help='use node degree as node labels')
parser.add_argument('--norm_type', type=str,
default='gn',
help='type of normalization')
args = parser.parse_args()
os.environ['DGL_DOWNLOAD_DIR'] = args.data_dir
print(args)
result, grad, param = main(args)
def output_result(args, result, grad=None, param=None):
raw_record = {}
for seed in result.keys():
content = result[seed]
for epoch in content.keys():
time, train_loss, train_acc, \
valid_loss, valid_acc = content[epoch]
if epoch not in raw_record.keys():
raw_record[epoch] = {
'time': [],
'train_loss':[],
'train_acc':[],
'test_loss':[],
'test_acc':[]
}
raw_record[epoch]['time'].append(time)
raw_record[epoch]['train_loss'].append(train_loss)
raw_record[epoch]['train_acc'].append(train_acc)
raw_record[epoch]['test_loss'].append(valid_loss)
raw_record[epoch]['test_acc'].append(valid_acc)
if args.log_norm: assert grad is not None and param is not None  # norms are only expected when --log_norm is set
grad_record = {}
if grad is not None:
for seed in grad.keys():
content = grad[seed]
for epoch in content.keys():
grad_n = content[epoch]
if epoch not in grad_record.keys():
grad_record[epoch] = {
'grad': [],
}
grad_record[epoch]['grad'].append(grad_n)
param_record = {}
if param is not None:
for seed in param.keys():
content = param[seed]
for epoch in content.keys():
param_n = content[epoch]
if epoch not in param_record.keys():
param_record[epoch] = {
'param': [],
}
param_record[epoch]['param'].append(param_n)
import xlwt
import os
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
save_path = os.path.join(args.log_dir, '%s-bs-%d-dp-%.2f-hidden-%d-wd-%.4f-nl-%d-epoch-%d-lr-%.4f-Norm-%s-log.xls' %
(args.exp, args.batch_size, args.dropout, args.n_hidden, args.weight_decay,
args.n_layers, args.epoch, args.lr, args.norm_type))
f = xlwt.Workbook()
sheet = f.add_sheet("result")
sheet.write(0, 0, 'time')
sheet.write(0, 1, 'train_loss')
sheet.write(0, 2, 'train_acc')
sheet.write(0, 3, 'test_loss')
sheet.write(0, 4, 'test_acc')
sheet.write(0, 5, 'test_max_acc')
sheet.write(0, 6, 'test_min_acc')
sheet.write(0, 7, 'test_std_acc')
sheet.write(0, 8, 'train_max_acc')
sheet.write(0, 9, 'train_min_acc')
sheet.write(0, 10, 'train_std_acc')
for epoch in range(len(raw_record.keys())):
time = | np.mean(raw_record[epoch]['time']) | numpy.mean |
"""On-line (live) plots of the DA process for various models and methods.
Liveplotters are given by a list of tuples as property or arguments in
`dapper.mods.HiddenMarkovModel`.
- The first element of the tuple determines whether the liveplotter is shown if
the names of liveplotters are not given by the `liveplots` argument in
`assimilate`.
- The second element in the tuple gives the corresponding liveplotter
function/class. See example of function `LPs` in `dapper.mods.Lorenz63`.
The liveplotters can be fine-tuned for each DA experiment via the
`liveplots` argument when calling `assimilate`.
- `liveplots = True` turns on the liveplotters marked as defaults (the first
element of each tuple in `HMM.liveplotters`) together with the default
liveplotters defined in this module (`sliding_diagnostics` and `weight_histogram`).
- `liveplots` can also be a list of liveplotter names, i.e. the names of the
corresponding liveplotting classes/functions.
"""
import matplotlib as mpl
import numpy as np
import scipy.linalg as sla
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d.art3d import juggle_axes
from mpl_tools import is_notebook_or_qt, place, place_ax
from numpy import arange, nan, ones
from struct_tools import DotDict, deep_getattr
import dapper.tools.progressbar as pb
import dapper.tools.viz as viz
from dapper.dpr_config import rc
from dapper.mods.utils import linspace_int
from dapper.tools.chronos import format_time
from dapper.tools.matrices import CovMat
from dapper.tools.progressbar import read1
from dapper.tools.series import FAUSt, RollingArray
from dapper.tools.viz import not_available_text, plot_pause
class LivePlot:
"""Live plotting manager.
Deals with
- Pause, skip.
- Which liveploters to call.
- `plot_u`
- Figure window (title and number).
"""
def __init__(self,
stats,
liveplots,
key0=(0, None, 'u'),
E=None,
P=None,
speed=1.0,
replay=False,
**kwargs):
"""
Initialize plots.
- liveplots: figures to plot; alternatives:
- `"default"/[]/True`: All default figures for this HMM.
- `"all"` : Even more.
- non-empty `list` : Only the figures with these numbers
(int) or names (str).
- `False` : None.
- speed: speed of animation.
- `>100`: instantaneous
- `1` : (default) as quick as possible allowing for
plt.draw() to work on a moderately fast computer.
- `<1` : slower.
"""
# Disable if not rc.liveplotting
self.any_figs = False
if not rc.liveplotting:
return
# Determine whether all/universal/intermediate stats are plotted
self.plot_u = not replay or stats.store_u
# Set speed/pause params
self.params = {
'pause_f': 0.05,
'pause_a': 0.05,
'pause_s': 0.05,
'pause_u': 0.001,
}
# If speed>100: set to inf. Coz pause=1e-99 causes hangup.
for pause in ["pause_"+x for x in "faus"]:
speed = speed if speed < 100 else np.inf
self.params[pause] /= speed
# Write params
self.params.update(getattr(stats.xp, "LP_kwargs", {}))
self.params.update(kwargs)
def get_name(init):
"""Get name of liveplotter function/class."""
try:
return init.__qualname__.split(".")[0]
except AttributeError:
return init.__class__.__name__
# Set up dict of liveplotters
potential_LPs = {}
for show, init in default_liveplotters:
potential_LPs[get_name(init)] = show, init
# Add HMM-specific liveplotters
for show, init in getattr(stats.HMM, 'liveplotters', {}):
potential_LPs[get_name(init)] = show, init
def parse_figlist(lst):
"""Figures requested for this xp. Convert to list."""
if isinstance(lst, str):
fn = lst.lower()
if "all" == fn:
lst = ["all"] # All potential_LPs
elif "default" in fn:
lst = ["default"] # All show_by_default
elif hasattr(lst, '__len__'):
lst = lst # This list (only)
elif lst:
lst = ["default"] # All show_by_default
else:
lst = [None] # None
return lst
figlist = parse_figlist(liveplots)
# Loop over requeted figures
self.figures = {}
for name, (show_by_default, init) in potential_LPs.items():
if (figlist == ["all"]) or \
(name in figlist) or \
(figlist == ["default"] and show_by_default):
# Startup message
if not self.any_figs:
print('Initializing liveplots...')
if is_notebook_or_qt:
pauses = [self.params["pause_" + x] for x in "faus"]
if any((p > 0) for p in pauses):
print("Note: liveplotting does not work very well"
" inside Jupyter notebooks. In particular,"
" there is no way to stop/skip them except"
" to interrupt the kernel (the stop button"
" in the toolbar). Consider using instead"
" only the replay functionality (with infinite"
" playback speed).")
elif not pb.disable_user_interaction:
print('Hit <Space> to pause/step.')
print('Hit <Enter> to resume/skip.')
print('Hit <i> to enter debug mode.')
self.paused = False
self.run_ipdb = False
self.skipping = False
self.any_figs = True
# Init figure
post_title = "" if self.plot_u else "\n(obs times only)"
updater = init(name, stats, key0, self.plot_u, E, P, **kwargs)
if plt.fignum_exists(name) and getattr(updater, 'is_active', 1):
self.figures[name] = updater
fig = plt.figure(name)
win = fig.canvas
ax0 = fig.axes[0]
win.manager.set_window_title("%s" % name)
ax0.set_title(ax0.get_title() + post_title)
self.update(key0, E, P) # Call initial update
plt.pause(0.01) # Draw
def update(self, key, E, P):
"""Update liveplots"""
# Check if there are still open figures
if self.any_figs:
open_figns = plt.get_figlabels()
live_figns = set(self.figures.keys())
self.any_figs = bool(live_figns.intersection(open_figns))
else:
return
# Playback control
SPACE = b' '
CHAR_I = b'i'
ENTERs = [b'\n', b'\r'] # Linux + Windows
def pause():
"""Loop until user decision is made."""
ch = read1()
while True:
# Set state (pause, skipping, ipdb)
if ch in ENTERs:
self.paused = False
elif ch == CHAR_I:
self.run_ipdb = True
# If keypress valid, resume execution
if ch in ENTERs + [SPACE, CHAR_I]:
break
ch = read1()
# Pause to enable zoom, pan, etc. of mpl GUI
plot_pause(0.01) # Don't use time.sleep()!
# Enter pause loop
if self.paused:
pause()
else:
if key == (0, None, 'u'):
# Skip read1 for key0 (coz it blocks)
pass
else:
ch = read1()
if ch == SPACE:
# Pause
self.paused = True
self.skipping = False
pause()
elif ch in ENTERs:
# Toggle skipping
self.skipping = not self.skipping
elif ch == CHAR_I:
# Schedule debug
# Note: The reason we dont set_trace(frame) right here is:
# - I could not find the right frame, even doing
# > frame = inspect.stack()[0]
# > while frame.f_code.co_name != "assimilate":
# > frame = frame.f_back
# - It just restarts the plot.
self.run_ipdb = True
# Update figures
if not self.skipping:
faus = key[-1]
if faus != 'u' or self.plot_u:
for name, (updater) in self.figures.items():
if plt.fignum_exists(name) and \
getattr(updater, 'is_active', 1):
_ = plt.figure(name)
updater(key, E, P)
plot_pause(self.params['pause_'+faus])
if self.run_ipdb:
self.run_ipdb = False
import inspect
import ipdb
print("Entering debug mode (ipdb).")
print("Type '?' (and Enter) for usage help.")
print("Type 'c' to continue the assimilation.")
ipdb.set_trace(inspect.stack()[2].frame)
# TODO 6:
# - iEnKS diagnostics don't work at all when store_u=False
star = "${}^*$"
class sliding_diagnostics:
"""Plots a sliding window (like a heart rate monitor) of certain diagnostics."""
def __init__(self, fignum, stats, key0, plot_u,
E, P, Tplot=None, **kwargs):
# STYLE TABLES - Defines which/how diagnostics get plotted
styles = {}
def lin(a, b): return (lambda x: a + b*x)
divN = 1/getattr(stats.xp, 'N', 99)
# Columns: transf, shape, plt kwargs
styles['RMS'] = {
'err.rms': [None, None, dict(c='k', label='Error')],
'std.rms': [None, None, dict(c='b', label='Spread', alpha=0.6)],
}
styles['Values'] = {
'skew': [None, None, dict(c='g', label=star+r'Skew/$\sigma^3$')],
'kurt': [None, None, dict(c='r', label=star+r'Kurt$/\sigma^4{-}3$')],
'trHK': [None, None, dict(c='k', label=star+'HK')],
'infl': [lin(-10, 10), 'step', dict(c='c', label='10(infl-1)')],
'N_eff': [lin(0, divN), 'dirac', dict(c='y', label='N_eff/N', lw=3)],
'iters': [lin(0, .1), 'dirac', dict(c='m', label='iters/10')],
'resmpl': [None, 'dirac', dict(c='k', label='resampled?')],
}
nAx = len(styles)
GS = {'left': 0.125, 'right': 0.76}
fig, axs = place.freshfig(fignum, figsize=(5, 1+nAx),
nrows=nAx, sharex=True, gridspec_kw=GS)
axs[0].set_title("Diagnostics")
for style, ax in zip(styles, axs):
ax.set_ylabel(style)
ax.set_xlabel('Time (t)')
place_ax.adjust_position(ax, y0=0.03)
self.T_lag, K_lag, a_lag = validate_lag(Tplot, stats.HMM.t)
def init_ax(ax, style_table):
lines = {}
for name in style_table:
# SKIP -- if stats[name] is not in existence
# Note: The nan check/deletion comes after the first kObs.
try:
stat = deep_getattr(stats, name)
except AttributeError:
continue
# try: val0 = stat[key0[0]]
# except KeyError: continue
# PS: recall (from series.py) that even if store_u is false, stat[k] is
# still present if liveplots=True via the k_tmp functionality.
# Unpack style
ln = {}
ln['transf'] = style_table[name][0] or (lambda x: x)
ln['shape'] = style_table[name][1]
ln['plt'] = style_table[name][2]
# Create series
if isinstance(stat, FAUSt):
ln['plot_u'] = plot_u
K_plot = comp_K_plot(K_lag, a_lag, ln['plot_u'])
else:
ln['plot_u'] = False
K_plot = a_lag
ln['data'] = RollingArray(K_plot)
ln['tt'] = RollingArray(K_plot)
# Plot (init)
ln['handle'], = ax.plot(ln['tt'], ln['data'], **ln['plt'])
# Plotting only nans yields ugly limits. Revert to defaults.
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
lines[name] = ln
return lines
# Plot
self.d = [init_ax(ax, styles[style]) for style, ax in zip(styles, axs)]
# Horizontal line at y=0
self.baseline0, = ax.plot(
ax.get_xlim(), [0, 0], c=0.5*ones(3), lw=0.7, label='_nolegend_')
# Store
self.axs = axs
self.stats = stats
self.init_incomplete = True
# Update plot
def __call__(self, key, E, P):
k, kObs, faus = key
stats = self.stats
chrono = stats.HMM.t
ax0, ax1 = self.axs
def update_arrays(lines):
for name, ln in lines.items():
stat = deep_getattr(stats, name)
t = chrono.tt[k] # == chrono.ttObs[kObs]
if isinstance(stat, FAUSt):
# ln['data'] will contain duplicates for f/a times.
if ln['plot_u']:
val = stat[key]
ln['tt'] .insert(k, t)
ln['data'].insert(k, ln['transf'](val))
elif 'u' not in faus:
val = stat[key]
ln['tt'] .insert(kObs, t)
ln['data'].insert(kObs, ln['transf'](val))
else:
# ln['data'] will not contain duplicates, coz only 'a' is input.
if 'a' in faus:
val = stat[kObs]
ln['tt'] .insert(kObs, t)
ln['data'].insert(kObs, ln['transf'](val))
elif 'f' in faus:
pass
def update_plot_data(ax, lines):
def bend_into(shape, xx, yy):
# Get arrays. Repeat (to use for intermediate nodes).
yy = yy.array.repeat(3)
xx = xx.array.repeat(3)
if len(xx) == 0:
pass # shortcircuit any modifications
elif shape == 'step':
yy = np.hstack([yy[1:], nan]) # roll leftward
elif shape == 'dirac':
nonlocal nDirac
axW = np.diff(ax.get_xlim())
yy[0::3] = False # set datapoint to 0
xx[2::3] = nan # make datapoint disappear
xx += nDirac*axW/100 # offset datapoint horizontally
nDirac += 1
return xx, yy
nDirac = 1
for _name, ln in lines.items():
ln['handle'].set_data(*bend_into(ln['shape'], ln['tt'], ln['data']))
def finalize_init(ax, lines, mm):
# Rm lines that only contain NaNs
for name in list(lines):
ln = lines[name]
stat = deep_getattr(stats, name)
if not stat.were_changed:
ln['handle'].remove() # rm from axes
del lines[name] # rm from dict
# Add legends
if lines:
ax.legend(loc='upper left',
bbox_to_anchor=(1.01, 1), borderaxespad=0)
if mm:
ax.annotate(star+": mean of\nmarginals",
xy=(0, -1.5/len(lines)),
xycoords=ax.get_legend().get_frame(),
bbox=dict(alpha=0.0), fontsize='small')
# coz placement of annotate needs flush sometimes:
plot_pause(0.01)
# Insert current stats
for lines, ax in zip(self.d, self.axs):
update_arrays(lines)
update_plot_data(ax, lines)
# Set x-limits (time)
sliding_xlim(ax0, self.d[0]['err.rms']['tt'], self.T_lag, margin=True)
self.baseline0.set_xdata(ax0.get_xlim())
# Set y-limits
data0 = [ln['data'].array for ln in self.d[0].values()]
data1 = [ln['data'].array for ln in self.d[1].values()]
ax0.set_ylim(0, d_ylim(data0, ax0 , cC=0.2, cE=0.9)[1])
ax1.set_ylim(*d_ylim(data1, ax1, Max=4, Min=-4, cC=0.3, cE=0.9))
# Init legend. Rm nan lines.
if self.init_incomplete and 'a' == faus:
self.init_incomplete = False
finalize_init(ax0, self.d[0], False)
finalize_init(ax1, self.d[1], True)
def sliding_xlim(ax, tt, lag, margin=False):
dt = lag/20 if margin else 0
if tt.nFilled == 0:
return # Quit
t1, t2 = tt.span() # Get suggested span.
s1, s2 = ax.get_xlim() # Get previous lims.
# If zero span (eg tt holds single 'f' and 'a'):
if t1 == t2:
t1 -= 1 # add width
t2 += 1 # add width
# If user has skipped (too much):
elif np.isnan(t1):
s2 -= dt # Correct for dt.
span = s2-s1 # Compute previous span
# If span<lag:
if span < lag:
span += (t2-s2) # Grow by "dt".
span = min(lag, span) # Bound
t1 = t2 - span # Set span.
ax.set_xlim(t1, t2 + dt) # Set xlim to span
class weight_histogram:
"""Plots histogram of weights. Refreshed each analysis."""
def __init__(self, fignum, stats, key0, plot_u, E, P, **kwargs):
if not hasattr(stats, 'w'):
self.is_active = False
return
fig, ax = place.freshfig(fignum, figsize=(7, 3), gridspec_kw={'bottom': .15})
ax.set_xscale('log')
ax.set_xlabel('Weight')
ax.set_ylabel('Count')
self.stats = stats
self.ax = ax
self.hist = []
self.bins = np.exp(np.linspace(np.log(1e-10), np.log(1), 31))
def __call__(self, key, E, P):
k, kObs, faus = key
if 'a' == faus:
w = self.stats.w[key]
N = len(w)
ax = self.ax
self.is_active = N < 10001
if not self.is_active:
not_available_text(ax, 'Not computed (N > threshold)')
return
counted = w > self.bins[0]
_ = [b.remove() for b in self.hist]
nn, _, self.hist = ax.hist(
w[counted], bins=self.bins, color='b')
ax.set_ylim(top=max(nn))
ax.set_title('N: {:d}. N_eff: {:.4g}. Not shown: {:d}. '.
format(N, 1/(w@w), N-np.sum(counted)))
class spectral_errors:
"""Plots the (spatial-RMS) error as a functional of the SVD index."""
def __init__(self, fignum, stats, key0, plot_u, E, P, **kwargs):
fig, ax = place.freshfig(fignum, figsize=(6, 3))
ax.set_xlabel('Sing. value index')
ax.set_yscale('log')
self.init_incomplete = True
self.ax = ax
self.plot_u = plot_u
try:
self.msft = stats.umisf
self.sprd = stats.svals
except AttributeError:
self.is_active = False
not_available_text(ax, "Spectral stats not being computed")
# Update plot
def __call__(self, key, E, P):
k, kObs, faus = key
ax = self.ax
if self.init_incomplete:
if self.plot_u or 'f' == faus:
self.init_incomplete = False
msft = abs(self.msft[key])
sprd = self.sprd[key]
if np.any(np.isinf(msft)):
not_available_text(ax, "Spectral stats not finite")
self.is_active = False
else:
self.line_msft, = ax.plot(
msft, 'k', lw=2, label='Error')
self.line_sprd, = ax.plot(
sprd, 'b', lw=2, label='Spread', alpha=0.9)
ax.get_xaxis().set_major_locator(
MaxNLocator(integer=True))
ax.legend()
else:
msft = abs(self.msft[key])
sprd = self.sprd[key]
self.line_sprd.set_ydata(sprd)
self.line_msft.set_ydata(msft)
# ax.set_ylim(*d_ylim(msft))
# ax.set_ylim(bottom=1e-5)
ax.set_ylim([1e-3, 1e1])
class correlations:
"""Plots the state (auto-)correlation matrix."""
half = True # Whether to show half/full (symmetric) corr matrix.
def __init__(self, fignum, stats, key0, plot_u, E, P, **kwargs):
GS = {'height_ratios': [4, 1], 'hspace': 0.09, 'top': 0.95}
fig, (ax, ax2) = place.freshfig(fignum, figsize=(5, 6), nrows=2, gridspec_kw=GS)
if E is None and np.isnan(
P.diag if isinstance(P, CovMat) else P).all():
not_available_text(ax, (
'Not available in replays'
'\ncoz full Ens/Cov not stored.'))
self.is_active = False
return
Nx = len(stats.mu[key0])
if Nx <= 1003:
C = np.eye(Nx)
# Mask half
mask = np.zeros_like(C, dtype=bool)  # np.bool is removed in modern NumPy
mask[np.tril_indices_from(mask)] = True
# Make colormap. Log-transform cmap,
# but not internally in matplotlib,
# so as to avoid transforming the colorbar too.
cmap = plt.get_cmap('RdBu_r')
trfm = mpl.colors.SymLogNorm(linthresh=0.2, linscale=0.2,
base=np.e, vmin=-1, vmax=1)
cmap = cmap(trfm(np.linspace(-0.6, 0.6, cmap.N)))
cmap = mpl.colors.ListedColormap(cmap)
#
VM = 1.0 # abs(np.percentile(C,[1,99])).max()
im = ax.imshow(C, cmap=cmap, vmin=-VM, vmax=VM)
# Colorbar
_ = ax.figure.colorbar(im, ax=ax, shrink=0.8)
# Tune plot
plt.box(False)
ax.set_facecolor('w')
ax.grid(False)
ax.set_title("State correlation matrix:", y=1.07)
ax.xaxis.tick_top()
# ax2 = inset_axes(ax,width="30%",height="60%",loc=3)
line_AC, = ax2.plot(arange(Nx), ones(Nx), label='Correlation')
line_AA, = ax2.plot(arange(Nx), ones(Nx), label='Abs. corr.')
_ = ax2.hlines(0, 0, Nx-1, 'k', 'dotted', lw=1)
# Align ax2 with ax
bb_AC = ax2.get_position()
bb_C = ax.get_position()
ax2.set_position([bb_C.x0, bb_AC.y0, bb_C.width, bb_AC.height])
# Tune plot
ax2.set_title("Auto-correlation:")
ax2.set_ylabel("Mean value")
ax2.set_xlabel("Distance (in state indices)")
ax2.set_xticklabels([])
ax2.set_yticks([0, 1] + list(ax2.get_yticks()[[0, -1]]))
ax2.set_ylim(top=1)
ax2.legend(frameon=True, facecolor='w',
bbox_to_anchor=(1, 1), loc='upper left', borderaxespad=0.02)
self.ax = ax
self.ax2 = ax2
self.im = im
self.line_AC = line_AC
self.line_AA = line_AA
self.mask = mask
if hasattr(stats, 'w'):
self.w = stats.w
else:
not_available_text(ax)
# Update plot
def __call__(self, key, E, P):
# Get cov matrix
if E is not None:
if hasattr(self, 'w'):
C = np.cov(E, rowvar=False, aweights=self.w[key])
else:
C = np.cov(E, rowvar=False)
else:
assert P is not None
C = P.full if isinstance(P, CovMat) else P
C = C.copy()
# Compute corr from cov
std = np.sqrt(np.diag(C))
C /= std[:, None]
C /= std[None, :]
# Mask
if self.half:
C = np.ma.masked_where(self.mask, C)
# Plot
self.im.set_data(C)
# Auto-corr function
ACF = circulant_ACF(C)
AAF = circulant_ACF(C, do_abs=True)
self.line_AC.set_ydata(ACF)
self.line_AA.set_ydata(AAF)
def circulant_ACF(C, do_abs=False):
"""Compute the auto-covariance-function corresponding to `C`.
This assumes it is the cov/corr matrix of a 1D periodic domain.
"""
M = len(C)
# cols = np.flipud(sla.circulant(np.arange(M)[::-1]))
cols = sla.circulant(np.arange(M))
ACF = np.zeros(M)
for i in range(M):
row = C[i, cols[i]]
if do_abs:
row = abs(row)
ACF += row
# Note: this actually also accesses masked values in C.
return ACF/M
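# Quick sanity check (a sketch, not part of DAPPER): for the identity matrix
# the ACF is 1 at distance 0 and 0 at all other distances.
def _demo_circulant_ACF():
    acf = circulant_ACF(np.eye(5))
    assert np.isclose(acf[0], 1) and np.allclose(acf[1:], 0)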
def sliding_marginals(
obs_inds = (),
dims = (),
labels = (),
Tplot = None,
ens_props = dict(alpha=0.4), # noqa
zoomy = 1.0,
):
# Store parameters
params_orig = DotDict(**locals())
def init(fignum, stats, key0, plot_u, E, P, **kwargs):
xx, yy, mu, std, chrono = \
stats.xx, stats.yy, stats.mu, stats.std, stats.HMM.t
# Set parameters (kwargs takes precedence over params_orig)
p = DotDict(**{
kw: kwargs.get(kw, val) for kw, val in params_orig.items()})
# Lag settings:
T_lag, K_lag, a_lag = validate_lag(p.Tplot, chrono)
K_plot = comp_K_plot(K_lag, a_lag, plot_u)
# Extend K_plot further, to allow adding blanks for resampling (PartFilt):
has_w = hasattr(stats, 'w')
if has_w:
K_plot += a_lag
# Chose marginal dims to plot
if not p.dims:
Nx = min(10, xx.shape[-1])
DimsX = linspace_int(xx.shape[-1], Nx)
else:
Nx = len(p.dims)
DimsX = p.dims
# Pre-process obs dimensions
# Rm inds of obs if not in DimsX
iiY = [i for i, m in enumerate(p.obs_inds) if m in DimsX]
# Rm obs_inds if not in DimsX
DimsY = [m for i, m in enumerate(p.obs_inds) if m in DimsX]
# Get dim (within y) of each x
DimsY = [DimsY.index(m) if m in DimsY else None for m in DimsX]
Ny = len(iiY)
# Set up figure, axes
fig, axs = place.freshfig(fignum, figsize=(5, 7), nrows=Nx, sharex=True)
if Nx == 1:
axs = [axs]
# Tune plots
axs[0].set_title("Marginal time series")
for ix, (m, ax) in enumerate(zip(DimsX, axs)):
# ax.set_ylim(*viz.stretch(*viz.xtrema(xx[:, m]), 1/p.zoomy))
if not p.labels:
ax.set_ylabel("$x_{%d}$" % m)
else:
ax.set_ylabel(p.labels[ix])
axs[-1].set_xlabel('Time (t)')
plot_pause(0.05)
plt.tight_layout()
# Allocate
d = DotDict() # data arrays
h = DotDict() # plot handles
# Why "if True" ? Just to indent the rest of the line...
if True:
d.t = RollingArray((K_plot,))
if True:
d.x = RollingArray((K_plot, Nx))
h.x = []
if True:
d.y = RollingArray((K_plot, Ny))
h.y = []
if E is not None:
d.E = RollingArray((K_plot, len(E), Nx))
h.E = []
if P is not None:
d.mu = RollingArray((K_plot, Nx))
h.mu = []
if P is not None:
d.s = RollingArray((K_plot, 2, Nx))
h.s = []
# Plot (invisible coz everything here is nan, for the moment).
for ix, (_m, iy, ax) in enumerate(zip(DimsX, DimsY, axs)):
if True:
h.x += ax.plot(d.t, d.x[:, ix], 'k')
if iy is not None:
h.y += ax.plot(d.t, d.y[:, iy], 'g*', ms=10)
if 'E' in d:
h.E += [ax.plot(d.t, d.E[:, :, ix], **p.ens_props)]
if 'mu' in d:
h.mu += ax.plot(d.t, d.mu[:, ix], 'b')
if 's' in d:
h.s += [ax.plot(d.t, d.s[:, :, ix], 'b--', lw=1)]
def update(key, E, P):
k, kObs, faus = key
EE = duplicate_with_blanks_for_resampled(E, DimsX, key, has_w)
# Roll data array
ind = k if plot_u else kObs
for Ens in EE: # If E is duplicated, so must the others be.
if 'E' in d:
d.E .insert(ind, Ens)
if 'mu' in d:
d.mu.insert(ind, mu[key][DimsX])
if 's' in d:
d.s .insert(ind, mu[key][DimsX] + [[1], [-1]]*std[key][DimsX])
if True:
d.t .insert(ind, chrono.tt[k])
if True:
d.y .insert(ind, yy[kObs, iiY]
if kObs is not None else nan*ones(Ny))
if True:
d.x .insert(ind, xx[k, DimsX])
# Update graphs
for ix, (_m, iy, ax) in enumerate(zip(DimsX, DimsY, axs)):
sliding_xlim(ax, d.t, T_lag, True)
if True:
h.x[ix] .set_data(d.t, d.x[:, ix])
if iy is not None:
h.y[iy] .set_data(d.t, d.y[:, iy])
if 'mu' in d:
h.mu[ix] .set_data(d.t, d.mu[:, ix])
if 's' in d:
[h.s[ix][b].set_data(d.t, d.s[:, b, ix]) for b in [0, 1]]
if 'E' in d:
[h.E[ix][n].set_data(d.t, d.E[:, n, ix]) for n in range(len(E))]
if 'E' in d:
update_alpha(key, stats, h.E[ix])
# TODO 3: fixup. This might be slow?
# In any case, it is very far from tested.
# Also, relim'iting all of the time is distracting.
# Use d_ylim?
if 'E' in d:
lims = d.E
elif 'mu' in d:
lims = d.mu
lims = np.array(viz.xtrema(lims[..., ix]))
if lims[0] == lims[1]:
lims += [-.5, +.5]
ax.set_ylim(*viz.stretch(*lims, 1/p.zoomy))
return
return update
return init
def phase_particles(
is_3d = True,
obs_inds = (),
dims = (),
labels = (),
Tplot = None,
ens_props = dict(alpha=0.4), # noqa
zoom = 1.5,
):
# Store parameters
params_orig = DotDict(**locals())
M = 3 if is_3d else 2
def init(fignum, stats, key0, plot_u, E, P, **kwargs):
xx, yy, mu, _, chrono = \
stats.xx, stats.yy, stats.mu, stats.std, stats.HMM.t
# Set parameters (kwargs takes precedence over params_orig)
p = DotDict(**{
kw: kwargs.get(kw, val) for kw, val in params_orig.items()})
# Lag settings:
has_w = hasattr(stats, 'w')
if p.Tplot == 0:
K_plot = 1
else:
T_lag, K_lag, a_lag = validate_lag(p.Tplot, chrono)
K_plot = comp_K_plot(K_lag, a_lag, plot_u)
# Extend K_plot further, to allow adding blanks for resampling (PartFilt):
if has_w:
K_plot += a_lag
# Dimension settings
if not p.dims:
p.dims = arange(M)
if not p.labels:
p.labels = ["$x_%d$" % d for d in p.dims]
assert len(p.dims) == M
# Set up figure, axes
fig, _ = place.freshfig(fignum, figsize=(5, 5))
ax = plt.subplot(111, projection='3d' if is_3d else None)
ax.set_facecolor('w')
ax.set_title("Phase space trajectories")
# Tune plot
for ind, (s, i, t) in enumerate(zip(p.labels, p.dims, "xyz")):
viz.set_ilim(ax, ind, *viz.stretch(*viz.xtrema(xx[:, i]), 1/p.zoom))
eval("ax.set_%slabel('%s')" % (t, s))
# Allocate
d = DotDict() # data arrays
h = DotDict() # plot handles
s = DotDict() # scatter handles
if E is not None:
d.E = RollingArray((K_plot, len(E), M))
h.E = []
if P is not None:
d.mu = RollingArray((K_plot, M))
if True:
d.x = RollingArray((K_plot, M))
if list(p.obs_inds) == list(p.dims):
d.y = RollingArray((K_plot, M))
# Plot tails (invisible coz everything here is nan, for the moment).
if 'E' in d:
h.E += [ax.plot(*xn, **p.ens_props)[0]
for xn in np.transpose(d.E, [1, 2, 0])]
if 'mu' in d:
h.mu = ax.plot(*d.mu.T, 'b', lw=2)[0]
if True:
h.x = ax.plot(*d.x .T, 'k', lw=3)[0]
if 'y' in d:
h.y = ax.plot(*d.y .T, 'g*', ms=14)[0]
# Scatter. NB: don't init with nan's coz it's buggy
# (wrt. get_color() and _offsets3d) since mpl 3.1.
if 'E' in d:
s.E = ax.scatter(*E.T[p.dims], s=3**2,
c=[hn.get_color() for hn in h.E])
if 'mu' in d:
s.mu = ax.scatter(*ones(M), s=8**2,
c=[h.mu.get_color()])
if True:
s.x = ax.scatter(*ones(M), s=14**2,
c=[h.x.get_color()], marker=(5, 1), zorder=99)
def update(key, E, P):
k, kObs, faus = key
show_y = 'y' in d and kObs is not None
def update_tail(handle, newdata):
handle.set_data(newdata[:, 0], newdata[:, 1])
if is_3d:
handle.set_3d_properties(newdata[:, 2])
def update_sctr(handle, newdata):
if is_3d:
handle._offsets3d = juggle_axes(*newdata.T, 'z')
else:
handle.set_offsets(newdata)
EE = duplicate_with_blanks_for_resampled(E, p.dims, key, has_w)
# Roll data array
ind = k if plot_u else kObs
for Ens in EE: # If E is duplicated, so must the others be.
if 'E' in d:
d.E .insert(ind, Ens)
if True:
d.x .insert(ind, xx[k, p.dims])
if 'y' in d:
d.y .insert(ind, yy[kObs, :] if show_y else nan*ones(M))
if 'mu' in d:
d.mu.insert(ind, mu[key][p.dims])
# Update graph
update_sctr(s.x, d.x[[-1]])
update_tail(h.x, d.x)
if 'y' in d:
update_tail(h.y, d.y)
if 'mu' in d:
update_sctr(s.mu, d.mu[[-1]])
update_tail(h.mu, d.mu)
else:
update_sctr(s.E, d.E[-1])
for n in range(len(E)):
update_tail(h.E[n], d.E[:, n, :])
update_alpha(key, stats, h.E, s.E)
return
return update
return init
def validate_lag(Tplot, chrono):
"""Return validated `T_lag` such that is is:
- equal to `Tplot` with fallback: `HMM.t.Tplot`.
- no longer than `HMM.t.T`.
Also return corresponding `K_lag`, `a_lag`.
"""
# Defaults
if Tplot is None:
Tplot = chrono.Tplot
# Rename
T_lag = Tplot
assert T_lag >= 0
# Validate T_lag
t2 = chrono.tt[-1]
t1 = max(chrono.tt[0], t2-T_lag)
T_lag = t2-t1
K_lag = int(T_lag / chrono.dt) + 1 # Lag in indices
a_lag = K_lag//chrono.dkObs + 1 # Lag in obs indices
return T_lag, K_lag, a_lag
def comp_K_plot(K_lag, a_lag, plot_u):
K_plot = 2*a_lag # Sum of lags of {f,a} series.
if plot_u:
K_plot += K_lag # Add lag of u series.
return K_plot
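# Usage sketch (the chronology object below is hypothetical, built only to
# mimic the HMM.t attributes that validate_lag reads): the requested window
# is capped to the experiment length and converted to step/obs index counts.
def _demo_validate_lag():
    chrono = DotDict(tt=np.linspace(0, 10, 101), dt=0.1, dkObs=5, Tplot=1.0)
    T_lag, K_lag, a_lag = validate_lag(2.0, chrono)
    K_plot = comp_K_plot(K_lag, a_lag, plot_u=True)
    return T_lag, K_lag, a_lag, K_plot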
def update_alpha(key, stats, lines, scatters=None):
"""Adjust color alpha (for particle filters)."""
k, kObs, faus = key
if kObs is None:
return
if faus == 'f':
return
if not hasattr(stats, 'w'):
return
# Compute alpha values
w = stats.w[key]
alpha = (w/w.max()).clip(0.1, 0.4)
# Set line alpha
for line, a in zip(lines, alpha):
line.set_alpha(a)
# Scatter plot does not have alpha. => Fake it.
if scatters is not None:
colors = scatters.get_facecolor()[:, :3]
if len(colors) == 1:
colors = colors.repeat(len(w), axis=0)
scatters.set_color(np.hstack([colors, alpha[:, None]]))
def duplicate_with_blanks_for_resampled(E, dims, key, has_w):
"""Particle filter: insert breaks for resampled particles."""
if E is None:
return [E]
EE = []
E = E[:, dims]
if has_w:
k, kObs, faus = key
if faus == 'f':
pass
elif faus == 'a':
_Ea[0] = E[:, 0] # Store (1st dim of) ens.
elif faus == 'u' and kObs is not None:
# Find resampled particles. Insert duplicate ensemble. Write nans (breaks).
resampled = _Ea[0] != E[:, 0] # Mark as resampled if ens changed.
# Insert current ensemble (copy to avoid overwriting).
EE.append(E.copy())
EE[0][resampled] = nan # Write breaks
# Always: append current ensemble
EE.append(E)
return EE
_Ea = [None] # persistent storage for ens
def d_ylim(data, ax=None, cC=0, cE=1, pp=(1, 99), Min=-1e20, Max=+1e20):
"""Provide new ylim's intelligently, from percentiles of the data.
- `data`: iterable of arrays for computing percentiles.
- `pp`: percentiles
- `ax`: If present, then the delta_zoom in/out is also considered.
- `cE`: expansion (widening) rate ∈ [0,1].
Default: 1, which immediately expands to percentile.
- `cC`: compression (narrowing) rate ∈ [0,1].
Default: 0, which does not allow compression.
- `Min`/`Max`: bounds
Despite being a little involved,
the cost of this subroutine is typically not substantial
because there's usually not that much data to sort through.
"""
# Find "reasonable" limits (by percentiles), looping over data
maxv = minv = -np.inf # init
for d in data:
d = d[np.isfinite(d)]
if len(d):
perc = np.array([-1, 1]) * np.percentile(d, pp)
minv, maxv = np.maximum([minv, maxv], perc)
minv *= -1
# Pry apart equal values
if np.isclose(minv, maxv):
maxv += 0.5
minv -= 0.5
# Make the zooming transition smooth
if ax is not None:
current = ax.get_ylim()
# Set rate factor as compress or expand factor.
c0 = cC if minv > current[0] else cE
c1 = cC if maxv < current[1] else cE
# Adjust
minv = np.interp(c0, (0, 1), (current[0], minv))
maxv = np.interp(c1, (0, 1), (current[1], maxv))
# Bounds
maxv = min(Max, maxv)
minv = max(Min, minv)
# Set (if anything's changed)
def worth_updating(a, b, curr):
# Note: should depend on cC and cE
d = abs(curr[1]-curr[0])
lower = abs(a-curr[0]) > 0.002*d
upper = abs(b-curr[1]) > 0.002*d
return lower and upper
# if worth_updating(minv,maxv,current):
# ax.set_ylim(minv,maxv)
# Some mpl versions don't handle inf limits.
if not np.isfinite(minv):
minv = None
if not np.isfinite(maxv):
maxv = None
return minv, maxv
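# Small sketch of d_ylim (toy data, illustrative only): without an axes object
# it simply returns robust, percentile-based limits for the supplied arrays.
def _demo_d_ylim():
    data = [np.array([0., 1., 2., 100.])]
    return d_ylim(data, pp=(0, 75))  # (0.0, 26.5) for this toy data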
def spatial1d(
obs_inds = None,
periodicity = None,
dims = (),
ens_props = {'color': 'b', 'alpha': 0.1}, # noqa
conf_mult = None,
):
# Store parameters
params_orig = DotDict(**locals())
def init(fignum, stats, key0, plot_u, E, P, **kwargs):
xx, yy, mu = stats.xx, stats.yy, stats.mu
# Set parameters (kwargs takes precedence over params_orig)
p = DotDict(**{
kw: kwargs.get(kw, val) for kw, val in params_orig.items()})
if not p.dims:
M = xx.shape[-1]
p.dims = arange(M)
else:
M = len(p.dims)
# Make periodic wrapper
ii, wrap = viz.setup_wrapping(M, p.periodicity)
# Set up figure, axes
fig, ax = place.freshfig(fignum, figsize=(8, 5))
fig.suptitle("1d amplitude plot")
# Nans
nan1 = wrap(nan* | ones(M) | numpy.ones |
import numpy as np
import random
from numpy import isclose
import pytest
import matplotlib
matplotlib.use('Agg') # use a non-GUI backend, so plots are not shown during testing
from neurodiffeq.neurodiffeq import safe_diff as diff
from neurodiffeq.networks import FCNN
from neurodiffeq.pde import DirichletControlPoint, NeumannControlPoint, Point, CustomBoundaryCondition
from neurodiffeq.pde import solve2D, solve2D_system, Monitor2D, make_animation
from neurodiffeq.pde import Solution
from neurodiffeq.pde import Solution2D
from neurodiffeq.generators import PredefinedGenerator, Generator2D
from neurodiffeq.conditions import DirichletBVP2D, DirichletBVP
import torch
import torch.nn as nn
import torch.optim as optim
@pytest.fixture(autouse=True)
def magic():
torch.manual_seed(42)
np.random.seed(42)
def test_monitor():
laplace = lambda u, x, y: diff(u, x, order=2) + diff(u, y, order=2)
bc = DirichletBVP2D(
x_min=0, x_min_val=lambda y: torch.sin(np.pi * y),
x_max=1, x_max_val=lambda y: 0,
y_min=0, y_min_val=lambda x: 0,
y_max=1, y_max_val=lambda x: 0
)
net = FCNN(n_input_units=2, hidden_units=(32, 32))
with pytest.warns(FutureWarning):
solve2D(
pde=laplace, condition=bc, xy_min=(0, 0), xy_max=(1, 1),
net=net, max_epochs=3,
train_generator=Generator2D((32, 32), (0, 0), (1, 1), method='equally-spaced-noisy'),
batch_size=64,
monitor=Monitor2D(check_every=1, xy_min=(0, 0), xy_max=(1, 1))
)
def test_train_generator():
laplace = lambda u, x, y: diff(u, x, order=2) + diff(u, y, order=2)
bc = DirichletBVP2D(
x_min=0, x_min_val=lambda y: torch.sin(np.pi * y),
x_max=1, x_max_val=lambda y: 0,
y_min=0, y_min_val=lambda x: 0,
y_max=1, y_max_val=lambda x: 0
)
net = FCNN(n_input_units=2, hidden_units=(32, 32))
with pytest.raises(ValueError), pytest.warns(FutureWarning):
solution_neural_net_laplace, _ = solve2D(
pde=laplace, condition=bc,
net=net, max_epochs=3, batch_size=64
)
def test_laplace():
laplace = lambda u, x, y: diff(u, x, order=2) + diff(u, y, order=2)
bc = DirichletBVP2D(
x_min=0, x_min_val=lambda y: torch.sin(np.pi * y),
x_max=1, x_max_val=lambda y: 0,
y_min=0, y_min_val=lambda x: 0,
y_max=1, y_max_val=lambda x: 0,
)
net = FCNN(n_input_units=2, hidden_units=(32, 32))
with pytest.warns(FutureWarning):
solution_neural_net_laplace, loss_history = solve2D(
pde=laplace, condition=bc, xy_min=(0, 0), xy_max=(1, 1),
net=net, max_epochs=3,
train_generator=Generator2D((32, 32), (0, 0), (1, 1), method='equally-spaced-noisy',
xy_noise_std=(0.01, 0.01)),
batch_size=64
)
assert isinstance(solution_neural_net_laplace, Solution2D)
assert isinstance(loss_history, dict)
keys = ['train_loss', 'valid_loss']
for key in keys:
assert key in loss_history
assert isinstance(loss_history[key], list)
assert len(loss_history[keys[0]]) == len(loss_history[keys[1]])
# def test_pde_system():
# def _network_output_2input(net, xs, ys, ith_unit):
# xys = torch.cat((xs, ys), 1)
# nn_output = net(xys)
# if ith_unit is not None:
# return nn_output[:, ith_unit].reshape(-1, 1)
# else:
# return nn_output
#
# class BCOnU(Condition):
# """for u(x, y), impose u(x, -1) = u(x, 1) = 0; dudx(0, y) = dudy(L, y) = 0"""
#
# def __init__(self, x_min, x_max, y_min, y_max):
# super().__init__()
# self.x_min = x_min
# self.x_max = x_max
# self.y_min = y_min
# self.y_max = y_max
#
# def enforce(self, net, x, y):
# uxy = _network_output_2input(net, x, y, self.ith_unit)
#
# x_ones = torch.ones_like(x, requires_grad=True)
# x_ones_min = self.x_min * x_ones
# x_ones_max = self.x_max * x_ones
# uxminy = _network_output_2input(net, x_ones_min, y, self.ith_unit)
# uxmaxy = _network_output_2input(net, x_ones_max, y, self.ith_unit)
#
# x_tilde = (x - self.x_min) / (self.x_max - self.x_min)
# y_tilde = (y - self.y_min) / (self.y_max - self.y_min)
#
# return y_tilde * (1 - y_tilde) * (
# uxy - x_tilde * (self.x_max - self.x_min) * diff(uxminy, x_ones_min) \
# + 0.5 * x_tilde ** 2 * (self.x_max - self.x_min) * (
# diff(uxminy, x_ones_min) - diff(uxmaxy, x_ones_max)
# )
# )
#
# class BCOnP(Condition):
# """for p(x, y), impose p(0, y) = p_max; p(L, y) = p_min"""
#
# def __init__(self, x_min, x_max, p_x_min, p_x_max):
# super().__init__()
# self.x_min = x_min
# self.x_max = x_max
# self.p_x_min = p_x_min
# self.p_x_max = p_x_max
#
# def enforce(self, net, x, y):
# uxy = _network_output_2input(net, x, y, self.ith_unit)
# x_tilde = (x - self.x_min) / (self.x_max - self.x_min)
#
# return (1 - x_tilde) * self.p_x_min + x_tilde * self.p_x_max \
# + x_tilde * (1 - x_tilde) * uxy
#
# L = 2.0
# mu = 1.0
# P1, P2 = 1.0, 0.0
# def poiseuille(u, v, p, x, y):
# return [
# mu * (diff(u, x, order=2) + diff(u, y, order=2)) - diff(p, x),
# mu * (diff(v, x, order=2) + diff(v, y, order=2)) - diff(p, y),
# diff(u, x) + diff(v, y)
# ]
# def zero_divergence(u, v, p, x, y):
# return torch.sum( (diff(u, x) + diff(v, y))**2 )
#
# bc_on_u = BCOnU(
# x_min=0,
# x_max=L,
# y_min=-1,
# y_max=1,
# )
# bc_on_v = DirichletBVP2D(
# x_min=0, x_min_val=lambda y: 0,
# x_max=L, x_max_val=lambda y: 0,
# y_min=-1, y_min_val=lambda x: 0,
# y_max=1, y_max_val=lambda x: 0
# )
# bc_on_p = BCOnP(
# x_min=0,
# x_max=L,
# p_x_min=P1,
# p_x_max=P2,
# )
# conditions = [bc_on_u, bc_on_v, bc_on_p]
#
# nets = [
# FCNN(n_input_units=2, hidden_units=(32, 32), actv=nn.Softplus)
# for _ in range(3)
# ]
#
# # use one neural network for each dependent variable
# solution_neural_net_poiseuille, _ = solve2D_system(
# pde_system=poiseuille, conditions=conditions, xy_min=(0, -1), xy_max=(L, 1),
# train_generator=Generator2D((32, 32), (0, -1), (L, 1), method='equally-spaced-noisy'),
# max_epochs=300, batch_size=64, nets=nets, additional_loss_term=zero_divergence,
# monitor=Monitor2D(check_every=10, xy_min=(0, -1), xy_max=(L, 1))
# )
#
# def solution_analytical_poiseuille(xs, ys):
# us = (P1 - P2) / (L * 2 * mu) * (1 - ys ** 2)
# vs = np.zeros_like(xs)
# ps = P1 + (P2 - P1) * xs / L
# return [us, vs, ps]
#
# xs, ys = np.linspace(0, L, 101), np.linspace(-1, 1, 101)
# xx, yy = np.meshgrid(xs, ys)
# u_ana, v_ana, p_ana = solution_analytical_poiseuille(xx, yy)
# u_net, v_net, p_net = solution_neural_net_poiseuille(xx, yy, to_numpy=True)
#
# assert isclose(u_ana, u_net, atol=0.01).all()
# assert isclose(v_ana, v_net, atol=0.01).all()
# assert isclose(p_ana, p_net, atol=0.01).all()
def test_arbitrary_boundary():
def solution_analytical_problem_c(x, y):
return np.log(1 + x ** 2 + y ** 2)
def gradient_solution_analytical_problem_c(x, y):
return 2 * x / (1 + x ** 2 + y ** 2), 2 * y / (1 + x ** 2 + y ** 2),
# creating control points for Dirichlet boundary conditions
edge_length = 2.0 / np.sin(np.pi / 3) / 4
points_on_each_edge = 11
step_size = edge_length / (points_on_each_edge - 1)
direction_theta = np.pi * 2 / 3
left_turn_theta = np.pi * 1 / 3
right_turn_theta = -np.pi * 2 / 3
dirichlet_control_points_problem_c = []
point_x, point_y = 0.0, -1.0
for i_edge in range(6):
for i_step in range(points_on_each_edge - 1):
dirichlet_control_points_problem_c.append(
DirichletControlPoint(
loc=(point_x, point_y),
val=solution_analytical_problem_c(point_x, point_y)
)
)
point_x += step_size * np.cos(direction_theta)
point_y += step_size * np.sin(direction_theta)
direction_theta += left_turn_theta if (i_edge % 2 == 0) else right_turn_theta
# dummy control points to form closed domain
radius_circle = 1.0 / np.sin(np.pi / 6)
center_circle_x = radius_circle * np.cos(np.pi / 6)
center_circle_y = 0.0
dirichlet_control_points_problem_c_dummy = []
for theta in np.linspace(-np.pi * 5 / 6, np.pi * 5 / 6, 60):
point_x = center_circle_x + radius_circle * np.cos(theta)
point_y = center_circle_y + radius_circle * np.sin(theta)
dirichlet_control_points_problem_c_dummy.append(
DirichletControlPoint(
loc=(point_x, point_y),
val=solution_analytical_problem_c(point_x, point_y)
)
)
# all Dirichlet control points
dirichlet_control_points_problem_c_all = \
dirichlet_control_points_problem_c + dirichlet_control_points_problem_c_dummy
# creating control points for Neumann boundary condition
edge_length = 2.0 / np.sin(np.pi / 3) / 4
points_on_each_edge = 11
step_size = edge_length / (points_on_each_edge - 1)
normal_theta = np.pi / 6
direction_theta = -np.pi * 1 / 3
left_turn_theta = np.pi * 1 / 3
right_turn_theta = -np.pi * 2 / 3
neumann_control_points_problem_c = []
point_x, point_y = 0.0, 1.0
for i_edge in range(6):
normal_x = np.cos(normal_theta)
normal_y = np.sin(normal_theta)
# skip the points on the "tip", their normal vector is undefined?
point_x += step_size * np.cos(direction_theta)
point_y += step_size * np.sin(direction_theta)
for i_step in range(points_on_each_edge - 2):
grad_x, grad_y = gradient_solution_analytical_problem_c(point_x, point_y)
neumann_val = grad_x * normal_x + grad_y * normal_y
neumann_control_points_problem_c.append(
NeumannControlPoint(
loc=(point_x, point_y),
val=neumann_val,
normal_vector=(normal_x, normal_y)
)
)
point_x += step_size * np.cos(direction_theta)
point_y += step_size * np.sin(direction_theta)
direction_theta += left_turn_theta if (i_edge % 2 == 0) else right_turn_theta
normal_theta += left_turn_theta if (i_edge % 2 == 0) else right_turn_theta
# dummy control points to form closed domain
radius_circle = 1.0 / np.sin(np.pi / 6)
center_circle_x = -radius_circle * np.cos(np.pi / 6)
center_circle_y = 0.0
neumann_control_points_problem_c_dummy = []
for theta in np.linspace(np.pi * 1 / 6, np.pi * 11 / 6, 60):
point_x = center_circle_x + radius_circle * np.cos(theta)
point_y = center_circle_y + radius_circle * np.sin(theta)
normal_x = np.cos(theta)
normal_y = np.sin(theta)
grad_x, grad_y = gradient_solution_analytical_problem_c(point_x, point_y)
neumann_val = grad_x * normal_x + grad_y * normal_y
neumann_control_points_problem_c_dummy.append(
NeumannControlPoint(
loc=(point_x, point_y),
val=neumann_val,
normal_vector=(normal_x, normal_y)
)
)
# all Neumann control points
neumann_control_points_problem_c_all = \
neumann_control_points_problem_c + neumann_control_points_problem_c_dummy
cbc_problem_c = CustomBoundaryCondition(
center_point=Point(loc=(0.0, 0.0)),
dirichlet_control_points=dirichlet_control_points_problem_c_all,
neumann_control_points=neumann_control_points_problem_c_all
)
def get_grid(x_from_to, y_from_to, x_n_points=100, y_n_points=100, as_tensor=False):
x_from, x_to = x_from_to
y_from, y_to = y_from_to
if as_tensor:
x = torch.linspace(x_from, x_to, x_n_points)
y = torch.linspace(y_from, y_to, y_n_points)
return torch.meshgrid(x, y)
else:
x = np.linspace(x_from, x_to, x_n_points)
        y = np.linspace(y_from, y_to, y_n_points)
        return np.meshgrid(x, y)
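# A minimal usage sketch (assumption: the grid is used to evaluate and plot the
# analytical solution over the domain's bounding box; names follow this script):
# xx, yy = get_grid((-1.0, 3.0), (-2.0, 2.0), x_n_points=200, y_n_points=200)
# zz = solution_analytical_problem_c(xx, yy)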
import glob
import matplotlib as mpl
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
import bz2
import corner
import json
import pathlib
import pickle
import warnings
from datetime import datetime, timedelta
from astropy import constants as const
from astropy import units as uni
from astropy.io import ascii, fits
from astropy.time import Time
from mpl_toolkits.axes_grid1 import ImageGrid
warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered")
warnings.filterwarnings("ignore", r"Degrees of freedom <= 0 for slice")
def _bad_idxs(s):
if s == "[]":
return []
else:
# Merges indices/idxs specified in `s` into a single numpy array of
# indices to omit
s = s.strip("[]").split(",")
bad_idxs = list(map(_to_arr, s))
bad_idxs = np.concatenate(bad_idxs, axis=0)
return bad_idxs
def _to_arr(idx_or_slc):
# Converts str to 1d numpy array
# or slice to numpy array of ints.
# This format makes it easier for flattening multiple arrays in `_bad_idxs`
if ":" in idx_or_slc:
lower, upper = map(int, idx_or_slc.split(":"))
return np.arange(lower, upper + 1)
else:
return np.array([int(idx_or_slc)])
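# Example (illustrative, not in the original module): single indices and
# inclusive "lo:hi" slices can be mixed inside one bracketed string.
# _bad_idxs("[0, 3:5, 9]")  ->  array([0, 3, 4, 5, 9])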
def compress_pickle(fname_out, fpath_pickle):
data = load_pickle(fpath_pickle)
with bz2.BZ2File(f"{fname_out}.pbz2", "wb") as f:
pickle.dump(data, f)
def decompress_pickle(fname):
    with bz2.BZ2File(fname, "rb") as f:
        return pickle.load(f)
def get_evidences(base_dir, relative_to_spot_only=False):
fit_R0 = "fitR0" if "fit_R0" in base_dir else "NofitR0"
species = ["Na", "K", "TiO", "Na_K", "Na_TiO", "K_TiO", "Na_K_TiO"]
model_names_dict = {
"clear": f"NoHet_FitP0_NoClouds_NoHaze_{fit_R0}",
"clear+cloud": f"NoHet_FitP0_Clouds_NoHaze_{fit_R0}",
"clear+haze": f"NoHet_FitP0_NoClouds_Haze_{fit_R0}",
"clear+cloud+haze": f"NoHet_FitP0_Clouds_Haze_{fit_R0}",
"clear+spot": f"Het_FitP0_NoClouds_NoHaze_{fit_R0}",
"clear+spot+cloud": f"Het_FitP0_Clouds_NoHaze_{fit_R0}",
"clear+spot+haze": f"Het_FitP0_NoClouds_Haze_{fit_R0}",
"clear+spot+cloud+haze": f"Het_FitP0_Clouds_Haze_{fit_R0}",
}
data_dict = {
sp: {
model_name: load_pickle(f"{base_dir}/HATP23_E1_{model_id}_{sp}/retrieval.pkl")
for (model_name, model_id) in model_names_dict.items()
}
for sp in species
}
lnZ = {}
lnZ_err = {}
for species_name, species_data in data_dict.items():
lnZ[species_name] = {}
lnZ_err[species_name] = {}
for model_name, model_data in species_data.items():
lnZ[species_name][model_name] = model_data["lnZ"]
lnZ_err[species_name][model_name] = model_data["lnZerr"]
df_lnZ = pd.DataFrame(lnZ)
df_lnZ_err = pd.DataFrame(lnZ_err)
# Get log evidence for spot-only model and compute relative to this instead
if relative_to_spot_only:
model_id = f"Het_FitP0_NoClouds_NoHaze_{fit_R0}_no_features"
df_lnZ_min = load_pickle(f"{base_dir}/HATP23_E1_{model_id}/retrieval.pkl")
#print(f"spot only lnZ: {df_lnZ_min['lnZ']} +/- {df_lnZ_min['lnZerr']}")
species_min = "no_features"
model_min = "spot only"
else:
species_min = df_lnZ.min().idxmin()
model_min = df_lnZ[species_min].idxmin()
df_lnZ_min = data_dict[species_min][model_min]
df_Delta_lnZ = df_lnZ - df_lnZ_min["lnZ"]
df_Delta_lnZ_err = np.sqrt(df_lnZ_err ** 2 + df_lnZ_min["lnZerr"] ** 2)
return df_Delta_lnZ, df_Delta_lnZ_err, species_min, model_min, data_dict
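# Hypothetical usage (directory layout assumed from the model IDs above):
# dlnZ, dlnZ_err, sp_min, model_min, data = get_evidences("retrievals/fit_R0")
# dlnZ["Na"]["clear+spot"]  # log-evidence relative to the weakest (or spot-only) model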
def get_phases(t, P, t0):
"""
Given input times, a period (or posterior dist of periods)
and time of transit center (or posterior), returns the
phase at each time t. From juliet =]
"""
if type(t) is not float:
phase = ((t - np.median(t0)) / np.median(P)) % 1
ii = np.where(phase >= 0.5)[0]
phase[ii] = phase[ii] - 1.0
else:
phase = ((t - np.median(t0)) / np.median(P)) % 1
if phase >= 0.5:
phase = phase - 1.0
return phase
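# Worked example (values chosen for illustration): with P = 1.2 d and
# t0 = 2455000.0, times 0.0, 0.3, and 0.9 d after t0 map to phases
# 0.0, 0.25, and -0.25 (phases >= 0.5 wrap to the negative side).
# get_phases(np.array([2455000.0, 2455000.3, 2455000.9]), 1.2, 2455000.0)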
def get_result(fpath, key="t0", unc=True):
data = np.genfromtxt(fpath, encoding=None, dtype=None)
for line in data:
if key in line:
if unc:
return line
else:
return line[1]
print(f"{key} not found. Check results.dat file.")
def get_table_stats(df, ps=[0.16, 0.5, 0.84], columns=None):
ps_strs = [f"{p*100:.0f}%" for p in ps]
df_stats = df.describe(percentiles=ps).loc[ps_strs]
df_latex = pd.DataFrame(columns=df.columns)
df_latex.loc["p"] = df_stats.loc[ps_strs[1]]
df_latex.loc["p_u"] = df_stats.loc[ps_strs[2]] - df_stats.loc[ps_strs[1]]
df_latex.loc["p_d"] = df_stats.loc[ps_strs[1]] - df_stats.loc[ps_strs[0]]
latex_strs = df_latex.apply(write_latex_row2, axis=0)
return pd.DataFrame(latex_strs, columns=columns)
def load_pickle(fpath):
with open(fpath, "rb") as f:
data = pickle.load(f, encoding="latin") # Python 2 -> 3
return data
def myparser(s):
dt, day_frac = s.split(".")
dt = datetime.strptime(dt, "%Y-%m-%d")
ms = 86_400_000.0 * float(f".{day_frac}")
ms = timedelta(milliseconds=int(ms))
return dt + ms
def plot_binned(
ax,
idxs_used,
fluxes,
bins,
offset,
colors,
annotate=False,
utc=False,
species=None,
bold_species=True,
plot_kwargs=None,
annotate_kwargs=None,
annotate_rms_kwargs=None,
models=None,
):
"""
Plots binned light curves.
Parameters
----------
    ax : matplotlib.axes object
        Current axis to plot on
    idxs_used : ndarray
        x-axis values for each exposure (index, time, phase, etc.)
fluxes : ndarray
`time[idxs_used]` x `wbin` array of fluxes. Each column corresponds to a wavelength
binned LC, where `wbin` is the number of wavelength bins
bins : ndarray
`wbin` x 2 array of wavelength bins. The first column holds the lower
bound of each bin, and the second column holds the upper bound for each.
offset : int, float
How much space to put between each binned LC on `ax`
colors : ndarray
`wbin` x 3 array of RGB values to set color palette
    annotate : bool, optional
        Whether to annotate wavelength bins on plot. Default is False.
    utc : bool, optional
        Whether to convert `idxs_used` to UTC or not. Default is False.
    species : dict or list, optional
        Species of interest whose wavelength bins should be highlighted
    bold_species : bool, optional
        Whether to make annotated bins bold if they are in `species`.
        Default is True.
    plot_kwargs : dict, optional
        Optional keyword arguments to pass to the plot function
    annotate_kwargs : dict, optional
        Optional keyword arguments to pass to the annotate function
    annotate_rms_kwargs : dict, optional
        Optional keyword arguments to pass to the RMS annotation
    models : ndarray, optional
        Optional model fluxes to overplot on each binned LC
Returns
-------
    ax : matplotlib.axes object
Current axis that was plotted on.
"""
if plot_kwargs is None:
plot_kwargs = {}
if annotate_kwargs is None:
annotate_kwargs = {}
if annotate_rms_kwargs is None:
annotate_rms_kwargs = {}
offs = 0
    if idxs_used is None:
        idxs_used = np.arange(fluxes.shape[0])
        slc = slice(0, fluxes.shape[0] + 1)
    else:
        slc = idxs_used
    # fluxes = fluxes[slc, :]
N = bins.shape[0] # number of wavelength bins
for i in range(N):
wav_bin = [round(bins[i][j], 3) for j in range(2)]
if utc:
            t_date = Time(idxs_used, format="jd")
ax.plot_date(
t_date.plot_date,
fluxes[:, i] + offs,
c=colors[i],
label=wav_bin,
**plot_kwargs,
)
else:
ax.plot(
idxs_used,
fluxes[:, i] + offs,
c=0.9 * colors[i],
label=wav_bin,
# mec=0.9*colors[i],
**plot_kwargs,
)
if models is not None:
ax.plot(idxs_used, models[:, i] + offs, c=0.6 * colors[i], lw=2)
if annotate:
# trans = transforms.blended_transform_factory(
# ax.transAxes, ax.transData
# )
trans = transforms.blended_transform_factory(ax.transData, ax.transData)
# Annotate wavelength bins
ann = ax.annotate(
wav_bin,
# xy=(0, 1.004*(1 + offs)),
xy=(idxs_used[-1], 1.002 * (1 + offs)),
xycoords=trans,
**annotate_kwargs,
)
                rms = np.std(fluxes[:, i])
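                # Hedged completion -- the source is truncated here. The steps
                # below are inferred from `annotate_rms_kwargs`, `offset`, and
                # the docstring (which promises the axis is returned); they are
                # not recovered original code.
                ax.annotate(
                    f"{rms * 1e6:.0f} ppm",  # assumes normalized fluxes
                    xy=(idxs_used[0], 1.002 * (1 + offs)),
                    xycoords=trans,
                    **annotate_rms_kwargs,
                )
        offs += offset  # stack the next wavelength bin above the current one
    return ax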
from __future__ import print_function
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from matplotlib.colors import LinearSegmentedColormap
import os
label_size = 28
################################################################################
mpl.rc('font', family='serif', size=34, serif="Times New Roman")
#mpl.rcParams['text.usetex'] = True
#mpl.rcParams['text.latex.preamble'] = [r'\boldmath']
mpl.rcParams['legend.fontsize'] = "medium"
mpl.rc('savefig', format ="pdf", pad_inches= 0.1)
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
mpl.rcParams['figure.figsize'] = 8, 6
mpl.rcParams['lines.linewidth'] = 2
colors_red = [(1, 1, 1), (1, 0, 0), (0, 0, 0)]
colors_blue= [(1, 1, 1), (0, 0, 1), (0, 0, 0)]
cm_red = LinearSegmentedColormap.from_list("GoF_red", colors_red, N=20)
cm_blue= LinearSegmentedColormap.from_list("GoF_blue", colors_blue, N=20)
################################################################################
print("We have to invert the sin problem x1, x2 -> x2, x1")
file0_name = os.environ['learningml']+"/GoF/data/accept_reject/sin1diff_data/data_sin1diff_5_and_5_periods2D_10000_sample_0.txt"
file1_name = os.environ['learningml']+"/GoF/data/accept_reject/sin1diff_data/data_sin1diff_5_and_6_periods2D_10000_sample_0.txt"
name = "data_sin1diff_5_and_6_periods2D_10000_sample_0"
data0 = np.loadtxt(file0_name)
data1 = np.loadtxt(file1_name)
xedges = np.linspace(-1.,1.,51)
yedges = np.linspace(-1.,1.,51)
H, xedges, yedges = np.histogram2d(data0[:,0], data0[:,1], bins=(xedges, yedges))
fig = plt.figure()
ax = fig.add_axes([0.2,0.15,0.75,0.8])
ax.imshow(H, interpolation='nearest', origin='lower', extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]], cmap=cm_blue, aspect='auto')
ax.set_xlabel(r"$\theta_1$")
ax.set_ylabel(r"$\theta_2$")
fig.savefig(name+"_2Dhist_noCPV.pdf")
plt.close(fig)
print("plotting "+name+"_2Dhist_noCPV.pdf")
xedges = np.linspace(-1.,1.,51)
yedges = np.linspace(-1.,1.,51)
H, xedges, yedges = np.histogram2d(data1[:,0], data1[:,1], bins=(xedges, yedges))
fig = plt.figure()
ax = fig.add_axes([0.2,0.15,0.75,0.8])
ax.imshow(H, interpolation='nearest', origin='lower', extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]], cmap=cm_red, aspect='auto')
ax.set_xlabel(r"$\theta_1$")
ax.set_ylabel(r"$\theta_2$")
fig.savefig(name+"_2Dhist_CPV.pdf")
plt.close(fig)
print("plotting "+name+"_2Dhist_CPV.pdf")
print("data0.shape : ",data0.shape)
#print("data0 : \n", data0[:10,0])
x1min = min( [np.min(data0[:,1]),np.min(data1[:,1])])
x1max = max( [np.max(data0[:,1]),np.max(data1[:,1])])
x2min = min( [np.min(data0[:,0]),np.min(data1[:,0])])
x2max = max( [np.max(data0[:,0]),np.max(data1[:,0])])
xmin = min(x1min, x2min)
xmax = max(x1max, x2max)
x1bins = np.linspace(xmin, xmax, 51)
x2bins = np.linspace(xmin, xmax, 51)
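# Hedged continuation (the source ends abruptly above): the shared bin edges
# suggest overlaid 1D marginal histograms of the two samples were drawn next.
# Everything below is an illustrative sketch, not recovered original code.
fig = plt.figure()
ax = fig.add_axes([0.2, 0.15, 0.75, 0.8])
ax.hist(data0[:, 0], bins=x1bins, histtype='step', color='blue', label='no CPV')
ax.hist(data1[:, 0], bins=x1bins, histtype='step', color='red', label='CPV')
ax.set_xlabel(r"$\theta_1$")
ax.legend(loc='best')
fig.savefig(name + "_1Dhist.pdf")
plt.close(fig)
print("plotting " + name + "_1Dhist.pdf")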
# -*- coding: utf-8 -*-
import numpy as np
__author__ = "<NAME>"
__email__ = "<EMAIL>"
def get_principal_components(zeta, eta):
"""Return the principal components of a traceless second-rank symmetric
Cartesian tensor.
Args:
zeta: The zeta parameter in PAS, according to the Haeberlen convention.
eta: The eta parameter in PAS, according to the Haeberlen convention.
"""
xx = -0.5 * zeta * (eta + 1.0)
yy = 0.5 * zeta * (eta - 1.0)
zz = zeta
return [xx, yy, zz]
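# Worked example (illustrative): zeta = 10.0, eta = 0.5 gives
# [xx, yy, zz] = [-7.5, -2.5, 10.0]; the components sum to zero (traceless)
# and satisfy the Haeberlen ordering |zz| >= |xx| >= |yy|.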
def get_Haeberlen_components(tensors):
"""Return zeta and eta parameters of the tensor using the Haeberlen convention.
Args:
ndarray tensors: A `N x 3 x 3` ndarray of `N` traceless symmetric second-rank
Cartesian tensors.
"""
n = tensors.shape[0]
eig_val = np.linalg.eigvalsh(tensors)
eig_val_sort_ = np.argsort(np.abs(eig_val), axis=1, kind="mergesort")
eig_val_sort_ = (eig_val_sort_.T + 3 * np.arange(n)).T.ravel()
eig_val_sorted = eig_val.ravel()[eig_val_sort_].reshape(n, 3)
    del eig_val_sort_, eig_val  # free the intermediates
zeta = eig_val_sorted[:, -1]
eta = (eig_val_sorted[:, 0] - eig_val_sorted[:, 1]) / zeta
return zeta, eta
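# Round-trip check (illustrative, not part of the original module):
# pcs = get_principal_components(10.0, 0.5)
# tensor = np.diag(pcs)[np.newaxis, :, :]   # shape (1, 3, 3)
# get_Haeberlen_components(tensor)          # -> (array([10.]), array([0.5]))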
def x_y_from_zeta_eta(zeta, eta):
"""Convert the zeta, eta coordinates from the Haeberlen convention to the
x-y notation."""
xa = np.empty(zeta.size)
ya = np.empty(zeta.size)
index = np.where(zeta >= 0)
temp = np.tan(0.7853981634 * eta[index])
    ya[index] = np.sqrt(zeta[index] * zeta[index] / (temp * temp + 1.0))
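    # Hedged completion (the source is truncated here): the mirror branch for
    # negative zeta follows by swapping the roles of x and y relative to the
    # positive-zeta branch above; verify against the upstream implementation.
    xa[index] = temp * ya[index]
    index = np.where(zeta < 0)
    temp = np.tan(0.7853981634 * eta[index])
    xa[index] = np.sqrt(zeta[index] * zeta[index] / (temp * temp + 1.0))
    ya[index] = temp * xa[index]
    return xa, ya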