#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 5 13:04:27 2020
This file implements linear, exponential, and periodic (sinusoidal) schedules
for the temperature in Boltzmann exploration. It was used in an earlier version
of SPAQL.
Run it to see an example of how the probabilities of picking an action evolve
for each schedule.
"""
import numpy as np
eps = 1e-2
def get_linear_alpha(c, I, M):
"""
c: current iteration
I: maximum number of iterations
M: maximum alpha
"""
alpha = 1/I
ret = M * (1 - alpha*c)
if ret < eps:
ret = eps
return ret
def get_exp_alpha(c, I, M):
"""
c: current iteration
I: maximum number of iterations
M: maximum alpha
"""
alpha = np.log(M + 1)/I
ret = np.exp(alpha*(I-c)) - 1
if ret < eps:
ret = eps
return ret
def get_sin_alpha(c, I, M, periods=2):
"""
c: current iteration
I: maximum number of iterations
M: maximum alpha
periods: number of times actions are uniformly sampled
"""
alpha = np.pi/I * (2*periods - 1)
ret = M * (1 - np.cos(alpha*(I-c)))/2
if ret < eps:
ret = eps
return ret
if __name__ == "__main__":
import matplotlib.pyplot as plt
nIters = 500
max_alpha = 5
labels = [f"Action {i}" for i in range(1, 6)]
fig, ax = plt.subplots(2, 2)
ax = ax.flatten()
ax1 = ax[0]
iteration = np.arange(0, nIters)
q = 5 * np.random.random((1,5))
y = np.zeros((iteration.size, q.size))
for a in iteration:
alpha = get_linear_alpha(a, nIters/2, max_alpha)
q1 = np.exp(q/alpha)
y[a,:] = q1/np.sum(q1)
ax1.stackplot(iteration, np.transpose(y), labels=labels)
ax1.set_xlabel(r"Training iteration")
ax1.set_ylabel("P(action)")
ax1.legend(loc='upper right')
ax1.set_title("Linear schedule")
ax2 = ax[1]
y = np.zeros((iteration.size, q.size))
for a in iteration:
alpha = get_exp_alpha(a, nIters/2, max_alpha)
q1 = np.exp(q/alpha)
y[a,:] = q1/np.sum(q1)
ax2.stackplot(iteration, np.transpose(y), labels=labels)
ax2.set_xlabel("Training iteration")
ax2.set_ylabel("P(action)")
ax2.legend(loc='upper right')
ax2.set_title("Exponential schedule")
ax3 = ax[2]
y = np.zeros((iteration.size, q.size))
for a in iteration:
alpha = get_sin_alpha(a, nIters/2, max_alpha, periods=1)
q1 = np.exp(q/alpha)
y[a,:] = q1/np.sum(q1)
ax3.stackplot(iteration, np.transpose(y), labels=labels)
ax3.set_xlabel("Training iteration")
ax3.set_ylabel("P(action)")
ax3.legend(loc='upper right')
ax3.set_title("Sinusoidal schedule, 1 period")
ax3 = ax[3]
y = np.zeros((iteration.size, q.size))
# -*- coding: utf-8 -*-
import time
import numpy as np
import pytest
from africanus.gridding.perleypolyhedron import (kernels,
gridder,
degridder)
from africanus.gridding.perleypolyhedron import dask as dwrap
from africanus.dft.kernels import im_to_vis
from africanus.constants import c as lightspeed
class clock:
def __init__(self, identifier="untitled"):
self._id = identifier
self._elapsed = 0.0
self._onenter = 0.0
self._onexit = 0.0
def __enter__(self):
self._onenter = time.time()
return self
def __exit__(self, extype, exval, tb):
self._onexit = time.time()
self._elapsed = self._onexit - self._onenter
@property
def elapsed(self):
return self._elapsed
def __str__(self):
res = "{0:s}: Walltime {1:.0f}m{2:.2f}s elapsed".format(
self._id, self.elapsed // 60,
self.elapsed - (self.elapsed // 60) * 60)
return res
__repr__ = __str__
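# Minimal usage sketch for the clock context manager (illustrative addition,
# not part of the test suite); the workload below is an arbitrary placeholder.
if __name__ == "__main__":
    with clock("example workload") as tictoc:
        np.linalg.inv(np.random.rand(500, 500))  # arbitrary work to time
    print(tictoc)  # e.g. "example workload: Walltime 0m0.12s elapsed"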
def test_gridder_dask():
da = pytest.importorskip("dask.array")
with clock("DASK gridding") as tictoc:
# construct kernel
W = 5
OS = 9
kern = kernels.pack_kernel(kernels.kbsinc(W, oversample=OS), W, OS)
nrow = int(1e6)
np.random.seed(0)
# simulate some fictitious baselines rotated by an hour angle
row_chunks = nrow // 10
uvw = np.zeros((nrow, 3), dtype=np.float64)
blpos = np.random.uniform(26, 10000, size=(25, 3))
ntime = int(nrow / 25.0)
d0 = np.pi / 4.0
for n in range(25):
for ih0, h0 in enumerate(
np.linspace(np.deg2rad(-20), np.deg2rad(20), ntime)):
s = np.sin
c = np.cos
R = np.array([[s(h0), c(h0), 0],
[-s(d0) * c(h0),
s(d0) * s(h0),
c(d0)],
[c(d0) * c(h0), -c(d0) * s(h0),
s(d0)]])
uvw[n * ntime + ih0, :] = np.dot(R, blpos[n, :].T)
uvw = da.from_array(uvw, chunks=(row_chunks, 3))
pxacrossbeam = 5
nchan = 128
frequency = da.from_array(np.linspace(1.0e9, 1.4e9, nchan),
chunks=(nchan, ))
wavelength = lightspeed / frequency
cell = da.rad2deg(
wavelength[0] /
(max(da.max(da.absolute(uvw[:, 0])),
da.max(da.absolute(uvw[:, 1]))) * pxacrossbeam))
npixfacet = 100
fftpad = 1.1
image_centres = da.from_array(np.array([[0, d0]]), chunks=(1, 2))
chanmap = da.from_array(np.zeros(nchan, dtype=np.int64),
chunks=(nchan, ))
detaper_facet = kernels.compute_detaper_dft_seperable(
int(npixfacet * fftpad), kernels.unpack_kernel(kern, W, OS), W,
OS)
vis_dft = da.ones(shape=(nrow, nchan, 2),
chunks=(row_chunks, nchan, 2),
dtype=np.complex64)
vis_grid_facet = dwrap.gridder(
uvw,
vis_dft,
wavelength,
chanmap,
int(npixfacet * fftpad),
cell * 3600.0,
image_centres, (0, d0),
kern,
W,
OS,
"None",
"None",
"I_FROM_XXYY",
"conv_1d_axisymmetric_packed_scatter",
do_normalize=True)
vis_grid_facet = vis_grid_facet.compute()
ftvisfacet = (np.fft.fftshift(
np.fft.ifft2(np.fft.ifftshift(
vis_grid_facet[0, :, :]))).reshape(
(1, int(npixfacet * fftpad), int(
npixfacet * fftpad)))).real / detaper_facet * int(
npixfacet * fftpad)**2
ftvisfacet = ftvisfacet[:,
int(npixfacet * fftpad) // 2 - npixfacet //
2:int(npixfacet * fftpad) // 2 -
npixfacet // 2 + npixfacet,
int(npixfacet * fftpad) // 2 - npixfacet //
2:int(npixfacet * fftpad) // 2 -
npixfacet // 2 + npixfacet]
print(tictoc)
assert (np.abs(np.max(ftvisfacet[0, :, :]) - 1.0) < 1.0e-6)
def test_gridder_nondask():
with clock("Non-DASK gridding") as tictoc:
# construct kernel
W = 5
OS = 9
kern = kernels.pack_kernel(kernels.kbsinc(W, oversample=OS), W, OS)
nrow = int(1e6)
np.random.seed(0)
# simulate some fictitious baselines rotated by an hour angle
uvw = np.zeros((nrow, 3), dtype=np.float64)
blpos = np.random.uniform(26, 10000, size=(25, 3))
ntime = int(nrow / 25.0)
d0 = np.pi / 4.0
for n in range(25):
for ih0, h0 in enumerate(
np.linspace(np.deg2rad(-20), np.deg2rad(20), ntime)):
s = np.sin
c = np.cos
R = np.array([[s(h0), c(h0), 0],
[-s(d0) * c(h0),
s(d0) * s(h0),
c(d0)],
[c(d0) * c(h0), -c(d0) * s(h0),
s(d0)]])
uvw[n * ntime + ih0, :] = np.dot(R, blpos[n, :].T)
pxacrossbeam = 5
nchan = 128
frequency = np.linspace(1.0e9, 1.4e9, nchan)
wavelength = lightspeed / frequency
cell = np.rad2deg(
wavelength[0] /
(max(np.max(np.absolute(uvw[:, 0])),
np.max(np.absolute(uvw[:, 1]))) * pxacrossbeam))
npixfacet = 100
fftpad = 1.1
image_centres = np.array([[0, d0]])
chanmap = np.zeros(nchan, dtype=np.int64)
detaper_facet = kernels.compute_detaper_dft_seperable(
int(npixfacet * fftpad), kernels.unpack_kernel(kern, W, OS), W,
OS)
vis_dft = np.ones((nrow, nchan, 2), dtype=np.complex64)
vis_grid_facet = gridder.gridder(
uvw,
vis_dft,
wavelength,
chanmap,
int(npixfacet * fftpad),
cell * 3600.0,
image_centres[0, :], (0, d0),
kern,
W,
OS,
"None",
"None",
"I_FROM_XXYY",
"conv_1d_axisymmetric_packed_scatter",
do_normalize=True)
ftvisfacet = (np.fft.fftshift(
np.fft.ifft2(np.fft.ifftshift(
vis_grid_facet[0, :, :]))).reshape(
(1, int(npixfacet * fftpad), int(
npixfacet * fftpad)))).real / detaper_facet * int(
npixfacet * fftpad)**2
ftvisfacet = ftvisfacet[:,
int(npixfacet * fftpad) // 2 - npixfacet //
2:int(npixfacet * fftpad) // 2 -
npixfacet // 2 + npixfacet,
int(npixfacet * fftpad) // 2 - npixfacet //
2:int(npixfacet * fftpad) // 2 -
npixfacet // 2 + npixfacet]
print(tictoc)
assert (np.abs(np.max(ftvisfacet[0, :, :]) - 1.0) < 1.0e-6)
def test_degrid_dft_packed_nondask():
# construct kernel
W = 5
OS = 3
kern = kernels.pack_kernel(kernels.kbsinc(W, oversample=OS),
W,
oversample=OS)
nrow = int(5e4)
uvw = np.column_stack(
(5000.0 * np.cos(np.linspace(0, 2 * np.pi, nrow)),
5000.0 * np.sin(np.linspace(0, 2 * np.pi, nrow)), np.zeros(nrow)))
pxacrossbeam = 10
nchan = 1024
frequency = np.linspace(1.0e9, 1.4e9, nchan)
wavelength = lightspeed / frequency
cell = np.rad2deg(
wavelength[0] /
(2 * max(np.max(np.abs(uvw[:, 0])), np.max(np.abs(uvw[:, 1]))) *
pxacrossbeam))
npix = 512
mod = np.zeros((1, npix, npix), dtype=np.complex64)
mod[0, npix // 2 - 5, npix // 2 - 5] = 1.0
ftmod = np.fft.ifftshift(np.fft.fft2(np.fft.fftshift(
mod[0, :, :]))).reshape((1, npix, npix))
chanmap = np.zeros(nchan, dtype=np.int64)
with clock("Non-DASK degridding") as tictoc:
degridder.degridder(
uvw,
ftmod,
wavelength,
chanmap,
cell * 3600.0,
(0, np.pi / 4.0),
(0, np.pi / 4.0),
kern,
W,
OS,
"None", # no faceting
"None", # no faceting
"XXYY_FROM_I",
"conv_1d_axisymmetric_packed_gather")
print(tictoc)
def test_degrid_dft_packed_dask():
da = pytest.importorskip("dask.array")
# construct kernel
W = 5
OS = 3
kern = kernels.pack_kernel(kernels.kbsinc(W, oversample=OS),
W,
oversample=OS)
nrow = int(5e4)
nrow_chunk = nrow // 32
uvw = np.column_stack(
        (5000.0 * np.cos(np.linspace(0, 2 * np.pi, nrow)),
         5000.0 * np.sin(np.linspace(0, 2 * np.pi, nrow)), np.zeros(nrow)))
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cm as cm
import netCDF4
import scipy.interpolate as intrp
import datetime
import gsw
import seawater as sw
import os
import sys
from mpl_toolkits.basemap import Basemap
import cmocean
import pygamma
import copy
import glob
import xarray as xr
from holteandtalley import HolteAndTalley
import time
from IPython.display import display  # display() is used below; assumes an IPython environment
class grids_one_buoy():
def __init__(self,filename,**kargs):
if "den_ml_crit" in kargs:
den_ml_crit = kargs["den_ml_crit"]
else:
den_ml_crit = 0.03
if "DO_ml_crit" in kargs:
DO_ml_crit = kargs["DO_ml_crit"]
else:
#DO_ml_crit = 1. # Kortzinger 2008: scaled in proportion to the 0.03 kg m-3 density criterion (Kortzinger uses 0.125 kg m-3)
#DO_ml_crit = 5. #Kortzinger 2008
DO_ml_crit = 2.5
if "dz" in kargs:
dz = kargs["dz"]
else:
dz = 5.
if "dzLT" in kargs:
dzLT = kargs["dzLT"]
else:
dzLT = 20.
if "gridding" in kargs:
gridding = kargs["gridding"]
else:
gridding = False
if "display_info" in kargs:
display_info = kargs["display_info"]
else:
display_info = False
if "verbose" in kargs:
verbose = kargs["verbose"]
else:
verbose = False
if "clear_short" in kargs:
#drops profiles that are cut short, shallower than 950 m
clear_short = kargs["clear_short"]
else:
clear_short = False
nfc = netCDF4.Dataset(filename)
metadata = nfc.__dict__["Comments"]
if display_info:
display(nfc)
variables = list(nfc.variables.keys())
#print(nfc)
self.raw = dict()
self.raw["depth"] = nfc["Depth"][:]
self.raw["Lat"] = nfc["Lat"][:]
self.raw["Lon"] = nfc["Lon"][:]
self.raw["Lon"][self.raw["Lon"]>180] = self.raw["Lon"][self.raw["Lon"]>180] - 360.
#UOW CODE
i0 = filename.rfind("/")+1
i1 = filename.rfind("_")
self.raw["code"]= filename[i0:i1]
#WMO code
WMO_str = "WMO ID:"
i0 = metadata.find(WMO_str) + len(WMO_str) + 1
i1 = metadata[i0:].find("\n") + i0
self.raw["WMO_code"] = metadata[i0:i1]
ref_date_str = nfc["REFERENCE_DATE_TIME"][:].tobytes().decode("ascii")  # tobytes(): tostring() was removed from recent numpy
ref_date = datetime.datetime.strptime(ref_date_str,"%Y%m%d%H%M%S")
self.raw["date"] = nfc["JULD"][:] + ref_date.toordinal()
self.raw["date_dt"] = convert_time_to_date(self.raw["date"])
#reads the variables
self.raw["depth"] = nfc["Depth"][:].T
if np.ma.isMaskedArray(self.raw["depth"]):
self.raw["depth"].mask = (self.raw["depth"].mask) | (nfc["Depth_QFA"][:].T == 8) | (self.raw["depth"]<0)
else:
self.raw["depth"] = np.ma.array(self.raw["depth"])
self.raw["depth"].mask = (nfc["Depth_QFA"][:].T == 8)
self.raw["Pressure"] = nfc["Pressure"][:].T
if np.ma.isMaskedArray(self.raw["Pressure"]):
self.raw["Pressure"].mask = (self.raw["Pressure"].mask) | (nfc["Pressure_QFA"][:].T == 8)
else:
self.raw["Pressure"] = np.ma.array(self.raw["Pressure"])
self.raw["Pressure"].mask = (nfc["Pressure_QFA"][:].T == 8)
self.raw["Temperature"] = nfc["Temperature"][:].T
if np.ma.isMaskedArray(self.raw["Temperature"]):
self.raw["Temperature"].mask = (self.raw["Temperature"].mask) | (nfc["Temperature_QFA"][:].T == 8)
else:
self.raw["Temperature"] = np.ma.array(self.raw["Temperature"])
self.raw["Temperature"].mask = (nfc["Temperature_QFA"][:].T == 8)
self.raw["Salinity"] = nfc["Salinity"][:].T
if np.ma.isMaskedArray(self.raw["Salinity"]):
self.raw["Salinity"].mask = (self.raw["Salinity"].mask) | (nfc["Salinity_QFA"][:].T == 8)
else:
self.raw["Salinity"] = np.ma.array(self.raw["Salinity"])
self.raw["Salinity"].mask = (nfc["Salinity_QFA"][:].T == 8)
#derived values
self.raw["SA"] = gsw.SA_from_SP( self.raw["Salinity"], self.raw["Pressure"], self.raw["Lon"], self.raw["Lat"] ) #-10.1325
self.raw["CT"] = gsw.CT_from_t(self.raw["SA"],self.raw["Temperature"],self.raw["Pressure"]) #-10.1325
self.raw["Sigma_theta"] = gsw.sigma0(self.raw["SA"],self.raw["CT"])
self.raw["gamma_n"] = np.transpose(pygamma.gamma_n( self.raw["Salinity"].T, self.raw["Temperature"].T, self.raw["Pressure"].T, self.raw["Lon"], self.raw["Lat"] )[0])
if not np.ma.isMaskedArray(self.raw["gamma_n"]):
self.raw["gamma_n"] = np.ma.array( self.raw["gamma_n"] )
self.raw["gamma_n"].mask = np.copy( self.raw["Sigma_theta"].mask )
#biogeochemical
bg_vars = ["Oxygen","OxygenSat","Nitrate","DIC_LIAR","TALK_LIAR","pCO2_LIAR","Chl_a","POC"]  # "Chl_a" matches the key stored in raw_bg below
self.raw_bg = dict()
if "Oxygen" in variables:
self.raw_bg["Oxygen"] = nfc["Oxygen"][:].T
if np.ma.isMaskedArray(self.raw_bg["Oxygen"]):
self.raw_bg["Oxygen"].mask = (self.raw_bg["Oxygen"].mask) | (nfc["Oxygen_QFA"][:].T == 8)
else:
self.raw_bg["Oxygen"] = np.ma.array(self.raw_bg["Oxygen"])
self.raw_bg["Oxygen"].mask = (nfc["Oxygen_QFA"][:].T == 8)
if "OxygenSat" in variables:
self.raw_bg["OxygenSat"] = nfc["OxygenSat"][:].T
if np.ma.isMaskedArray(self.raw_bg["OxygenSat"]):
self.raw_bg["OxygenSat"].mask = (self.raw_bg["OxygenSat"].mask) | (nfc["OxygenSat_QFA"][:].T == 8)
else:
self.raw_bg["OxygenSat"] = np.ma.array(self.raw_bg["OxygenSat"])
self.raw_bg["OxygenSat"].mask = (nfc["OxygenSat_QFA"][:].T == 8)
if "Nitrate" in variables:
self.raw_bg["Nitrate"] = nfc["Nitrate"][:].T
if np.ma.isMaskedArray(self.raw_bg["Nitrate"]):
self.raw_bg["Nitrate"].mask = (self.raw_bg["Nitrate"].mask) | (nfc["Nitrate_QFA"][:].T == 8)
else:
self.raw_bg["Nitrate"] = np.ma.array(self.raw_bg["Nitrate"])
self.raw_bg["Nitrate"].mask = (nfc["Nitrate_QFA"][:].T == 8)
if "DIC_LIAR" in variables:
self.raw_bg["DIC_LIAR"] = nfc["DIC_LIAR"][:].T
if np.ma.isMaskedArray(self.raw_bg["DIC_LIAR"]):
self.raw_bg["DIC_LIAR"].mask = (self.raw_bg["DIC_LIAR"].mask) | (nfc["DIC_LIAR_QFA"][:].T == 8)
else:
self.raw_bg["DIC_LIAR"] = np.ma.array(self.raw_bg["DIC_LIAR"])
self.raw_bg["DIC_LIAR"].mask = (nfc["DIC_LIAR_QFA"][:].T == 8)
if "TALK_LIAR" in variables:
self.raw_bg["TALK_LIAR"] = nfc["TALK_LIAR"][:].T
if np.ma.isMaskedArray(self.raw_bg["TALK_LIAR"]):
self.raw_bg["TALK_LIAR"].mask = (self.raw_bg["TALK_LIAR"].mask) | (nfc["TALK_LIAR_QFA"][:].T == 8)
else:
self.raw_bg["TALK_LIAR"] = np.ma.array(self.raw_bg["TALK_LIAR"])
self.raw_bg["TALK_LIAR"].mask = (nfc["TALK_LIAR_QFA"][:].T == 8)
if "pCO2_LIAR" in variables:
self.raw_bg["pCO2_LIAR"] = nfc["pCO2_LIAR"][:].T
if np.ma.isMaskedArray(self.raw_bg["pCO2_LIAR"]):
self.raw_bg["pCO2_LIAR"].mask = (self.raw_bg["pCO2_LIAR"].mask) | (nfc["pCO2_LIAR_QFA"][:].T == 8)
else:
self.raw_bg["pCO2_LIAR"] = np.ma.array(self.raw_bg["pCO2_LIAR"])
self.raw_bg["pCO2_LIAR"].mask = (nfc["pCO2_LIAR_QFA"][:].T == 8)
if "Chl_a_corr" in variables:
self.raw_bg["Chl_a"] = nfc["Chl_a_corr"][:].T
if np.ma.isMaskedArray(self.raw_bg["Chl_a"]):
self.raw_bg["Chl_a"].mask = (self.raw_bg["Chl_a"].mask) | (nfc["Chl_a_corr_QFA"][:].T == 8)
else:
self.raw_bg["Chl_a"] = np.ma.array(self.raw_bg["Chl_a"])
self.raw_bg["Chl_a"].mask = (nfc["Chl_a_corr_QFA"][:].T == 8)
if "POC" in variables:
self.raw_bg["POC"] = nfc["POC"][:].T
if np.ma.isMaskedArray(self.raw_bg["POC"]):
self.raw_bg["POC"].mask = (self.raw_bg["POC"].mask) | (nfc["POC_QFA"][:].T == 8)
else:
self.raw_bg["POC"] = np.ma.array(self.raw_bg["POC"])
self.raw_bg["POC"].mask = (nfc["POC_QFA"][:].T == 8)
nt = self.raw["Temperature"].shape[1]
#LT
self.raw["LT_ov"] = np.full( self.raw["Temperature"].shape, np.nan )
self.raw["size_ov"] = np.full( self.raw["Temperature"].shape, np.nan )
#grids
self.gr = dict()
self.gr["depth"] = np.arange(0,2000+dz,dz)
nz = self.gr["depth"].size
self.gr["date"] = np.copy(self.raw["date"])
#self.gr["date_dt"] = convert_time_to_date(self.gr["date"])
self.gr["Lon"] = np.copy(self.raw["Lon"])
self.gr["Lat"] = np.copy(self.raw["Lat"])
self.gr["code"] = copy.copy(self.raw["code"])
self.gr["WMO_code"] = copy.copy(self.raw["WMO_code"])
#gridded variables
self.gr["Pressure"] = np.full((nz, nt), np.nan)
self.gr["Temperature"] = np.full((nz, nt), np.nan)
self.gr["Salinity"] = np.full((nz, nt), np.nan)
self.gr["SA"] = np.full((nz, nt), np.nan)
self.gr["CT"] = np.full((nz, nt), np.nan)
self.gr["Sigma_theta"] = np.full((nz, nt), np.nan)
self.gr["gamma_n"] = np.full((nz, nt), np.nan)
self.gr["N2"] = np.full((nz, nt), np.nan)
self.gr["PV"] = np.full((nz, nt), np.nan)
#biogeochemical variables
for var in bg_vars:
self.gr[var] = np.full((nz, nt), np.nan)
#mixing parameters
self.gr["LT"] = np.full((nz, nt), np.nan)
self.gr["mld"] = np.full(nt, np.nan)
self.gr["mld_HT"] = np.full(nt, np.nan)
#self.gr["gpa0"] = np.full(nt, np.nan)
self.gr["mld_DO"] = np.full(nt, np.nan)
self.gr["LT_ml"] = np.full(nt, 0.)
self.gr["LT_ov"] = np.full((nz,nt), 0.)
self.gr["LT_largest_ov"] = np.full(nt, 0.)
self.gr["size_largest_ov"] = np.full(nt, 0.)
self.gr["h_largest_ov"] = np.full(nt, 0.)
self.gr["h_no_ov"] = np.full(nt, 0.)
for i in range(nt):
if verbose:
print("Float %s, profile: %d"%(self.raw["code"],i+1))
#Interpolates temperature
ii = np.argsort(self.raw["depth"][:,i])
z0 = self.raw["depth"][ii,i]
#deletes profiles shorter than 950 m
if clear_short and max(z0)<950:
continue
p0 = self.raw["Pressure"][ii,i]
T0 = self.raw["Temperature"][ii,i]
msk = ~((T0.mask) | (z0.mask))
self.gr["Temperature"][:,i] = grids_interpolates(z0[msk], T0[msk], self.gr["depth"], dz, grid = gridding)
#Pressure
msk = ~((p0.mask) | (z0.mask))
self.gr["Pressure"][:,i] = grids_interpolates(z0[msk], p0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates potential temperature
CT0 = self.raw["CT"][ii,i]
msk = ~((CT0.mask) | (z0.mask))
self.gr["CT"][:,i] = grids_interpolates(z0[msk], CT0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates salinity
S0 = self.raw["Salinity"][ii,i]
msk = ~((S0.mask) | (z0.mask))
self.gr["Salinity"][:,i] = grids_interpolates(z0[msk], S0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates SA
SA0 = self.raw["SA"][ii,i]
msk = ~((SA0.mask) | (z0.mask))
self.gr["SA"][:,i] = grids_interpolates(z0[msk], SA0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates density
Sigma_theta0 = self.raw["Sigma_theta"][ii,i]
msk = ~((Sigma_theta0.mask) | (z0.mask))
self.gr["Sigma_theta"][:,i] = grids_interpolates(z0[msk], Sigma_theta0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates gamma_n
gamma_n0 = self.raw["gamma_n"][ii,i]
msk = ~((gamma_n0.mask) | (z0.mask))
self.gr["gamma_n"][:,i] = grids_interpolates(z0[msk].T, gamma_n0[msk].T, self.gr["depth"], dz, grid = gridding)
##
#interpolates the biogeochemical variables
##
for var in bg_vars:
if var in self.raw_bg.keys():
XX = self.raw_bg[var][ii,i]
msk = ~((XX.mask) | (z0.mask))
if np.nansum(msk)>10:
self.gr[var][:,i] = grids_interpolates(z0[msk], XX[msk],self.gr["depth"], dz, grid = gridding)
#mixed layer depth from density
msk = ~((Sigma_theta0.mask) | (z0.mask))
self.gr["mld"][i] = mixed_layer_depth(z0[msk],np.sort(np.array([Sigma_theta0[msk]]).T), Dd = den_ml_crit)[0]
#Mixed layer Holte and Talley
Pgr = self.gr["Pressure"][:,i]
CTgr = self.gr["CT"][:,i]
SAgr = self.gr["SA"][:,i]
STgr = self.gr["Sigma_theta"][:,i]
msk = ~( np.isnan(Pgr+CTgr+SAgr+STgr))
if np.sum(msk)>10:
html = HolteAndTalley( Pgr[msk], CTgr[msk], SAgr[msk], STgr[msk] )
self.gr["mld_HT"][i] = html.densityMLD
#stratification
#N2,pmid = gsw.Nsquared( self.gr["SA"][:,i], self.gr["CT"][:,i], self.gr["Pressure"][:,i]-10.1325 )
ddendz = first_centered_differences( -self.gr["depth"], self.gr["Sigma_theta"][:,i] )
self.gr["N2"][:,i] = -(1000+self.gr["Sigma_theta"][:,i])**-1*gsw.grav( self.gr["Pressure"][:,i], self.gr["Lat"][i] )*ddendz #-10.1325
self.gr["PV"][:,i] = (1000+self.gr["Sigma_theta"][:,i])**-1*gsw.f( self.gr["Lat"][i] )*ddendz
#self.gr["PV"][:,i] = sw.f( self.gr["Lat"][i] )*self.gr["N2"][:,i]
"""
#geopotential anomaly
msk = ~( (S0.mask) | (T0.mask) | (p0.mask) )
if np.sum(msk)>10:
self.gr["gpa0"][i] = geopotential_anomaly(CT0[msk],SA0[msk], p0[msk])
"""
#calculates thorpe displacements and mean LT
igood = np.where( ~((Sigma_theta0.mask) | (z0.mask) ))[0]
if igood.size<10:
continue
Sigma_theta00 = Sigma_theta0[igood].data
z00 = z0[igood].data
isort = np.argsort( Sigma_theta00)
disp = z00 - z00[isort]
nz1000 = np.where( self.gr["depth"]<=1000 )[0][-1]
for j in range(nz1000):
if self.gr["depth"][j]>1000:
break
jj = (z00>= self.gr["depth"][j]-dzLT) & (z00<= self.gr["depth"][j]+dzLT)
self.gr["LT"][j,i] = np.nanmean(disp[jj]**2)**0.5
#detection of Thorpe overturns
ii1000 = (z00<=1000) & (np.isfinite(Sigma_theta00))
zth,LT, ovsize, ovnum = calculates_thorpe_scale(z00[ii1000], Sigma_theta00[ii1000])
self.raw["LT_ov"][:,i] = grids_interpolates(zth,LT,self.raw["depth"][:,i].data, dz, grid = gridding)
self.raw["size_ov"][:,i] = grids_interpolates(zth,ovsize,self.raw["depth"][:,i].data,dz)
self.gr["LT_ov"][:,i] = grids_interpolates(zth,LT,self.gr["depth"], dz, grid = gridding)
#mean thorpe displacement in the mixed layer
jjmld = np.where(z00<=self.gr["mld"][i])[0]
if jjmld.size>0:
self.gr["LT_ml"][i] = np.nanmean( (disp[jjmld]-np.mean(disp[jjmld]))**2)**0.5
else:
self.gr["LT_ml"][i] = 0.
#stores the size and LT of biggest overturn within the mixed layer
jjml = np.where(zth<=self.gr["mld"][i])[0]
if jjml.size:
j_largest = jjml[ np.argmax(ovsize[jjml]) ]
n_largest_ov = ovnum[ j_largest ]
j_bot_largest = np.where(ovnum == n_largest_ov)[0][-1]
if n_largest_ov>0:
self.gr["size_largest_ov"][i] = ovsize[0]
self.gr["LT_largest_ov"][i] = LT[0]
self.gr["h_largest_ov"][i] = zth[ j_bot_largest]
#first depth with no overturn
i_nov = np.where(ovsize==0.)[0]
if i_nov.size>0:
self.gr["h_no_ov"][i] = zth[ i_nov[0] ]
else:
self.gr["h_no_ov"][i] = zth[ -1 ]
#mixed layer from oxygen
if "Oxygen" in self.raw_bg.keys():
XX = self.raw_bg["Oxygen"][ii,i]
msk = ~XX.mask
if np.nansum(msk)>5:
mld_DO_0 = mixed_layer_depth(z0[msk], -np.array([XX[msk]]).T, Dd = DO_ml_crit)[0]
mld_DO_1 = mixed_layer_depth(z0[msk], np.array([XX[msk]]).T, Dd = DO_ml_crit)[0]
self.gr["mld_DO"][i] = np.nanmin(np.array([mld_DO_0,mld_DO_1]))
#self.gr["mld_DO"][i] = mixed_layer_depth(z0[msk], -np.array([XX[msk]]).T, Dd = DO_ml_crit, crit = "DO")[0]
self.gr["gpa"] = gsw.geo_strf_dyn_height(self.gr["SA"], self.gr["CT"], self.gr["Pressure"], interp_method = "linear", p_ref = 500.)
self.gr["gpa_500_1500"] = np.full(nt, np.nan)
for i in range(nt):
try:
j = np.nanargmin(np.abs(self.gr["Pressure"][:,i]-1500. ))
except:
j = np.nan
if np.isnan(j) or np.abs(self.gr["Pressure"][j,i]-1500)>100:
continue
self.gr["gpa_500_1500"][i] = -self.gr["gpa"][j,i]
#other derived variables
self.gr["AOU"] = 100*self.gr["Oxygen"]/self.gr["OxygenSat"]-self.gr["Oxygen"]
##calculates PT and SP
#self.gr["SP"] = gsw.SP_from_SA( self.gr["SA"], self.gr["Pressure"], self.gr["Lon"], self.gr["Lat"] )
#self.gr["PT"] = gsw.pt_from_CT( self.gr["SA"], self.gr["CT"] )
def calculates_carbon_framework(self,**kargs):
#kargs: CO2file (file for xCO2 data), sp (surface pressure in Pa), timemet (meteo time for surface pressure)
print("Carbon framework")
if "CO2file" in kargs:
CO2args = {"textfile": kargs["CO2file"]}
else:
CO2args = {}
if "ML_zero" in kargs:
ML_zero = kargs["ML_zero"]
else:
ML_zero = True
intCO2 = reads_CO2_file_cape_grim(interpolation = "linear",plots = False, **CO2args)
xCO2 = intCO2(self.gr["date"])
if "sp" in kargs:
if type(kargs["timemet"])==np.datetime64:
kargs["timemet"] = convert_datetime64_to_time(kargs["timemet"])
sp = np.full( self.gr["date"].size, np.nan )
for i in range(self.gr["date"].size):
if i == 0:
time0 = self.gr["date"][0]-5.
if self.gr["date"].size>1:
time1 = 0.5*(self.gr["date"][0]+self.gr["date"][1])
else:
time1 = self.gr["date"][0]+5.
if i==self.gr["date"].size-1:
time0 = 0.5*(self.gr["date"][i-1]+self.gr["date"][i])
time1 = self.gr["date"][i]+5.
else:
time0 = 0.5*(self.gr["date"][i-1]+self.gr["date"][i])
time1 = 0.5*(self.gr["date"][i]+self.gr["date"][i+1])
ij = np.where( (kargs["timemet"]>=time0) & (kargs["timemet"]<=time1) )[0]
if ij.size == 0:
continue
sp[i] = np.nanmean(kargs["sp"][ij])/101325.
nt = self.gr["date"].size
nz = self.gr["depth"].size
zM = np.tile(self.gr["depth"],(nt,1)).T
mldM = np.tile(self.gr["mld"],(nz,1))
ismld = zM<mldM
Tml = np.copy(self.gr["CT"])
Tml[~ismld] = np.nan
Tml = np.nanmean(Tml, axis = 0)
Sml = np.copy(self.gr["SA"])
Sml[~ismld] = np.nan
Sml = np.nanmean(Sml, axis = 0)
pH2O = partial_pressure_water_vapour( Sml, Tml )
pCO2atm = xCO2*(sp - pH2O)
else:
pCO2atm = np.copy(xCO2)
self.gr["CF"] = carbon_framework(self.gr["DIC_LIAR"], self.gr["TALK_LIAR"], self.gr["SA"],\
self.gr["CT"], self.gr["Pressure"], self.gr["Lon"], self.gr["Lat"], \
self.gr["AOU"], pCO2atm,self.gr["depth"], mld = self.gr["mld"], ML_zero = ML_zero)
self.gr["CF"]["pCO2atm"] = np.copy(pCO2atm)
def calculates_CO2_O2_flux(self, met,**kargs):
if type(met["time"][0]) == np.datetime64:
met["time"] = convert_datetime64_to_time(met["time"])
met["Wsp"],met["wind_dir"] = uv_to_wdir( met["u10"], met["v10"] )
nt = self.gr["date"].size
nz = self.gr["depth"].size
zM = np.tile(self.gr["depth"],(nt,1)).T
mldM = np.tile(self.gr["mld"],(nz,1))
ismld = zM<mldM
Tml = np.copy(self.gr["CT"])
Tml[~ismld] = np.nan
Tml = np.nanmean(Tml, axis = 0)
iif = np.isfinite(Tml)
if np.sum(iif)>2:
intTml = intrp.interp1d( self.gr["date"][iif], Tml[iif], bounds_error = False )
Tml_met = intTml( met["time"])
iif = np.where(np.isfinite(Tml_met))[0]
Tml_met[0:iif[0]] = Tml_met[iif[0]]
Tml_met[iif[-1]+1:] = Tml_met[iif[-1]]
else:
Tml_met = np.nanmean(Tml[iif])*np.ones(met["time"].size)
Sml = np.copy(self.gr["SA"])
Sml[~ismld] = np.nan
Sml = np.nanmean(Sml, axis = 0)
iif = np.isfinite(Sml)
if np.sum(iif)>2:
intSml = intrp.interp1d( self.gr["date"][iif], Sml[iif], bounds_error = False )
Sml_met = intSml( met["time"])
iif = np.where(np.isfinite(Sml_met))[0]
Sml_met[0:iif[0]] = Sml_met[iif[0]]
Sml_met[iif[-1]+1:] = Sml_met[iif[-1]]
else:
Sml_met = np.nanmean(Sml[iif])*np.ones(met["time"].size)
denml = np.copy(self.gr["Sigma_theta"])
denml[~ismld] = np.nan
denml = np.nanmean(denml, axis = 0)
iif = np.isfinite(denml)
if np.sum(iif)>2:
intdenml = intrp.interp1d( self.gr["date"][iif], denml[iif], bounds_error = False )
denml_met = intdenml( met["time"])
iif = np.where(np.isfinite(denml_met))[0]
denml_met[0:iif[0]] = denml_met[iif[0]]
denml_met[iif[-1]+1:] = denml_met[iif[-1]]
else:
denml_met = np.nanmean(denml[iif])*np.ones(met["time"].size)
AOUml = np.copy(self.gr["AOU"])
AOUml[~ismld] = np.nan
AOUml = np.nanmean(AOUml, axis = 0)
iif = np.isfinite(AOUml)
if np.sum(iif)>10:
intAOUml = intrp.interp1d( self.gr["date"][iif], AOUml[iif], bounds_error = False )
AOUml_met = intAOUml( met["time"])
iif = np.where(np.isfinite(AOUml_met))[0]
AOUml_met[0:iif[0]] = AOUml_met[iif[0]]
if iif[-1]>= AOUml_met.size*3./4.:
AOUml_met[iif[-1]+1:] = AOUml_met[iif[-1]]
else:
AOUml_met = np.full(met["time"].size, np.nan)
pCO2ml = np.copy(self.gr["pCO2_LIAR"])
pCO2ml[~ismld] = np.nan
pCO2ml = np.nanmean(pCO2ml, axis = 0)
iif = np.isfinite(pCO2ml)
if np.sum(iif) > 10:
intpCO2ml = intrp.interp1d( self.gr["date"][iif], pCO2ml[iif], bounds_error = False )
pCO2ml_met = intpCO2ml( met["time"])
iif = np.where(np.isfinite(pCO2ml_met))[0]
pCO2ml_met[0:iif[0]] = pCO2ml_met[iif[0]]
if iif[-1]>= pCO2ml_met.size*3./4.:
pCO2ml_met[iif[-1]+1:] = pCO2ml_met[iif[-1]]
else:
pCO2ml_met = np.full(met["time"].size, np.nan)
if "CO2file" in kargs:
CO2args = {"textfile": kargs["CO2file"]}
else:
CO2args = {}
intCO2 = reads_CO2_file_cape_grim(interpolation = "linear",plots = False, **CO2args)
#interpolates CO2
xCO2met = intCO2(met["time"])
pH2Oatm = partial_pressure_water_vapour( Sml_met, Tml_met )
pCO2atm = xCO2met*(met["sp"]/101325. - pH2Oatm)
K0 = CO2_solubility(Sml_met, Tml_met)
#gets the CO2 flux
kwCO2 = kw_wanninkhof(met["Wsp"],Tml_met, gas = "CO2")/100*24. #m/d
FCO2 = kwCO2*K0*(pCO2ml_met - pCO2atm )*365/1000.*(1000+denml_met)/1000 #umol/kg *m/d *365/1000 ~ mol m-2 y-1
#gets the oxygen flux
kwO2 = kw_wanninkhof(met["Wsp"],Tml_met, gas = "O2")/100*24. #m/d
FO2 = -kwO2*(AOUml_met)*365/1000.*(1000+denml_met)/1000 #umol/kg *m/d *365/1000~ mmol m-2 d-1 ~ mol m-2 y-1
self.gr["FCO2"] = np.full(nt, np.nan)
self.gr["FO2"] = np.full(nt, np.nan)
for i in range(nt):
ij = np.where( (np.abs( self.gr["date"][i] - met["time"] )<5.) )[0]
if ij.size == 0:
continue
if np.isnan(pCO2ml[i]) or np.isnan(Tml[i]):
continue
#removes data with ice
if Tml[i]<-1:
if np.sum( np.isfinite(self.gr["CT"][0:2,i]) ) == 0:
continue
self.gr["FCO2"][i] = np.nanmean(FCO2[ij])
self.gr["FO2"][i] = np.nanmean(FO2[ij])
def plots_all_mixing_profiles(self, save = True, show = False):
nprf = self.raw["date"].size
for i in range(nprf):
print("Plot profile %d of %d"%(i+1, nprf))
self.plots_mixing_layer_profile(i, save = save, show = show)
def plots_mixing_layer_profile(self,pn, save = True, show = False):
if save:
if not os.path.exists('prof_ml'):
os.makedirs('prof_ml')
date0 = datetime.datetime.fromordinal(int(self.raw["date"][pn]))
date_str = date0.strftime("%Y %b %d")
if "Oxygen" in self.raw_bg.keys():
nsbp = 4
else:
nsbp = 3
xsize = int(np.round(nsbp*2.5))
fig, ax = plt.subplots(1,nsbp, sharey = True, figsize = (xsize,4))
ax[0].plot(self.gr["CT"][:,pn],self.gr["depth"],"k-", ms = 2)
ax[0].plot(self.raw["CT"][:,pn],self.raw["depth"][:,pn],"ko", ms = 2, mfc = "w")
ax[0].set_ylim(ax[0].get_ylim()[::-1])
ax[0].set_xlabel("$\\Theta$ [$^{\\mathrm{o}}$C]")
ax[0].set_ylabel("Depth [m]")
ax0 = ax[0].twiny()
ax0.plot(self.gr["SA"][:,pn],self.gr["depth"],"-", color = "gray")
ax0.plot(self.raw["SA"][:,pn],self.raw["depth"][:,pn],"o", ms = 2, mfc = "w", mec = "gray")
ax0.set_xlabel("$S_A$", color = "gray")
ax[1].plot(self.gr["Sigma_theta"][:,pn],self.gr["depth"],"k-", ms = 2)
ax[1].plot( self.raw["Sigma_theta"][:,pn], self.raw["depth"][:,pn],"ko", ms = 2, mfc = "w")
ax[1].set_xlabel("$\\sigma_{\\theta}$ [kg m$^{-3}$]")
ax[2].plot(self.raw["size_ov"][:,pn], self.raw["depth"][:,pn], color = "gray", lw = 1)
ax[2].plot(self.raw["LT_ov"][:,pn], self.raw["depth"][:,pn], color = "k")
ax[2].set_xlabel("$L_T$ (black), $l_{ov}$ (gray)")
if "Oxygen" in self.raw_bg:
ax[3].plot(self.gr["Oxygen"][:,pn],self.gr["depth"],"k-", ms = 2)
ax[3].plot( self.raw_bg["Oxygen"][:,pn], self.raw["depth"][:,pn],"ko", ms = 2, mfc = "w")
ax[3].set_xlabel("DO [$\\mu$mol kg$^{-1}$]")
ax3 = ax[3].twiny()
ax3.plot(self.gr["OxygenSat"][:,pn],self.gr["depth"],"-", ms = 2, color = "gray")
ax3.plot( self.raw_bg["OxygenSat"][:,pn], self.raw["depth"][:,pn],"o", ms = 2, mfc = "w", mec = "gray")
ax3.set_xlabel("% DO$_{sat}$", color = "gray")
for ax0 in ax:
l0 = ax0.axhline(self.gr["mld"][pn], color = cm.tab10(0))
l1 = ax0.axhline(self.gr["mld_HT"][pn], color = cm.tab10(2))
l2 = ax0.axhline(self.gr["mld_DO"][pn], color = cm.tab10(3))
l3 = ax0.axhline(self.gr["h_no_ov"][pn], color = cm.tab10(4))
l4 = ax0.axhline(self.gr["h_largest_ov"][pn], color = cm.tab10(5))
l = (l0,l1,l2, l3,l4)
ax[1].legend(l, ["mld$_{\\sigma_{\\theta}}$","mld$_{\\mathrm{HT}}$","mld$_{\\mathrm{DO}}$","$l_{ov}=0$ m","larg$^{\\mathrm{st}}$. eddy"] )
fig.suptitle("Float %s, date %s\nLon: %1.2f Lat: %1.2f"%(self.raw["code"], date_str, self.raw["Lon"][pn], self.raw["Lat"][pn]))
if save:
date_str0 = date0.strftime("%Y%m%d")
figname = "prof_ml/%s_%s.png"%(self.raw["code"],date_str0)
fig.savefig(figname, dpi = 300, bbox_inches = "tight")
if show:
plt.show()
else:
plt.close(fig)
def plots_map_main_variables(self, saves = True, shows = False,**kargs):
if not os.path.exists('float_maps'):
os.makedirs('float_maps')
if self.raw["Temperature"].shape[1] == 1:
print("Only one profile")
return
fig = plt.figure(figsize = (14,8))
ax0 = fig.add_axes([0.10,0.67,0.3,0.3])
width = 15e6; lon_0 = 0; lat_0 = -90
m1 = Basemap(width=width,height=width,projection='aeqd',
lat_0=lat_0,lon_0=lon_0)
m1.drawcoastlines()
m1.fillcontinents()
m1.drawmapboundary(fill_color='skyblue')
m1.fillcontinents(color='#cc9966',lake_color='#99ffff')
m1.drawparallels(np.arange(-80,-20,10),labels=[1,0,0,0])
m1.drawmeridians(np.arange(-180,180,30),labels=[0,0,0,1])
x,y = m1( self.raw["Lon"], self.raw["Lat"])
#plt.scatter(x,y,10,T_gr[5,:])
#plt.plot(x,y,color = "crimson")
cc = plt.scatter(x,y,20, c = self.raw["date"])#-self.raw["date"][0])
loc = mdates.AutoDateLocator()
fig.colorbar(cc, ticks=loc,
format=mdates.AutoDateFormatter(loc))
#cb = fig.colorbar(cc)
#cb.set_label("Survey day")
ax1 = fig.add_axes([0.07,0.35,0.47,0.27])
cfT=ax1.contourf(self.gr["date"], self.gr["depth"], self.gr["CT"],20, cmap = cmocean.cm.thermal)
#ccT = ax1.contour(self.gr["date"], self.gr["depth"], self.gr["Temperature"],20, colors = "w", linewidths = 1)
ax1.plot(self.gr["date"], self.gr["mld"], color = "w", lw = 1)
ax1.plot(self.gr["date"], self.gr["mld_HT"], color = "w", lw = 1, ls = "dotted")
ax1.plot(self.gr["date"], self.gr["mld_DO"], ls = "--", color = "w", lw = 1)
ax1.plot(self.gr["date"],1990*np.ones(self.gr["date"].size),marker = "|", color = "k")
cD = ax1.contour(self.gr["date"], self.gr["depth"], self.gr["gamma_n"],[26.80,27.23,27.50], colors = "skyblue", linewidths = 1)
plt.clabel(cD, fmt = "%1.2f", fontsize = 6)
cb = fig.colorbar(cfT)
ax1.annotate("$\\Theta$ [$^{\\mathrm{o}}$C]", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
if "ylim" in kargs:
yl = kargs["ylim"]
else:
yl = ax1.get_ylim()[::-1]
ax1.set_ylim(yl)
ax1.set_ylabel("Depth [m]")
ax1.set_xticklabels([])
#ax1.xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
ax2 = fig.add_axes([0.07,0.05,0.47,0.27])
cfT=ax2.contourf(self.gr["date"], self.gr["depth"], self.gr["SA"],20, cmap = cmocean.cm.haline)
#ccT = ax2.contour(self.gr["date"], self.gr["depth"], self.gr["Salinity"],20, colors = "gray", linewidths = 1)
ax2.plot(self.gr["date"], self.gr["mld"], color = "w", lw = 1)
ax2.plot(self.gr["date"], self.gr["mld_DO"], ls = "--",color = "w", lw = 1)
cb = fig.colorbar(cfT)
ax2.annotate("$S_A$", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8) )
ax2.set_ylim(yl)
ax2.set_ylabel("Depth [m]")
ax2.xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
"""
ax3 = fig.add_axes([0.54,0.65,0.47,0.27])
ccT = ax3.pcolor(self.gr["date"], self.gr["depth"], self.gr["LT"], cmap = cm.inferno)
ax3.plot(self.gr["date"], self.gr["mld"], color = "w", lw = 1)
ax3.plot(self.gr["date"], self.gr["mld_DO"], ls ="--",color = "w", lw = 1)
plt.colorbar(ccT, ax = ax3)
ax3.set_ylim(yl)
ax3.set_ylabel("Depth [m]")
ax3.set_xticklabels([])
ax3.annotate("$L_T$ [m]", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
ax3.set_title("Float: %s"%(self.raw["code"]))
"""
if "Nitrate" in self.gr.keys():
ax3 = fig.add_axes([0.54,0.65,0.47,0.27])
ccT = ax3.contourf(self.gr["date"], self.gr["depth"], self.gr["Nitrate"], 20, cmap = cmocean.cm.matter)
ax3.plot(self.gr["date"], self.gr["mld"], color = "w", lw = 1)
ax3.plot(self.gr["date"], self.gr["mld_DO"], ls ="--",color = "w", lw = 1)
plt.colorbar(ccT, ax = ax3)
ax3.set_ylim(yl)
ax3.set_ylabel("Depth [m]")
ax3.set_xticklabels([])
ax3.annotate("Nitrate [$\\mu$mol kg$^{-1}$]" , xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
ax3.set_title("Float: %s"%(self.raw["code"]))
if "Oxygen" in self.gr.keys():
ax4 = fig.add_axes([0.54,0.35,0.47,0.27])
cfT=ax4.contourf(self.gr["date"], self.gr["depth"], self.gr["Oxygen"]-100*self.gr["Oxygen"]/self.gr["OxygenSat"],20, cmap = cmocean.cm.oxy)
#ccT = ax2.contour(self.gr["date"], self.gr["depth"], self.gr["Salinity"],20, colors = "gray", linewidths = 1)
ccT = ax4.contour(self.gr["date"], self.gr["depth"], self.gr["Oxygen"]-100*self.gr["Oxygen"]/self.gr["OxygenSat"],[0], colors = "blue", linewidths = 1)
ax4.plot(self.gr["date"], self.gr["mld"], color = "k", lw = 1)
ax4.plot(self.gr["date"], self.gr["mld_DO"], ls = "--", color = "k", lw = 1)
cb = fig.colorbar(cfT)
ax4.annotate("DO-DO$_{\\mathrm{sat}}$ [$\\mu$ mol kg$^{-1}$]", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
ax4.set_ylim(yl)
ax4.set_yticklabels([])
ax4.set_xticklabels([])
if "DIC_LIAR" in self.gr.keys():
ax5 = fig.add_axes([0.54,0.05,0.47,0.27])
cfT=ax5.contourf(self.gr["date"], self.gr["depth"], self.gr["DIC_LIAR"],20, cmap = cmocean.cm.ice_r)
#ccT = ax2.contour(self.gr["date"], self.gr["depth"], self.gr["Salinity"],20, colors = "gray", linewidths = 1)
#ccT = ax2.contour(self.gr["date"], self.gr["depth"], self.gr["DIC_LIAR"],[0], colors = "gray", linewidths = 1)
ax5.plot(self.gr["date"], self.gr["mld"], color = "k", lw = 1)
ax5.plot(self.gr["date"], self.gr["mld_DO"], ls = "--", color = "k", lw = 1)
cb = fig.colorbar(cfT)
ax5.annotate("DIC [$\\mu$ mol kg$^{-1}$]", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
ax5.set_ylim(yl)
ax5.set_yticklabels([])
ax5.xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
filename = "float_maps/%s_map.png"%(self.raw["code"])
if saves:
fig.savefig(filename)
plt.close(fig)
if shows:
plt.show()
def grids_interpolates(x0,y0,x,dx, grid = False):
y = np.full(x.size,np.nan)
if grid:
for i in range(x.size):
jj = (x0>=x[i]-dx/2.) & (x0<=x[i]+dx/2.)
if np.nansum(jj)>0:
y[i] = np.mean(y0[jj])
igood = np.isfinite(y)
if np.sum(igood)>5:
intt = intrp.interp1d( x[igood], y[igood], bounds_error = False)
y[~igood] = intt(x[~igood])
elif np.sum(np.isfinite(y0))>5:
intt = intrp.interp1d( x0, y0, bounds_error = False)
y = intt(x)
return y
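# Usage sketch for grids_interpolates (illustrative addition): put a scattered
# synthetic profile onto a regular 5 m depth axis, once by bin-averaging
# (grid=True) and once by plain linear interpolation (grid=False).
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    z_scatter = np.sort(rng.uniform(0., 100., 60))
    y_scatter = np.sin(z_scatter / 20.) + 0.05 * rng.randn(z_scatter.size)
    z_grid = np.arange(0., 105., 5.)
    y_binned = grids_interpolates(z_scatter, y_scatter, z_grid, 5., grid=True)
    y_interp = grids_interpolates(z_scatter, y_scatter, z_grid, 5.)
    print(np.nanmax(np.abs(y_binned - y_interp)))  # small: both track sin(z/20)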
##############################
######### OTHER FUNCTIONS ####
##############################
def mixed_layer_depth(z0, den0, Dd = 0.03, crit = "diff", z_min = 30., interp = True):
#Mixed layer calculation
if crit != "diff" and crit != "grad" and crit != "DO":
crit = "diff"
print("Incorrect criterion, set to diff")
c,f = den0.shape
MLD = np.full(f, np.nan)
for i in range(f):
if z0.ndim ==1:
z = np.copy(z0)
else:
z = z0[:,i]
#den = np.sort(den0[:,i])
den = den0[:,i]
iif = np.isfinite(den+z)
if np.sum(iif)<=1:
continue
den = den[iif]
z = z[iif]
if np.min(z)>z_min:
continue
if crit == "diff":
sden = den[0]
denp = den-sden
imld = np.where( denp>=Dd )[0]
if imld.size == 0:
MLD[i] = np.max(z)
elif imld[0]>0:
imld = imld[0]
z2 = z[imld]
z1 = z[imld-1]
denp2 = denp[imld]
denp1 = denp[imld-1]
if interp:
MLD[i] = (z2-z1)/(denp2-denp1)*(Dd - denp1) + z1
else:
MLD[i] = (z1+z2)*0.5
else:
MLD[i] = np.max(z)
#MLD[i] = z0[0,i]
elif crit == "grad":
grden = np.abs(first_centered_differences(z, den))
imld = np.where(grden>=Dd)[0]
if imld.size == 0:
MLD[i] = np.max(z)
elif imld[0]>0:
imld = imld[0]
z2 = z[imld]
z1 = z[imld-1]
grd2 = grden[imld]
grd1 = grden[imld-1]
if interp:
MLD[i] = (z2-z1)/(grd2-grd1)*(Dd - grd1) + z1
else:
MLD[i] = 0.5*(z1+z2)
else:
MLD[i] = z[0]
if crit == "DO":
sden = den[0]
denp = den-sden
imld = np.where( np.abs(denp)>=Dd )[0]
if imld.size == 0:
MLD[i] = np.max(z)
elif imld[0]>0:
imld = imld[0]
z2 = z[imld]
z1 = z[imld-1]
MLD[i] = z1
else:
MLD[i] = np.max(z)
#MLD[i] = z0[0,i]
return MLD
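# Usage sketch for mixed_layer_depth (illustrative addition): a synthetic
# two-layer profile with a density step at 50 m; den0 must be (nz, nprofiles).
if __name__ == "__main__":
    z = np.arange(0., 200., 5.)
    den = np.where(z < 50., 27.0, 27.5)[:, None]  # one profile as a column
    print(mixed_layer_depth(z, den, Dd=0.03))  # -> [~45.3], interpolated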
def calculates_thorpe_scale(z,dens,PLOT = False):
#sorts for ascending depth
ii = np.argsort(z)
z = z[ii]
dens = dens[ii]
#sorts for ascending density
jj = np.argsort(dens)
disp = z - z[jj]
nn = disp.size
#Looks for individual overturns
LT = np.zeros(nn)
ov_size = np.zeros(nn)
ov_num = np.zeros(nn)
ovN0 = 1
i = 0
while True:
#plt.plot(dens[i:]-dens[i])
ii_lighter0 = np.where( (dens[i:]-dens[i])<=0 )[0]
if ii_lighter0.size>1:
ii_lighter = np.arange(i,i+ii_lighter0[-1]+1)
#print(ii_lighter0)
dens_ov = dens[ii_lighter]
z_ov = z[ii_lighter]
jj = np.argsort(dens_ov)
disp_ov = z_ov - z_ov[jj]
#print(disp_ov)
LT[ii_lighter] = np.nanmean(disp_ov**2)**0.5
if LT[ii_lighter][0]>0:
ov_size[ii_lighter] = np.max(z_ov)-np.min(z_ov)
ov_num[ii_lighter] = ovN0
ovN0+=1
i = ii_lighter[-1]+1
else:
i+=1
if i>=nn:
break
if PLOT == True:
fig, ax = plt.subplots(1,2, sharey = True)
ax[0].plot(dens, z)
ax[0].set_ylim(ax[0].get_ylim()[::-1])
ax[0].set_xlabel("$\\sigma_{\\theta}$ [kg m$^{-3}$]")
ax[0].set_ylabel("Depth [m]")
ax[1].plot(np.abs(disp),z, lw = 1, color = "gray")
ax[1].plot(LT,z, color = "k")
#ax[1].plot(ov_size,z)
ax[1].set_xlabel("$L_T$ [m]")
plt.show()
return z, LT, ov_size, ov_num
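# Usage sketch for calculates_thorpe_scale (illustrative addition): a stable
# linear density profile with one artificial overturn between 20 and 40 m.
if __name__ == "__main__":
    z = np.arange(0., 100., 2.)
    dens = 27.0 + 0.01 * z
    j1, j2 = np.searchsorted(z, 20.), np.searchsorted(z, 40.)
    dens[j1:j2] = dens[j1:j2][::-1].copy()  # invert a slab to force an overturn
    zth, LT, ov_size, ov_num = calculates_thorpe_scale(z, dens)
    print(LT.max(), ov_size.max())  # nonzero only inside the inverted slab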
def geopotential_anomaly(CT,SA,p, pref = np.array([500.,1500.])):
rho = gsw.rho(SA,CT,p)
rho0 = gsw.rho(35.,0.,p)
delta = rho**-1 - rho0**-1
#delta = gsw.specvol_anom_standard(SA,CT,p+10)
if np.max(p)<np.max(pref):
return np.nan
p_i = np.arange(pref[0], pref[1]+1.,1.)
dp = 1.*1e4 #Pa
intd = intrp.interp1d( p, delta, bounds_error = False )
delta_i = intd( p_i )
gpa = np.sum(dp*delta_i)
return gpa
def FCD_2d(x, y, axis = 0):
if x.ndim != 2 or y.ndim !=2:
sys.exit("Invalid dimensions")
if axis != 0 and axis != 1:
sys.exit("Invalid axis")
if axis == 1:
x = x.T
y = y.T
dy = np.full(y.shape,np.nan)
for i in range(x.shape[1]):
dy[:,i] = first_centered_differences(x[:,i], y[:,i])
if axis == 1:
dy = dy.T
return dy
def first_centered_differences(x, y, fill = False):
if x.size != y.size:
print("first-centered differences: vectors do not have the same size")
dy = np.full( x.size, np.nan )
iif = np.where( (np.isfinite(x)) & (np.isfinite(y))) [0]
if iif.size < 2:
return dy
x0 = x[iif]
y0 = y[iif]
dy0 = np.full( x0.size, np.nan )
#calculates differences
dy0[0] = (y0[1] - y0[0])/(x0[1]-x0[0])
dy0[-1] = (y0[-1] - y0[-2])/(x0[-1]-x0[-2])
dy0[1:-1] = (y0[2:] - y0[0:-2])/(x0[2:]- x0[0:-2])
dy[iif] = dy0
if fill:
dy[0:iif[0]] = dy[iif[0]]
dy[iif[-1]+1:] = dy[iif[-1]]
return dy
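# Quick numeric check for first_centered_differences (illustrative addition):
# for y = x**2 the centered estimate equals 2x exactly in the interior, with
# one-sided O(dx) error at the two endpoints.
if __name__ == "__main__":
    x = np.linspace(0., 1., 11)
    print(first_centered_differences(x, x**2) - 2. * x)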
def moving_average(x,n, window = "flat"):
if n%2 == 0:
n+=1
N = x.size
cx = np.full(x.size, np.nan)
for i in range(N):
ii = np.arange(i-n//2, i+n//2+1,1)
if window == "flat":
ww = np.ones(ii.size)
elif window == "gauss":
xx = ii - i
ww = np.exp(- xx**2/(float(n)/4)**2 )
elif window == "hanning":
ww = np.hanning(ii.size)
ww = ww[ (ii>=0) & (ii<N)]
ii = ii[ (ii>=0) & (ii<N)]
kk = np.isfinite(x[ii])
if np.sum(kk)<0.25*ii.size:
continue
cx[i] = np.sum(x[ii[kk]]*ww[kk])/np.sum(ww[kk])
return cx
#time conversion
def convert_time_to_date(time):
date = [datetime.datetime.fromordinal(int(time0)) + datetime.timedelta(time0%1) for time0 in time]
return date
def convert_date_to_time(date):
N = len(date)
time = np.full(N, np.nan)
for i in range(N):
time[i]=date[i].toordinal() + date[i].hour/24. + date[i].minute/24./60. + date[i].second/24./60./60. + date[i].microsecond/24./60./60./1e6
return time
def convert_datetime64_to_date(date64):
ts = (date64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
date = [datetime.datetime.utcfromtimestamp(ts0) for ts0 in ts]
return date
def convert_datetime64_to_time(date64):
ts = (date64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
date = [datetime.datetime.utcfromtimestamp(ts0) for ts0 in ts]
time = convert_date_to_time(date)
return time
####
### Meteo functions
###
#wind transformations
def wdir_to_uv(w,alpha):
alpha = 270.-alpha
alpha *=np.pi/180
u = w*np.cos(alpha)
v = w*np.sin(alpha)
return u,v
def uv_to_wdir(u,v):
w = (u**2+v**2)**0.5
alpha = 180/np.pi*np.arctan2(v,u)
alpha = 270.-alpha
alpha[alpha>360]-=360
#alpha[alpha>180] = 360 - alpha[alpha>180]
return w, alpha
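# Round-trip sketch for the wind conversions (illustrative addition): a
# 10 m/s wind from the north-west (315 deg, meteorological convention).
if __name__ == "__main__":
    u, v = wdir_to_uv(10., 315.)
    w, alpha = uv_to_wdir(np.array([u]), np.array([v]))
    print(w[0], alpha[0])  # recovers 10.0 and 315.0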
def cd_large_and_pond( U10 ):
#drag coefficient from Large and Pond 1981
CD = np.full(U10.size, np.nan)
CD[U10<11.] = 1.2
CD[U10>=11.] = 0.49 + 0.065*U10[U10>=11.]
CD *=1e-3
return CD
class ERAmeteo():
def __init__(self, folder, **kargs):
if "t_chunks" in kargs:
t_chunks = kargs["t_chunks"]
else:
t_chunks = 24
filelist = sorted(glob.glob(folder + "/*.nc"))
self.DATASET = xr.open_mfdataset(filelist, parallel = True, chunks = {"time":t_chunks})#, "latitude": 28, "longitude": 144})#
#display(self.DATASET)
#self.time = self.DATASET.time.data
def get_data(self, date_fl, lon_fl, lat_fl, VARS = ['u10', 'v10', 't2m', 'mslhf', 'msnlwrf', 'msnswrf', 'msshf', 'sst', 'sp']):
#transforms time coordinates
dt_fl = mdates.num2date(date_fl)
dt64_fl = np.array([np.datetime64(dt0) for dt0 in dt_fl])
DATASET = self.DATASET.sel( time = slice(dt64_fl[0]-np.timedelta64(10,"D"), dt64_fl[-1]+np.timedelta64(1,"D")))
DATASET = DATASET.sel(longitude = slice(np.nanmin(lon_fl)-0.5, np.nanmax(lon_fl)+0.5))
DATASET = DATASET.sel(latitude = slice(np.nanmax(lat_fl)+0.5, np.nanmin(lat_fl)-0.5))
display(DATASET)
timeERA = DATASET.time.data
ntE = timeERA.size
ntF = date_fl.size
self.ERAhr = dict()
for vv in VARS:
self.ERAhr[vv] = np.full(ntE, np.nan)
self.ERAhr["time"] = timeERA
self.ERAlr = dict()
for vv in VARS:
self.ERAlr[vv] = np.full(ntF, np.nan)
self.ERAlr["time"] = timeERA
#np.datetime64(datetime.utcnow()).astype(datetime)
#interpolated coordinates
for i in range(ntF):
if i == 0:
lon = lon_fl[i]
lat = lat_fl[i]
time1 = dt64_fl[i]
time0 = dt64_fl[i] -np.timedelta64(10,"D")
else:
lon = 0.5*(lon_fl[i]+lon_fl[i-1])
lat = 0.5*(lat_fl[i]+lat_fl[i-1])
time1 = dt64_fl[i]
time0 = dt64_fl[i-1]
if time1-time0>np.timedelta64(15,"D"):
time0 = time1 - np.timedelta64(10,"D")
time_count00 = time.time()
print("\nREADING METEO FLOAT %d of %d (%1.2f %%)"%(i+1, ntF, (i)/float(ntF)*100))
print("Float time: %s, Long: %1.2f, Lat: %1.2f"%( dt64_fl[i].astype(datetime.datetime).strftime("%Y/%m/%d %H:%M"), lon_fl[i], lat_fl[i] ))
ii = np.where( (timeERA>time0) & (timeERA<=time1))[0]
DT = DATASET.sel(time = slice(time0,time1),expver = 1)
print("Time for search: %s to %s"%( DT.time.data[0],DT.time.data[-1]))
DT = DT.sel( longitude = lon, latitude = lat, method = "nearest" )
#DT = DT.compute()
jj = np.where( (DT.time.data>time0) & (DT.time.data<=time1))[0]
for vv in VARS:
#print(vv)
#display(DT[vv])
self.ERAhr[vv][ii] = DT[vv].compute().data[jj]
self.ERAlr[vv][i] = np.nanmean(self.ERAhr[vv][ii])
#print(self.ERAlr[vv][i])
print("Elapsed time %1.1f s"%( time.time()-time_count00 ))
print("READING ENDED")
##
## GAS FLUXES
##
def CO2_solubility(S,T):
#CO2 [umol/kg/atm] solubility in seawater according to Weiss 1974
#See McGillis and Wanninkhof (2006). Marine Chemistry 98:100-108
Tk=T+273.15 # in Kelvin
lnK0 = -60.2409+93.4517*(100/Tk)+23.3585*np.log(Tk/100)+ S*(0.023517-0.023656*(Tk/100)+0.0047036*(Tk/100)**2)
K0 = np.exp(lnK0)
return K0
def partial_pressure_water_vapour(S,T):
#Partial pressure of water vapour [atm]
#See McGillis and Wanninkhof (2006). Marine Chemistry 98:100-108
#it is used to calculate pCO2 from dry air molecular fraction (X) as:
# pCO2 = (P - pH2O)*X
# see Woolf et al. (2016) J. Geophys. Res: Oceans, 121 (2) : 1229-1248
Tk=T+273.15
pH2O = np.exp( 24.4543 - 67.4509*(100/Tk) - 4.8489*np.log(Tk/100) - 0.000544*S )
return pH2O
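# Illustrative sketch applying the relation quoted above,
# pCO2 = (P - pH2O) * xCO2, together with the Weiss solubility; the inputs
# (S = 34, T = 5 degC, P = 1 atm, xCO2 = 400 ppm) are hypothetical.
if __name__ == "__main__":
    S, T = 34., 5.
    pH2O = partial_pressure_water_vapour(S, T)  # atm
    pCO2 = (1.0 - pH2O) * 400.  # uatm, for a dry-air molar fraction of 400 ppm
    print("pH2O = %1.4f atm, pCO2 = %1.1f uatm, K0 = %1.4f" % (pH2O, pCO2, CO2_solubility(S, T)))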
def reads_CO2_file_cape_grim(textfile = "atm_CO2/CapeGrim_CO2.csv", interpolation = "linear", plots = False):
ff = open(textfile)
time = []
XCO2 = []
for line in ff.readlines():
lineS = line.split(",")
if "Y" not in lineS[0]:
date0 = datetime.datetime(int(lineS[0]),int(lineS[1]),int(lineS[2]))
time0 =date0.toordinal()
XCO20 = float(lineS[4])
time.append(time0)
XCO2.append(XCO20)
time = np.array(time)
XCO2 = np.array(XCO2)
if interpolation == "linear":
intCO2 = intrp.interp1d( time, XCO2, bounds_error = False )
elif interpolation == "spline":
intCO2 = intrp.UnivariateSpline( time, XCO2 )
if plots:
xtime = np.arange( np.nanmin(time) , np.nanmax(time) ,1. )
fig, ax = plt.subplots()
ax.plot(time, XCO2,".")
ax.plot(xtime, intCO2(xtime.astype(float)))
plt.show()
return intCO2
def kw_wanninkhof(U, T, gas = "CO2"):
#wanninkhof 2014 piston velocity
kw = 0.251*U**2
Sc = Schmidt_number(T, gas)
kw = kw * (Sc/660)**-0.5
return kw
def Schmidt_number(T, gas = "CO2"):
if gas == "CO2":
Scp = np.poly1d( [0.0007555,-0.0923207, 4.7353, -136.25, 2116.8] )
elif gas == "O2":
Scp = np.poly1d( [0.00093777,-0.10939, 5.2122, -135.6, 1920.4] )
Sc = Scp(T)
return Sc
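# Illustrative sketch (hypothetical inputs): Wanninkhof (2014) piston velocity
# for CO2 at U10 = 10 m/s and T = 15 degC. kw_wanninkhof returns cm/h; the
# flux code above converts it to m/day via /100*24.
if __name__ == "__main__":
    kw_cmh = kw_wanninkhof(10., 15., gas="CO2")
    print("kw = %1.1f cm/h = %1.2f m/day" % (kw_cmh, kw_cmh / 100. * 24.))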
##
## Carbon framework
##
def carbon_framework(DIC, TALK, SA, CT, pres, lon, lat, AOU, pCO2, depth, **kargs):
import PyCO2SYS as pyco2
if "ML_zero" in kargs:
ML_zero = kargs["ML_zero"]
else:
ML_zero = True
if "prealk_eqn" in kargs:
prealk_eqn = kargs["prealk_eqn"]
else:
prealk_eqn = "GLODAP"
RNO = - 16./170
RCO = - 106./170.
CF = dict()
#calculates PT and SP for pyco2
SP = gsw.SP_from_SA( SA, pres, lon, lat )
PT = gsw.pt_from_CT( SA, CT )
#function for surface alkalinity
if prealk_eqn == "GLODAP":
ALKpre = 42.5036*SP +825.1583
elif prealk_eqn == "SOCCOM":
ALKpre = 2818.56 - 80.81*SA - 4.74 * CT + 1.922 * SA**2 + 0.117 * CT**2
##"2818.56 - 80.81*SA - 4.74 * CT + 1.922 * SA**2 + 0.117 * CT**2"
#ALKpre = eval("%s"%(prealk_eqn))
#preindustrial saturation
results = pyco2.sys(ALKpre, 278., 1,4, salinity = SP, temperature = PT)
CF["DICsat_prein"] = results["dic"]
#results = pyco2.sys(ALKpre, pCO2, 1,4, salinity = SP, temperature = PT)
#CF["DICsat_prealk"] = results["dic"]
#present day saturation
#results = pyco2.sys(ALKpre, pCO2, 1,4, salinity = SP, temperature = PT)
results = pyco2.sys(ALKpre, pCO2, 1,4, salinity = SP, temperature = PT)
CF["DICsat"] = results["dic"]
#with local alkalinity
results = pyco2.sys(TALK, pCO2, 1,4, salinity = SP, temperature = PT)
CF["DICsat_talk"] = results["dic"]
#soft tissue
CF["DICsoft"] = - RCO*AOU
#carbonate
CF["DICcarb"] = 0.5*( TALK - ALKpre - RNO * AOU )
if ML_zero and "mld" in kargs:
mld = kargs["mld"]
nt = mld.size
nz = depth.size
#gets indices for mixed layer
zM = np.tile(depth,(nt,1)).T
mldM = np.tile(mld,(nz,1))
ismld = zM<mldM
CF["DICsoft"][ismld] = 0.
CF["DICcarb"][ismld] = 0.
#DeltaC referenced to pre-industrial levels
CF["DICdelta_prein"] = DIC - CF["DICsat_prein"] - CF["DICsoft"] - CF["DICcarb"]
#DeltaC referenced to present day
CF["DICdelta"] = DIC - CF["DICsat"] - CF["DICsoft"] - CF["DICcarb"]
#Disequilibrium C preindustrial
CF["DICdis_prein"] = DIC - CF["DICsat_prein"]
#Disequilibrium C present day
CF["DICdis"] = DIC - CF["DICsat"]
#disequilibrium with local talk
CF["DICdis_talk"] = DIC - CF["DICsat_talk"]
CF["DIC"] = np.copy(DIC)
CF["ALKpre"] = np.copy(ALKpre)
return CF
###
### Net ecosystem production
def NEP_calculation(date, z, Lon, Lat, Nitrate, POC, SA, **kargs):
##FUNCTION TO CALCULATE NEP from nitrate depletion / POC accumulation
if "PLOT" in kargs:
PLOT = kargs["PLOT"]
else:
PLOT = False
#first I convert the numerical date to a datetime format so I can get the month and year vectors
RCN = 106/16. # Redfield ratio
nt = date.size
dateDT = convert_time_to_date( date )
year = np.full( nt, np.nan )
month = np.full(nt, np.nan)
for i in range(nt):
year[i] = dateDT[i].year
month[i] = dateDT[i].month
#integration depth
if "mld" in kargs:
H = min([np.nanmax(kargs["mld"]),500]) # calculates the maximum ML
#print("Integration depth: %1.0f m"%(H))
elif "H" in kargs:
H = kargs["H"]
else:
H = 200.
jh = np.where( z>= H)[0][0] # gets the depth index for the maximum mixed layer
#depth integrated nitrate
dint_Nitrate = np.nanmean(Nitrate[:jh,:], axis = 0)*H*(1027/1e6)
dint_POC = np.nanmean(POC[:jh,:], axis = 0)*H/1000.
mSA = np.nanmean( SA[z>500,:], axis = 0 )
#by multiplying by density ~1027 and dividing by 1e6 I get units mol m-2
#for each year calculates the maximum and minimum
Uyear = np.unique(year)
nyr = Uyear.size
date_nit_sum = np.full(nyr, np.nan)
date_nit_win = np.full(nyr, np.nan)
nit_win = np.full(nyr, np.nan)
nit_sum = np.full(nyr, np.nan)
nit_win_month_avg = np.full(nyr, np.nan)
nit_sum_month_avg = np.full(nyr, np.nan)
POC_win = np.full(nyr, np.nan)
POC_sum = np.full(nyr, np.nan)
POC_win_month_avg = np.full(nyr, np.nan)
POC_sum_month_avg = np.full(nyr, np.nan)
SA_win = np.full(nyr, np.nan)
SA_sum = np.full(nyr, np.nan)
Lat_win = np.full(nyr, np.nan)
Lat_sum = np.full(nyr, np.nan)
Lon_win = np.full(nyr, np.nan)
Lon_sum = np.full(nyr, np.nan)
flag_nit_NEP = np.full(nyr, False)
for i, yr in enumerate(Uyear):
#start_summer = datetime.datetime(int(yr),12,1,0,0).toordinal()
#end_summer = datetime.datetime(int(yr)+1,4,1,0,0).toordinal()
start_summer = datetime.datetime(int(yr)+1,1,1,0,0).toordinal()
end_summer = datetime.datetime(int(yr)+1,4,1,0,0).toordinal()
it_summer = np.where( (date>= start_summer) & (date<= end_summer) )[0]
if it_summer.size > 0:
if np.sum(np.isfinite(dint_Nitrate[it_summer]))>0:
imin_nit = it_summer[ np.nanargmin( dint_Nitrate[it_summer] ) ]
date_nit_sum[i] = date[imin_nit]
nit_sum[i] =np.nanmin( dint_Nitrate[it_summer])
POC_sum[i] = dint_POC[imin_nit]
#ii_sum_month = np.where( np.abs(date - date[imin_nit] )<15 )[0]
ii_sum_month = np.where( (month == month[imin_nit]) & (year == year[imin_nit]) )[0]
nit_sum_month_avg[i] =np.nanmean( dint_Nitrate[ii_sum_month])
POC_sum_month_avg[i] =np.nanmean( dint_POC[ii_sum_month])
SA_sum[i] = mSA[imin_nit]
Lat_sum[i] = Lat[imin_nit]
Lon_sum[i] = Lon[imin_nit]
#start_winter = datetime.datetime(int(yr),5,1,0,0).toordinal()
#end_winter = datetime.datetime(int(yr),12,1,0,0).toordinal()
start_winter = datetime.datetime(int(yr),8,1,0,0).toordinal()
end_winter = datetime.datetime(int(yr),12,1,0,0).toordinal()
it_winter = np.where( (date>= start_winter) & (date<= end_winter) )[0]
if it_winter.size > 0:
if np.sum(np.isfinite(dint_Nitrate[it_winter]))>0:
imax_nit = it_winter[ np.nanargmax( dint_Nitrate[it_winter] ) ]
date_nit_win[i] = date[imax_nit]
nit_win[i] = np.nanmax( dint_Nitrate[it_winter])
POC_win[i] = dint_POC[imax_nit]
#ii_win_month = np.where( np.abs(date - date[imax_nit] )<15 )[0]
ii_win_month = np.where( (month == month[imax_nit]) & (year == year[imax_nit]) )[0]
nit_win_month_avg[i] =np.nanmean( dint_Nitrate[ii_win_month])
POC_win_month_avg[i] =np.nanmean( dint_POC[ii_win_month])
SA_win[i] = mSA[imax_nit]
Lat_win[i] = Lat[imax_nit]
Lon_win[i] = Lon[imax_nit]
flag_NEP = (np.abs(date_nit_win-date_nit_sum)<8*30) & (np.abs(SA_win-SA_sum)<0.05) & (np.abs(Lon_win-Lon_sum)<8.) & (np.abs(Lat_win-Lat_sum)<5.)
#calculates net ecosystem production (molC m-2 yr-1)
NEP = (nit_win - nit_sum)*RCN
#from the monthly means
NEP_avg = (nit_win_month_avg - nit_sum_month_avg)*RCN
NEP_POC = -(POC_win - POC_sum)
NEP_POC_avg = -(POC_win_month_avg - POC_sum_month_avg)
#gets the date around the depletion
date_NEP = 0.5*(date_nit_sum +date_nit_win )
Lon_NEP = 0.5*(Lon_win+Lon_sum)
Lat_NEP = 0.5*(Lat_win+Lat_sum)
if PLOT:
print( "\n-------------------------------------------------------------------------")
print("YEAR\t NEP Nit\t <NEP Nit>\t NEP POC\t <NEP POC>" )
print("\t\t\t\t [mol/m2/yr]")
print( "-------------------------------------------------------------------------")
for i in range(nyr):
print("%d-%d\t %1.2f\t\t%1.2f\t\t%1.2f\t\t%1.2f"%(Uyear[i],Uyear[i]+1, NEP[i], NEP_avg[i], NEP_POC[i], NEP_POC_avg[i]) )
print( "-------------------------------------------------------------------------")
print("Mean \t%1.2f\t\t%1.2f\t\t%1.2f\t\t%1.2f"%(np.nanmean(NEP), np.nanmean(NEP_avg),np.nanmean(NEP_POC), np.nanmean(NEP_POC_avg)))
print( "-------------------------------------------------------------------------")
#Plots the results
fig, ax = plt.subplots(3,1,figsize = (8,6), sharex = True)
ax[0].plot( date, dint_Nitrate, "k" )
l1,=ax[0].plot(date_nit_sum, nit_sum,"o", ms = 10, mec = "k", color = "goldenrod")
l2,=ax[0].plot(date_nit_win, nit_win,"o", ms = 10, mec = "k", color = "green")
for i in range(nyr):
ax[0].plot([date_nit_sum[i]-15,date_nit_sum[i]+15], [nit_sum_month_avg[i],nit_sum_month_avg[i]], color = "k", zorder = -1)
ax[0].plot([date_nit_win[i]-15,date_nit_win[i]+15], [nit_win_month_avg[i],nit_win_month_avg[i]], zorder = -1, color = "k")
yl = ax[0].get_ylim()
for i in range(nyr):
ax[0].fill_between( [date_nit_sum[i]-15,date_nit_sum[i]+15], y1 = yl[0], y2 = yl[1], color = l1.get_color(), alpha = 0.3 )
ax[0].fill_between( [date_nit_win[i]-15,date_nit_win[i]+15], y1 = yl[0], y2 = yl[1], color = l2.get_color(), alpha = 0.3 )
ax[0].set_ylim(yl)
ax[0].set_ylabel( "$\\int \\mathrm{Nitrate}\\, \\rm d z$\n[mol m$^{-2}$]" )
ax[0].grid(True)
ax[1].plot( date, dint_POC, "k" )
l1,=ax[1].plot(date_nit_sum, POC_sum,"o", ms = 10, mec = "k", color = "goldenrod")
l2,=ax[1].plot(date_nit_win, POC_win,"o", ms = 10, mec = "k", color = "green")
for i in range(nyr):
ax[1].plot([date_nit_sum[i]-15,date_nit_sum[i]+15], [POC_sum_month_avg[i],POC_sum_month_avg[i]], color = "k", zorder = -1)
ax[1].plot([date_nit_win[i]-15,date_nit_win[i]+15], [POC_win_month_avg[i],POC_win_month_avg[i]], zorder = -1, color = "k")
yl = ax[1].get_ylim()
for i in range(nyr):
ax[1].fill_between( [date_nit_sum[i]-15,date_nit_sum[i]+15], y1 = yl[0], y2 = yl[1], color = l1.get_color(), alpha = 0.3 )
ax[1].fill_between( [date_nit_win[i]-15,date_nit_win[i]+15], y1 = yl[0], y2 = yl[1], color = l2.get_color(), alpha = 0.3 )
ax[1].set_ylim(yl)
ax[1].set_ylabel( "$\\int \\mathrm{POC}\\, \\rm d z$\n[mol m$^{-2}$]" )
ax[1].grid(True)
ax[2].bar( date_NEP[flag_NEP]-50, NEP[flag_NEP], width = 50, ec = "k", label = "Nit 1-prof" )
ax[2].bar( date_NEP[flag_NEP]-30, NEP_avg[flag_NEP], width = 50, ec = "k", label = "Nit month" )
ax[2].bar( date_NEP[flag_NEP]+30, NEP_POC[flag_NEP], width = 50, ec = "k", label = "POC 1-prof" )
ax[2].bar( date_NEP[flag_NEP]+50, NEP_POC_avg[flag_NEP], width = 50, ec = "k", label = "POC month" )
ax[2].set_ylabel("NEP\n[molC m$^{-2}$ y$^{-1}$]")
ax[2].legend(loc = "center left", bbox_to_anchor = (1.01,0.5))
formatter = mdates.DateFormatter("%Y") ### formatter of the date
locator = mdates.YearLocator() ### where to put the labels
ax[2].xaxis.set_major_locator(locator)
ax[2].xaxis.set_major_formatter(formatter)
ax[2].grid(True)
return date_NEP, Lon_NEP, Lat_NEP, NEP_avg, NEP_POC_avg, flag_NEP
def oxygen_consumption_rate(date, z, Lon, Lat, Oxygen, SA, **kargs):
if "PLOT" in kargs:
PLOT = kargs["PLOT"]
else:
PLOT = False
if "zmax" in kargs:
zmax = kargs["zmax"]
else:
zmax = 500.
if "zmin" in kargs:
zmin = kargs["zmin"]
else:
zmin = 100.
RCO = - 106./170.
nt = date.size
dateDT = convert_time_to_date( date )
year = np.full( nt, np.nan )
month = np.full(nt, np.nan)
for i in range(nt):
year[i] = dateDT[i].year
month[i] = dateDT[i].month
dz = z[1]-z[0]
jh = np.where((z>=zmin) & (z<=zmax))[0]
    #depth-averaged O2 over the [zmin, zmax] layer (10-point moving average)
    dint_O2 = moving_average( np.nanmean(Oxygen[jh,:], axis = 0),10)
mSA = np.nanmean( SA[z>500,:], axis = 0 )
O2 = np.copy(Oxygen)
nz, nt = O2.shape
for j in range(nz):
O2[j,:] = moving_average(O2[j,:],10)
if "mld" in kargs:
zM = np.tile(z,(nt,1)).T
mldM = np.tile(kargs["mld"],(nz,1))
ismld = zM<mldM
O2[ismld] = np.nan
    #for each year, find the maximum and minimum
Uyear = np.unique(year)
nyr = Uyear.size
date_O2_sum = np.full(nyr, np.nan)
date_O2_win = np.full(nyr, np.nan)
R_O2 = | np.full(nyr, np.nan) | numpy.full |
"""
Utility functions used throughout the code.
"""
import io
import json
import os
import pickle
import logging
import numpy as np
import pathlib
import torch
from torch.nn import Sequential, Module, Linear
from scipy.sparse import csc_matrix
from scipy import optimize, interpolate
from scipy.stats import norm as Gaussian
from scipy.special import betaln
cifar10_label_names = ['airplane', 'automobile', 'bird',
'cat', 'deer', 'dog', 'frog', 'horse',
'ship', 'truck']
def aggregate_by_group(v, g, n):
assert g.max() < n
return csc_matrix((v, (g, np.zeros_like(g))), shape=(n, 1)
).toarray().squeeze()
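# Illustrative sketch (assumed values): scipy's sparse constructor sums
# duplicate (row, col) entries, so
#   aggregate_by_group(np.array([1., 2., 3.]), np.array([0, 0, 2]), 3)
# returns array([3., 0., 3.]).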
def subsample_arrays(arrays, n, seed=0):
rng_state = np.random.get_state()
np.random.seed(seed)
take_inds = np.random.choice(len(arrays[0]),
n, replace=False)
np.random.set_state(rng_state)
return [a[take_inds] for a in arrays]
def get_weights_norm(model):
"""Returns the average 2-norm of the weights"""
return np.mean([torch.norm(p.data).item() for p in model.parameters()])
def copy_state(model_src, model_tgt):
"""Copy weights of model_src in model_tgt"""
model_tgt.load_state_dict(model_src.state_dict())
return model_tgt
def average_step(model, model_avg, step, eta=0.):
"""In place averaging step from
http://proceedings.mlr.press/v28/shamir13.pdf
Parameters
----------
model : torch.Module
Current model that we optimize
model_avg : torch.Module
Model corresponding to the averaging of the iterates
step : int
Current iteration number (starts at 1)
eta : float, optional
Parameter of [Shamir & Zhang, 2013], eta=0. corresponds to normal
averaging
Returns
-------
model_avg : torch.Module
Updated averaged model
"""
keys = model.state_dict().keys()
for k in keys:
model_avg.state_dict()[k].mul_(1 - ((eta + 1) / (step + eta))).add_(
model.state_dict()[k].mul((eta + 1) / (step + eta)))
return model_avg
def average_step_ema(model, model_avg, gamma=0.9):
"""Updates model_avg with an exponential moving average with param gamma"""
keys = model.state_dict().keys()
for k in keys:
model_avg.state_dict()[k].mul_(1 - gamma).add_(
model.state_dict()[k].mul(gamma))
return model_avg
class SquaredGaussian(object):
def __init__(self, loc=0.0, scale=1.0):
self.loc = loc
self.scale = scale if scale > 0 else 1e-12
self.gaussian = Gaussian(loc=loc, scale=self.scale)
def ppf(self, x):
def target(a):
return self.cdf(a) - x
v_hi = self.scale ** 2
while target(v_hi) < 0:
v_hi *= 2
return optimize.brentq(target, 0, v_hi)
def cdf(self, x):
return self.gaussian.cdf(np.sqrt(x)) - self.gaussian.cdf(-np.sqrt(x))
def analytical_dro_objective(p, invcdf, size=1.0, geometry='cvar', reg=0.0,
output_lambda=False):
if geometry == 'cvar':
assert reg == 0.0
opt_lambda = None
var = interpolate.interp1d(p, invcdf)(1 - size)
ind = np.where(p > 1 - size)[0][0]
out = (1 / (p[-1] - 1 + size)) * np.trapz(
np.concatenate([[var], invcdf[ind:]]),
np.concatenate([[1 - size], p[ind:]]))
elif geometry == 'chi-square':
if size < np.inf:
assert reg == 0.0
def chisquare(eta):
r = np.maximum(invcdf - eta, 0)
r /= np.trapz(r, p)
return 0.5 * np.trapz((r - 1.0) ** 2, p) - size
eta_0 = invcdf[0]
while chisquare(eta_0) > 0.0:
eta_0 = 2 * eta_0 - invcdf[-1]
eta_1 = (invcdf[-1] + invcdf[-2]) / 2
if chisquare(eta_1) <= 0.0:
eta = eta_1
else:
eta = optimize.brentq(chisquare, eta_0, eta_1)
r = np.maximum(invcdf - eta, 0)
opt_lambda = np.trapz(r, p)
r /= opt_lambda
out = np.trapz(r * invcdf, p)
else:
assert reg > 0.0
def target(eta):
r = np.maximum(invcdf - eta, 0)
return np.trapz(r, p) / reg - 1.0
eta_0 = invcdf[0]
while target(eta_0) < 0.0:
eta_0 = 2 * eta_0 - invcdf[-1]
eta_1 = (invcdf[-1] + invcdf[-2]) / 2
if target(eta_1) >= 0.0:
eta = eta_1
else:
eta = optimize.brentq(target, eta_0, eta_1)
r = np.maximum(invcdf - eta, 0)
opt_lambda = np.trapz(r, p) # should be equal to reg!
r /= opt_lambda
out = 0.5 * (np.trapz(r * invcdf, p) + eta + reg)
if output_lambda:
return out, opt_lambda
else:
return out
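# Illustrative note: for geometry='cvar' with size=1.0 (and p spanning
# [0, 1]), the "worst case" is the whole population, so the objective
# reduces to the trapezoidal mean of invcdf over p.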
def project_to_cs_ball(v, rho):
"""Numpy/Scipy projection to chi-square ball of radius rho"""
n = len(v)
def cs_div(p):
return 0.5 * np.mean((n * p - 1)**2)
# first, check if a simplex projection is within the chi-square ball
target_simplex = lambda eta: np.sum(np.maximum(v - eta, 0)) - 1.0
eta_min_simplex = v.min() - 1 / n
eta_max_simplex = v.max()
eta_simplex = optimize.brentq(
target_simplex, eta_min_simplex, eta_max_simplex)
p_candidate = np.maximum(v - eta_simplex, 0)
if cs_div(p_candidate) <= rho:
return p_candidate
# second, compute a chi-square best response
def target_cs(eta, return_p=False):
p = np.maximum(v - eta, 0)
if p.sum() == 0.0:
p[np.argmax(v)] = 1.0
else:
p /= p.sum()
err = cs_div(p) - rho
return p if return_p else err
eta_max_cs = v.max()
eta_min_cs = v.min()
if target_cs(eta_max_cs) <= 0:
return target_cs(eta_max_cs, return_p=True)
while target_cs(eta_min_cs) > 0.0: # find left interval edge for bisection
eta_min_cs = 2 * eta_min_cs - eta_max_cs
eta_cs = optimize.brentq(
target_cs, eta_min_cs, eta_max_cs)
p_candidate = target_cs(eta_cs, return_p=True)
assert np.abs(cs_div(p_candidate) - rho) < rho * 1e-2
return p_candidate
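# Minimal usage sketch (illustrative): the result is a probability vector
# whose chi-square divergence from uniform is (approximately) at most rho:
#   p = project_to_cs_ball(np.array([1.0, 0.0, 0.0]), 0.1)
#   assert abs(0.5 * np.mean((3 * p - 1) ** 2) - 0.1) < 0.1 * 1e-2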
def project_to_cvar_ball(w, alpha):
    n = len(w)
    if alpha == 1.0:
        # the alpha = 1 CVaR ball contains only the uniform distribution
        return np.ones(n) / n
"<NAME> and <NAME>'s soft thresholding procedures for SigClust"
import numpy as np
import scipy.stats
def soft_threshold_hanwen_huang(eigenvalues, sig2b):
"Soft threshold eigenvalues to background noise level sig2b according to Hanwen Huang's scheme"
optimal_tau = _compute_tau(eigenvalues, sig2b)
soft_thresholded_eigenvalues = _shift_and_threshold_eigenvalues(eigenvalues, optimal_tau, sig2b)
return soft_thresholded_eigenvalues
def soft_threshold_ming_yuan(eigenvalues, sig2b):
"""Soft thresholds eigenvalues to background noise level sig2b using Ming Yuan's
scheme, which maintains total power. Results in an anti-conservative SigClust
when the relative size of the first eigenvalue is small."""
# Starting with the smallest eigenvalue, sequentially bring eigenvalues up to
# sig2b and distribute the difference equally over the larger eigenvalues
# (which maintains the total power).
d = len(eigenvalues)
eigenvalues_asc = | np.sort(eigenvalues) | numpy.sort |
import copy
import datetime
import heapq
import logging
import numpy as np
from operator import itemgetter, attrgetter
from queue import Queue
import sys
from kaggle_environments.envs.halite.helpers import *
MY_DATEFMT = '%M:%S'
def easy_log(s, loglevel='D'):
pass # print(f'{datetime.datetime.now().strftime(MY_DATEFMT)}{loglevel.upper()[0]} {s}')
easy_log('ini begin')
MAX_HALITE = 500
MAP_SIZE = 21
HALF_MAP_SIZE = MAP_SIZE // 2
ROWS = MAP_SIZE
COLS = MAP_SIZE
PLAYERS = 4
MOVE = [
None,
ShipAction.NORTH,
ShipAction.EAST,
ShipAction.SOUTH,
ShipAction.WEST,
]
LEN_MOVE = len(MOVE)
I_MINE = 0
I_NORTH = 1
I_EAST = 2
I_SOUTH = 3
I_WEST = 4
I_NORTH_EAST = 5
I_SOUTH_EAST = 6
I_SOUTH_WEST = 7
I_NORTH_WEST = 8
I_CONVERT = 5
def ship_action_to_int(action, convert_aware=False):
if action is None:
return I_MINE
elif isinstance(action, int):
return action
elif action == ShipAction.NORTH:
return I_NORTH
elif action == ShipAction.EAST:
return I_EAST
elif action == ShipAction.SOUTH:
return I_SOUTH
elif action == ShipAction.WEST:
return I_WEST
elif action == ShipAction.CONVERT:
if convert_aware:
return I_CONVERT
return I_MINE
DY = [0, 1, 0, -1, 0]
DX = [0, 0, 1, 0, -1]
def position_to_ij(p):
return ROWS - p[1] - 1, p[0]
def ij_to_position(i, j):
return j, ROWS - i - 1
def mod_map_size_x(x):
return (x + MAP_SIZE) % MAP_SIZE
def rotated_diff_position_impl(x0, x1):
"""x1 - x0 の値域 [-20, 20] を [-10, 10] へおさめたい"""
d = x1 - x0
if d < -HALF_MAP_SIZE: # [-20, -11]
return d + MAP_SIZE # [1, 10]
elif HALF_MAP_SIZE < d: # [11, 20]
return d - MAP_SIZE # [-10, -1]
return d
# memoize
def initialize_rotated_diff_position():
t = np.zeros((MAP_SIZE, MAP_SIZE), dtype=np.int32)
for x0 in range(MAP_SIZE):
for x1 in range(MAP_SIZE):
t[x0, x1] = rotated_diff_position_impl(x0, x1)
t.flags.writeable = False
return t
ROTATED_DIFF_POSITION = initialize_rotated_diff_position()
def rotated_diff_position(x0, x1):
return ROTATED_DIFF_POSITION[x0, x1]
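# Added sanity checks (illustrative): with MAP_SIZE == 21, wrapped
# differences always land in [-10, 10], taking the short way around.
assert rotated_diff_position(0, 15) == -6
assert rotated_diff_position(15, 0) == 6
assert rotated_diff_position(0, 10) == 10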
def distance_impl(x0, x1, y0, y1):
dx = abs(rotated_diff_position(x0, x1))
dy = abs(rotated_diff_position(y0, y1))
return dx + dy
def initialize_distance():
t = np.zeros((COLS, ROWS, COLS, ROWS), dtype=np.int32)
for x0 in range(COLS):
for y0 in range(ROWS):
for x1 in range(COLS):
for y1 in range(ROWS):
t[x0, y0, x1, y1] = distance_impl(x0=x0, y0=y0, x1=x1, y1=y1)
t.flags.writeable = False
return t
DISTANCE = initialize_distance()
def calculate_distance(p0, p1):
return DISTANCE[p0[0], p0[1], p1[0], p1[1]]
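# Added sanity checks (illustrative): Manhattan distance on the torus.
assert calculate_distance((0, 0), (20, 20)) == 2 # wraps on both axes
assert calculate_distance((0, 0), (10, 10)) == 20 # the worst case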
def initialize_neighbor_positions(sup_d):
"""マンハッタン距離1ずつ増加していくときの全範囲x, yと距離d"""
ts =[]
us = []
for d in range(sup_d):
n_neighbors = 1 + (d * (d + 1) // 2) * 4
t = np.zeros((n_neighbors, 3), dtype=np.int32)
k = 0
for dx in range(-d, d + 1):
abs_dx = abs(dx)
for dy in range(-d, d + 1):
abs_dy = abs(dy)
if d < abs_dx + abs_dy:
continue
t[k, :] = dx, dy, abs_dx + abs_dy
k += 1
assert k == n_neighbors
u = np.zeros((COLS, ROWS, n_neighbors, 3), dtype=np.int32)
for x in range(COLS):
for y in range(ROWS):
for k, (dx, dy, d) in enumerate(t):
x1 = mod_map_size_x(x + dx)
y1 = mod_map_size_x(y + dy)
u[x, y, k, :] = x1, y1, d
t.flags.writeable = False
u.flags.writeable = False
ts.append(t)
us.append(u)
return ts, us
NEIGHBOR_D_POSITIONS, NEIGHBOR_POSITIONS = initialize_neighbor_positions(sup_d=7)
def neighbor_d_positions(d):
return NEIGHBOR_D_POSITIONS[d]
def neighbor_positions(d, p):
return NEIGHBOR_POSITIONS[d][p[0], p[1]]
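# Added sanity checks (illustrative): a Manhattan ball of radius d holds
# 1 + 2 * d * (d + 1) cells.
assert len(neighbor_d_positions(1)) == 5
assert len(neighbor_d_positions(2)) == 13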
DISTANCE_TO_PREFERENCE = [0.62 + 0.02 * i for i in range(HALF_MAP_SIZE)] + [1.0] + [1.2 + 0.02 * i for i in range(HALF_MAP_SIZE)]
def distance_to_preference(d):
return DISTANCE_TO_PREFERENCE[d + HALF_MAP_SIZE]
def preference_move_to_impl_2(x0, x1, dx_action):
abs_dx0 = abs(rotated_diff_position(x0=x0, x1=x1))
x_ = mod_map_size_x(x0 + dx_action)
abs_dx_ = abs(rotated_diff_position(x0=x_, x1=x1))
preference = 1.0
dx2 = abs_dx_ - abs_dx0
    if dx2 < 0: # distance shrank; the farther point is abs_dx0 away
preference *= distance_to_preference(abs_dx0)
    elif 0 < dx2: # moved away; the farther point is abs_dx_ away
preference *= distance_to_preference(-abs_dx_)
return preference
def preference_move_to_impl(x0, y0, x1, y1):
"""x0, y0: 現在位置; x1, y1: 目標位置"""
preference = np.ones(LEN_MOVE, dtype=np.float32)
for i_action in range(LEN_MOVE):
preference[i_action] *= preference_move_to_impl_2(
x0=x0, x1=x1, dx_action=DX[i_action])
preference[i_action] *= preference_move_to_impl_2(
x0=y0, x1=y1, dx_action=DY[i_action])
if x0 == x1 and y0 == y1:
preference[0] *= 1.5
return preference
def initialize_preference_move_to():
t = np.zeros((COLS, ROWS, LEN_MOVE), dtype=np.float32)
for x1 in range(COLS):
for y1 in range(ROWS):
t[x1, y1, :] = preference_move_to_impl(x0=0, y0=0, x1=x1, y1=y1)
t.flags.writeable = False
return t
PREFERENCE_MOVE_TO = initialize_preference_move_to()
def preference_move_to(p0, p1):
x1 = mod_map_size_x(p1[0] - p0[0])
y1 = mod_map_size_x(p1[1] - p0[1])
return PREFERENCE_MOVE_TO[x1, y1]
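# Added sanity check (illustrative): for a target 3 cells due north
# (well within half the map), moving NORTH gets the highest preference.
assert np.argmax(preference_move_to((5, 5), (5, 8))) == I_NORTH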
def calculate_next_position(position, next_action):
"""next_action で移動した後の座標を求める"""
p = position
if isinstance(next_action, int):
next_action = MOVE[next_action]
assert (next_action is None) or isinstance(next_action, ShipAction)
if next_action == ShipAction.NORTH:
p = Point(x=p[0], y=(p[1] + 1) % ROWS)
elif next_action == ShipAction.EAST:
p = Point(x=(p[0] + 1) % COLS, y=p[1])
elif next_action == ShipAction.SOUTH:
p = Point(x=p[0], y=(p[1] + ROWS - 1) % ROWS)
elif next_action == ShipAction.WEST:
p = Point(x=(p[0] + COLS - 1) % COLS, y=p[1])
return p
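# Added sanity checks (illustrative): moves wrap around the 21x21 board.
assert calculate_next_position(Point(x=0, y=20), ShipAction.NORTH) == Point(x=0, y=0)
assert calculate_next_position(Point(x=0, y=0), ShipAction.WEST) == Point(x=20, y=0)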
def direction_to_str(next_action):
if next_action == ShipAction.NORTH:
return '^'
elif next_action == ShipAction.EAST:
return '>'
elif next_action == ShipAction.SOUTH:
return 'v'
elif next_action == ShipAction.WEST:
return '<'
elif next_action == ShipAction.CONVERT:
return 'c'
return '.'
I_FLAG_NEXT_SHIP_POSITION = 0
I_FLAG_MINE_D2 = 1
I_FLAG_MINE_D3 = 2
I_FLAG_MINE_D4 = 3
I_FLAG_GO_HOME_STRAIGHT = 4
N_FLAG_TYPES = 5
I_SCORE_SURROUNDED_HALITE_GROUND = 0 # how many of the 4 adjacent cells grow halite; not updated mid-game, so note it can change when shipyards are created or destroyed
I_SCORE_EMPTY_OPPONENT_D2 = 1
I_SCORE_NON_EMPTY_OPPONENT_D2 = 2
I_SCORE_OPPONENT_D2 = 3
I_SCORE_OPPONENT_D3 = 4
I_SCORE_OPPONENT_SHIPYARD_D6 = 5
I_SCORE_ALLY_SHIPYARD_D1 = 6
I_SCORE_ALLY_SHIPYARD_D4 = 7
I_SCORE_ALLY_SHIPYARD_D7 = 8
I_SCORE_OPPONENT_SHIPYARD_D2 = 9
I_SCORE_OPPONENT_SHIPYARD_D3 = 10
I_SCORE_OPPONENT_SHIPYARD_D4 = 11
I_SCORE_ALLY_D2 = 12
I_SCORE_EMPTY_ALLY_D4 = 13
I_SCORE_NON_EMPTY_ALLY_D4 = 14
I_SCORE_ALLY_D4 = 15
I_SCORE_HALITE = 16 # plain ground halite
I_SCORE_HALITE_D4 = 17 # sum of halite within D cells
I_SCORE_SHIPYARD_CANDIDATES_SUB = 18 # halite, or 0 inside another shipyard's territory
I_SCORE_SHIPYARD_CANDIDATES = 19 # surrounding halite, excluding own position and other shipyards' territory
I_SCORE_MIN_NEIGHBOR_OPPONENT_HALITE = 20 # minimum opponent halite over this cell and its 4 neighbors (5 cells)
I_SCORE_ALLY_REACH = 21 # fewest turns for an ally to reach
I_SCORE_EMPTY_ALLY_REACH = 22 # fewest turns for an empty ally to reach
I_SCORE_NON_EMPTY_ALLY_REACH = 23
I_SCORE_OPPONENT_REACH = 24 # fewest turns for an opponent to reach
I_SCORE_EMPTY_OPPONENT_REACH = 25 # fewest turns for an empty opponent to reach
I_SCORE_NON_EMPTY_OPPONENT_REACH = 26
I_SCORE_REACH_ADVANTAGE = 27 # I_SCORE_OPPONENT_REACH - I_SCORE_ALLY_REACH
I_SCORE_DETOUR_REACH = 28 # shipyards has neighbor ZOC
I_SCORE_OPPONENT_DETOUR_REACH = 29
I_SCORE_DETOUR_ADVANTAGE = 30
I_SCORE_DANGER_ZONE = 31 # rectangle of 2 diagonal shipyards, with edge
I_SCORE_DANGER_ZONE_IN = 32 # without edge
I_SCORE_HUNT_ZONE = 33
I_SCORE_HUNT_ZONE_IN = 34
I_SCORE_FUTURE_HUNT_ZONE = 35
N_SCORE_TYPES = 36
class FlagsManager(object):
def __init__(self):
self.flags = np.zeros((N_FLAG_TYPES, ROWS), dtype=np.uint32)
def parse(self, i=None, j=None, x=None, y=None):
i_ = (ROWS - y - 1) if i is None else i
j_ = x if j is None else j
assert i_ is not None, f'i={i}, y={y}'
assert j_ is not None, f'j={j}, x={x}'
return i_, j_
def set_all(self, i_flag_type):
self.flags[i_flag_type, ...] = 0xFFFFFFFF
def reset_all(self, i_flag_type):
self.flags[i_flag_type, ...] = 0
def set(self, i_flag_type, **kwargs):
i, j = self.parse(**kwargs)
self.flags[i_flag_type, i] |= (1 << j)
def reset(self, i_flag_type, **kwargs):
i, j = self.parse(**kwargs)
self.flags[i_flag_type, i] &= ~(1 << j)
def xor(self, i_flag_type, **kwargs):
i, j = self.parse(**kwargs)
self.flags[i_flag_type, i] ^= (1 << j)
def get(self, i_flag_type, **kwargs):
i, j = self.parse(**kwargs)
return (self.flags[i_flag_type, i] >> j) & 1
def initialize_compound_interest(t_max=400, max_cell_halite=500, regen_rate=0.02):
m = np.zeros((t_max + 1, max_cell_halite + 1), dtype=np.float32)
m[0, :] = np.arange(max_cell_halite + 1)
rate = 1.0 + regen_rate
for t in range(t_max):
m[t + 1, :] = np.minimum(m[t, :] * rate, max_cell_halite)
# for h in range(4, max_cell_halite, 25):
# logger.debug(f'h={h}, m[:, h]={m[:, h]}')
return m
COMPOUND_INTEREST = initialize_compound_interest()
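# Added sanity checks (illustrative): halite regenerates at 2% per step
# and is capped at MAX_HALITE.
assert abs(COMPOUND_INTEREST[1, 100] - 102.0) < 1e-3
assert abs(COMPOUND_INTEREST[400, 100] - float(MAX_HALITE)) < 1e-3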
HUNT_IMPOSSIBLE = 9
DIAG_DIRECTIONS = ((I_NORTH, I_EAST), (I_EAST, I_SOUTH), (I_SOUTH, I_WEST), (I_WEST, I_NORTH))
def solve_hunt_impl2(m, c, result):
if not c:
        if m == 0x1E: # hunters present in all four directions
return True, result
return False, [HUNT_IMPOSSIBLE] * 4
t = []
diag, ij = c[0]
for i in ij:
# print(f'solve_hunt_impl2: m={m} c={c} result={result} c0={c[0]} i={i}')
if i == 0:
m_i = m
else:
m_i = m | (1 << i)
r_i = result + [i]
success_i, result_i = solve_hunt_impl2(m_i, c[1:], r_i)
if success_i:
return success_i, result_i
return False, [HUNT_IMPOSSIBLE] * 4
def solve_hunt_impl(a):
m0 = (a[0] << I_NORTH) | (a[1] << I_EAST) | (a[2] << I_SOUTH) | (a[3] << I_WEST)
c = []
for diag, b in enumerate(a[4:]):
if b == 0:
            c.append([diag, (0,)]) # a direction with nobody there is simply treated as 0
else:
c.append([diag, DIAG_DIRECTIONS[diag]])
return solve_hunt_impl2(m0, c, [])
def initialize_hunt_dp():
    # the trailing axis of size 4 is the answer: the direction each of ne, se, sw, nw should move
    # (I_NORTH or I_EAST), (I_SOUTH or I_EAST), ...
    # a value of 0 means "dispatch 2 ships, one to each side"
t = np.zeros((2, 2, 2, 2, 3, 3, 3, 3, 4), dtype=np.int32)
for north in range(2):
for east in range(2):
for south in range(2):
for west in range(2):
for ne in range(3):
for se in range(3):
for sw in range(3):
for nw in range(3):
n = north
e = east
s = south
w = west
ne_ = ne
se_ = se
sw_ = sw
nw_ = nw
if ne == 2:
n = 1
e = 1
ne_ = 0
if se == 2:
s = 1
e = 1
se_ = 0
if sw == 2:
s = 1
w = 1
sw_ = 0
if nw == 2:
n = 1
w = 1
nw_ = 0
a = [n, e, s, w, ne_, se_, sw_, nw_]
success, result = solve_hunt_impl(a)
if success:
if ne == 2:
result[0] = 5
if se == 2:
result[1] = 5
if sw == 2:
result[2] = 5
if nw == 2:
result[3] = 5
t[north, east, south, west, ne, se, sw, nw, :] = result
# print(f'HUNT_DP[n{north} e{east} s{south} w{west} ne{ne} se{se} sw{sw} nw{nw}]={result}, success={success}')
t.flags.writeable = False
return t
HUNT_DP = initialize_hunt_dp()
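# Added sanity check (illustrative): with hunters already on all four
# cardinal sides and no diagonal helpers present, the target is boxed in,
# so every diagonal answer is I_MINE (stay put).
assert np.all(HUNT_DP[1, 1, 1, 1, 0, 0, 0, 0] == I_MINE)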
DIRECTION_MAPPING = np.array([I_MINE, I_MINE, I_NORTH, -1, I_EAST, -1, I_NORTH_EAST, -1, I_SOUTH, -1, -1, -1, I_SOUTH_EAST, -1, -1, -1, I_WEST, -1, I_NORTH_WEST, -1, -1, -1, -1, -1, I_SOUTH_WEST, -1, -1, -1, -1, -1, -1, -1], dtype=np.int32)
DIRECTION_MAPPING.flags.writeable = False
assert DIRECTION_MAPPING[1 << I_MINE] == I_MINE
assert DIRECTION_MAPPING[1 << I_NORTH] == I_NORTH
assert DIRECTION_MAPPING[1 << I_EAST] == I_EAST
assert DIRECTION_MAPPING[1 << I_SOUTH] == I_SOUTH
assert DIRECTION_MAPPING[1 << I_WEST] == I_WEST
assert DIRECTION_MAPPING[(1 << I_NORTH) | (1 << I_EAST)] == I_NORTH_EAST
assert DIRECTION_MAPPING[(1 << I_EAST) | (1 << I_SOUTH)] == I_SOUTH_EAST
assert DIRECTION_MAPPING[(1 << I_SOUTH) | (1 << I_WEST)] == I_SOUTH_WEST
assert DIRECTION_MAPPING[(1 << I_WEST) | (1 << I_NORTH)] == I_NORTH_WEST
def initialize_both_move_to_halite_nash_equilibrium_impl2(halite, r_wait):
h1 = int((halite) * 0.25)
h2 = int((halite - h1) * 0.25)
h3 = int((halite - h1 - h2) * 0.25)
h23 = h2 + h3
# -500, -500 | h1, h23
# h23, h1 | r_wait, r_wait
    # assume an immediate SPAWN so that differences in subsequent mining do not arise
    # strictly, waiting one turn multiplies the cell by 1.02, which should raise the value of r_wait in impl2, but that effect is not modeled
# h_next = min(MAX_HALITE, int(halite * 1.02))
# r_p = -MAX_HALITE * p * q + h1 * p * (1 - q) + h23 * (1 - p) * q + r_wait * (1 - p) * (1 - q)
# r_q = -MAX_HALITE * p * q + h23 * p * (1 - q) + h1 * (1 - p) * q + r_wait * (1 - p) * (1 - q)
# dr_q / dq = -MAX_HALITE * p - h23 * p + h1 * (1 - p) - r_wait * (1 - p)
# = p * (-MAX_HALITE - h23 - h1 + r_wait) + (h1 - r_wait)
# = 0
    # and solve for the p that satisfies it
p = max(0.0, (h1 - r_wait) / (MAX_HALITE + h23 + h1 - r_wait))
r_p = -MAX_HALITE * p * p + h1 * p * (1 - p) + h23 * (1 - p) * p + r_wait * (1 - p) * (1 - p)
return p, r_p
def initialize_both_move_to_halite_nash_equilibrium_impl(halite):
r_ng = 0.0
r_ok = MAX_HALITE
p_ok = 0.0
gamma = 0.9
while 0.99 < abs(r_ok - r_ng):
r_mid = 0.5 * (r_ng + r_ok)
p, r_p = initialize_both_move_to_halite_nash_equilibrium_impl2(halite, r_mid * gamma)
# print(f'h{halite} r_mid{r_mid:.1f} r_p{r_p:.1f} p{p:.6f}')
if r_p < r_mid: # ok
r_ok = r_mid
p_ok = p
else:
r_ng = r_mid
return p_ok, r_ok
def initialize_both_move_to_halite_nash_equilibrium():
t = np.zeros(MAX_HALITE + 1, dtype=np.float32)
for halite in range(MAX_HALITE + 1):
p, r = initialize_both_move_to_halite_nash_equilibrium_impl(halite)
t[halite] = p
t.flags.writeable = False
return t
BOTH_MOVE_TO_HALITE_NASH_EQUILIBRIUM = initialize_both_move_to_halite_nash_equilibrium()
def both_move_to_halite_nash_equilibrium(halite):
return BOTH_MOVE_TO_HALITE_NASH_EQUILIBRIUM[int(halite)]
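# Added sanity checks (illustrative): an empty cell gives nothing to fight
# over, so the equilibrium move-in probability is 0; a full cell is worth
# a strictly mixed strategy.
assert both_move_to_halite_nash_equilibrium(0) == 0.0
assert 0.0 < both_move_to_halite_nash_equilibrium(MAX_HALITE) < 1.0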
class Project(object):
def __init__(self, project_id, agent, ):
self.project_id = project_id
self.agent = agent
self.priority = 1.0
self.ships = {} # key is ship_id, value is role
self.shipyards = {} # key is shipyard_id, value is role
        self.budget = 0 # halite this project may spend freely across multiple steps
def schedule(self):
"""Project 継続判断 (True で継続 False なら解体) と priority 設定"""
return False
def reserve_budget(self, halite):
"""
free_haliteから予算をhaliteだけ追加確保する
指定したhaliteが負ならfree_haliteへ戻るので
Projectが使う際には, 先にfree_haliteを増やしてから
reserve_ship なりreserve_shipyardするとよい
次stepへ持ち越す事も可能
すなわち確保したうえで自分が使わなければ次stepでも残っている
次step高優先度projectにもとられない
free_halite < haliteなら失敗し,
成功したらTrue 失敗したら現在budgetのままとなる
"""
d_halite = max(-self.budget, halite)
if self.agent.free_halite < d_halite:
            return False # not enough halite
self.agent.free_halite -= d_halite
self.budget += d_halite
assert 0 <= self.budget, f'{self.budget}'
return True
def maintain_dead_staffs(self):
staff_ids = []
b = self.agent.board
for ship_id in self.ships:
if ship_id not in b.ships:
staff_ids.append(ship_id)
for shipyard_id in self.shipyards:
if shipyard_id not in b.shipyards:
staff_ids.append(shipyard_id)
self.dismiss_project(staff_ids=staff_ids)
def ships_generator(self, with_free=False, with_my_project=False):
a = self.agent
for ship in a.sorted_ships:
if a.determined_ships.get(ship.id, None) is not None:
continue
project_id = a.belonging_project.get(ship.id, None)
if (with_free and (project_id is None)) or (with_my_project and (project_id == self.project_id)):
yield ship
def run(self):
"""
人員と予算と確保し, next_actionを決める
必須人員他に確保されていたらまだ解体ありうる (return True で継続)
前step確保していたstaff_idsはscheduleでリリースしていない限り残る
"""
# if -1e-6 < self.priority:
# self.agent.log(s=f'prj={self.project_id} run prio={self.priority}')
return False
def discard(self):
"""
project終了のため, 確保している人員を開放する
予算を戻してもらう
"""
a = self.agent
a.log(step=a.board.step, id_=None, s=f'p{a.player_id} discard project_id={self.project_id}')
assert 0 <= self.budget, self.budget
self.reserve_budget(-self.budget)
self.dismiss_project(
staff_ids=list(self.ships.keys()) + list(self.shipyards.keys()))
assert self.project_id not in a.belonging_project.values(), f'project_id={self.project_id} belonging_project={a.belonging_project}'
if self.project_id in a.projects:
del a.projects[self.project_id]
def join_project(self, *args, staff_ids, **kwargs):
a = self.agent
for staff_id in staff_ids:
project_id = a.belonging_project.get(staff_id, None)
if project_id and project_id != self.project_id:
project = a.projects.get(project_id, None)
if project:
project.dismiss_project(staff_ids=[staff_id])
return self.agent.join_project(*args, staff_ids=staff_ids, project_id=self.project_id, **kwargs)
def dismiss_project(self, *args, **kwargs):
return self.agent.dismiss_project(*args, project_id=self.project_id, **kwargs)
class DefenseShipyardProject(Project):
"""
priorityはhuntに次ぐ
Shipyardが壊されないように守る
敵shipより手前に1shipはいるようにする
張りこまれていたら,
相殺する
deposit希望のshipを突っ込ませる運ゲーをマネジメント
"""
def __init__(self, shipyard_id, *args, **kwargs):
project_id = f'defense_yd{shipyard_id}'
super().__init__(*args, project_id=project_id, **kwargs)
self.shipyard_id = shipyard_id
        self.cancel_threshold = 4 # grace steps before deciding to trade when an enemy parks next door
self.spawn_step_threshold = 250
self.spawn_step_threshold_final = 390
self.o = None
self.empty_o_min_d = []
self.last_confirmed_step = -1
self.last_swapped_step = -1
self.info = {}
def shipyard_defender_strategy(self, ship):
a = self.agent
shipyard = a.board.shipyards[self.shipyard_id]
o_min_d = self.info['opponent_distance']
e_min_d = self.info.get('empty_ally_ship_distance', 99999)
n_min_d = self.info.get('non_empty_ally_ship_distance', 99999)
d = calculate_distance(ship.position, shipyard.position)
free_steps = o_min_d - d
original_free_steps = free_steps
if 0 < ship.halite:
            free_steps -= 1 # must get there ahead of the enemy to deposit
remaining_steps = max(0, 398 - a.board.step - d)
free_steps = min(free_steps, remaining_steps)
original_free_steps = min(original_free_steps, remaining_steps)
a.log(step=a.board.step, id_=ship.id, s=f'yd{shipyard.id} shipyard_defender h{ship.halite} o_min_d={o_min_d} d={d} free_steps={free_steps}')
        if free_steps < 0: # this ship should never have been appointable
a.log(loglevel='WARNING', id_=ship.id, s=f'defender too far. project_id={self.project_id} h={ship.halite} o_min_d={o_min_d} info={self.info}')
return a.ship_strategy(ship)
        # the default is to head home
priority = 10000 + a.calculate_collision_priority(ship)
q_empty, forced = a.calculate_moving_ship_preference(
ship=ship, position=shipyard.position, mode='escape', mine_threshold=None)
        if o_min_d == 1: # shield the shipyard with our own hull
for k_action, cell_k in enumerate(a.neighbor_cells(a.board.cells[ship.position])):
if cell_k.shipyard and cell_k.shipyard.player_id == a.player_id:
q_empty[k_action] = max(10.0, q_empty[k_action])
return a.reserve_ship_by_q(ship, q=q_empty, priority=priority)
        # if depositing rounds the bank up to a clean multiple, head home right away
player_halite = a.board.current_player.halite
short_halite = MAX_HALITE - (player_halite % MAX_HALITE)
        if free_steps == 0 or (player_halite < 1000 and short_halite <= ship.halite): # return by the shortest route
a.log(id_=ship.id, s=f'{ship.position} prj={self.project_id} free_steps{free_steps} q_empty{q_empty} back short_halite{short_halite}')
return a.reserve_ship_by_q(ship, q=q_empty, priority=priority)
        # consider whether the current cell is worth mining
cell = a.board.cells[ship.position]
mine_project = a.projects.get(f'mine_{ship.position[0]}_{ship.position[1]}', None)
if mine_project:
mine_threshold = mine_project.mining_halite_threshold
else:
mine_threshold = 160.0
if mine_threshold:
mode = 'mine'
else:
mode = 'escape'
if mine_threshold < cell.halite:
q_mine, forced = a.calculate_moving_ship_preference(
ship=ship, position=ship.position, mode=mode, mine_threshold=mine_threshold)
else:
q_mine = np.copy(q_empty)
        if mine_threshold <= cell.halite: # q_mine is unused when in danger, so no stop-risk check needed
q_mine[0] = max(4.0, q_mine[0])
        if free_steps == 1: # shortest route, with one stop allowed
if mine_threshold <= cell.halite:
                if 0 < ship.halite: # mining does not reduce free_steps
a.log(id_=ship.id, s=f'{ship.position} prj={self.project_id} free_steps{free_steps} q_mine{q_mine} mine_thre{mine_threshold}')
return a.reserve_ship_by_q(ship, q=q_mine, priority=priority)
elif d == 1 and mine_threshold <= cell.halite:
                    # if we sit on the only enemy approach, we can still collect halite
for cell_i in a.neighbor_cells(a.board.cells[shipyard.position]):
if cell_i.position == ship.position:
continue
i_, j_ = position_to_ij(cell_i.position)
if a.scores[I_SCORE_OPPONENT_REACH, i_, j_] < 1.5:
                            # not actually blocking, so quietly head home
return a.reserve_ship_by_q(ship, q=q_empty, priority=priority)
a.log(id_=ship.id, s=f'{ship.position} prj={self.project_id} blocking. q_mine={q_mine}')
return a.reserve_ship_by_q(ship, q=q_mine, priority=priority)
a.log(id_=ship.id, s=f'{ship.position} prj={self.project_id} free_steps{free_steps} q_empty{q_empty} back')
return a.reserve_ship_by_q(ship, q=q_empty, priority=priority)
ship_cell = a.board.cells[ship.position]
if ((free_steps < 2) or
(0 < ship.halite) or
(free_steps == 3 and ship.halite == 0 and 0 < d and 80.0 < ship_cell.halite)):
            # even moving away would not let us mine, so hold back
has_non_empty_ship = False
for k_action, cell_k in enumerate(a.neighbor_cells(a.board.cells[ship.position])):
d_k = calculate_distance(cell_k.position, shipyard.position)
if d < d_k:
q_mine[k_action] *= 0.3
if d == 0:
if cell_k.ship and cell_k.ship.player_id == a.player_id and 0 < cell_k.ship.halite:
has_non_empty_ship = True
            if has_non_empty_ship: # do not block the way home
q_mine[I_NORTH:] *= 10.0
a.log(id_=ship.id, s=f'{ship.position} prj={self.project_id} free_steps{free_steps} q_mine{q_mine} back with mine has_non_empty_ship={has_non_empty_ship} d{d} original_free_steps{original_free_steps} mine_thre{mine_threshold} mine_prj{mine_project}')
return a.reserve_ship_by_q(ship, q=q_mine, priority=priority)
        # the conditions for one pickup on the way out are met, so search the surroundings
positions = neighbor_positions(d=(2 if d == 0 else 1), p=ship.position)
max_position = shipyard.position
max_halite_diff = 0
for x1, y1, d1 in positions:
p1 = Point(x=x1, y=y1)
cell1 = a.board.cells[p1]
if cell1.halite < 1e-6:
continue
mine_project1 = a.projects.get(f'mine_{x1}_{y1}', None)
if mine_project1:
if mine_project1.ships:
                    continue # avoid competing over it
mine_threshold1 = mine_project1.halite_threshold
else:
mine_threshold1 = 40.0
halite_diff = cell1.halite - mine_threshold1
if max_halite_diff < halite_diff:
max_position = p1
max_halite_diff = halite_diff
q_explore, forced = a.calculate_moving_ship_preference(
ship=ship, position=max_position, mode='mine', mine_threshold=mine_threshold)
        if d == 0 and 2 < o_min_d: # avoid idling on the shipyard when there is no threat
q_explore[0] *= 0.4
a.log(id_=ship.id, s=f'{ship.position} prj={self.project_id} free_steps{free_steps} q_explore{q_explore} max{max_position} h_diff{max_halite_diff}')
return a.reserve_ship_by_q(ship, q=q_explore, priority=priority)
def defense_shipyard_strategy_ships_generator(self):
"""
MineProjectは制御を奪ってよい
EscortProjectに自分自身が属しているshipは制御を奪ってよい
"""
a = self.agent
for ship in a.sorted_ships:
determined = a.determined_ships.get(ship.id, None)
if determined is not None:
continue
project_id = a.belonging_project.get(ship.id, None)
if project_id is None:
yield ship
elif project_id == self.project_id:
yield ship
elif project_id[:6] == 'escort':
escort_project = a.projects.get(project_id, None)
if not escort_project:
yield ship
elif escort_project.has_myself():
yield ship
elif project_id[:4] == 'mine':
yield ship
def find_confirmed_shipyard(self, shipyard):
"""近くにally shipyardがあるなら、壊されてもいいや"""
a = self.agent
for x_k, y_k, d_k in neighbor_positions(d=2, p=shipyard.position):
if d_k == 0:
                continue # the shipyard itself
cell_k = a.board.cells[x_k, y_k]
shipyard_k = cell_k.shipyard
if shipyard_k is None:
continue
if shipyard_k.player_id != a.player_id:
continue
project_id = f'defense_yd{shipyard_k.id}'
project = a.projects.get(project_id, None)
if project is None:
continue
last_confirmed_step = project.last_confirmed_step
if a.board.step <= last_confirmed_step:
return shipyard_k
return None
def schedule(self):
super().schedule()
self.info = {}
self.maintain_dead_staffs()
a = self.agent
shipyard = a.board.shipyards.get(self.shipyard_id, None)
if shipyard is None or 397 <= a.board.step:
return False
len_ships = len(a.board.current_player.ships)
if len_ships == 1 and a.board.current_player.ships[0].halite == 0 and a.board.current_player.halite < MAX_HALITE:
a.log(loglevel='WARNING', id_=shipyard.id, s=f'prj={self.project_id} last_escape_mode')
            return False # pure escape mode
self.position = shipyard.position
self.i, self.j = position_to_ij(self.position)
self.shipyard_cell = a.board.cells[self.position]
confirmed_shipyard = self.find_confirmed_shipyard(shipyard)
if confirmed_shipyard:
a.log(id_=shipyard.id, s=f'confirmed_shipyard{confirmed_shipyard.id} found')
return False
o_min_d = 99999 # opponent
empty_o_min_d = 99999
self.o = None
for shipyard_i in a.board.shipyards.values():
if shipyard_i.player_id == a.player_id:
continue
            # +1 because spawning takes 1 step
d = 1 + calculate_distance(self.position, shipyard_i.position)
if d < o_min_d:
self.o = shipyard_i
o_min_d = d
empty_o_min_d = d
for ship in a.board.ships.values():
if ship.player_id == a.player_id:
continue
d = calculate_distance(self.position, ship.position)
if d < o_min_d:
self.o = ship
o_min_d = d
if ship.halite == 0 and d < empty_o_min_d:
empty_o_min_d = d
self.info['shipyard_id'] = shipyard.id
self.info['opponent_id'] = None if self.o is None else self.o.id
self.info['opponent_distance'] = o_min_d
self.empty_o_min_d.append(empty_o_min_d)
if self.o is None:
self.priority = -100.0
elif a.board.step < 9:
self.priority = -1.0
else:
self.priority = 200000. - o_min_d * 10000. + a.scores[I_SCORE_HALITE_D4, self.i, self.j]
if self.priority < 0.0:
self.dismiss_project(staff_ids=list(self.ships.keys()))
self.last_confirmed_step = a.board.step
return True
    def should_cancel(self, *args, **kwargs):
        result, condition = self.should_cancel_impl(*args, **kwargs)
        self.agent.log(s=f'prj={self.project_id} should_cancel={result} {condition}')
        return result
def should_cancel_impl(self, e, o, cell_o, e_min_d, o_min_d):
a = self.agent
condition = ''
if 2 < o_min_d:
return False, '2<o_min_d'
if e_min_d != 0:
return False, 'e_min_d!=0'
if len(self.empty_o_min_d) < 5:
            return False, f'len(empty_o_min_d)={len(self.empty_o_min_d)}' # very early game
n_min_d = int(1e-6 + a.scores[I_SCORE_NON_EMPTY_ALLY_REACH, self.i, self.j])
condition += f' n_min_d{n_min_d}'
if 2 < n_min_d:
condition += ' 2<n_min_d'
            return False, condition # if no ship wants to deposit, just leave it alone
condition0 = (3 == np.sum((np.array(self.empty_o_min_d[-3:]) <= 1).astype(np.int32)))
condition1 = (4 <= np.sum((np.array(self.empty_o_min_d[-5:]) <= 1).astype(np.int32)))
condition += f' cond0={condition0} cond1={condition1} empty_o_min_d{self.empty_o_min_d[-5:]}'
if not (condition0 or condition1):
return False, condition
previous_o = a.previous_board.ships.get(o.id, None)
        # whether the camper oscillates or sits still, ramming its position from one turn ago works
for k_action, cell_k in enumerate(a.neighbor_cells(a.previous_board.cells[self.position])):
if k_action == 0:
continue
ship_k = cell_k.ship
if ship_k is None or 0 < ship_k.halite or ship_k.player_id == a.player_id:
continue
condition += f' k{k_action}_found'
return ship_k.position, condition
return False, condition
def search_empty_ally(self):
"""shipyardの隣にいるempty_shipを探す"""
ship_candidates = list(self.defense_shipyard_strategy_ships_generator())
for i_action, cell_i in enumerate(self.agent.neighbor_cells(self.shipyard_cell)):
if i_action == 0:
continue
ship_i = cell_i.ship
if ship_i is None:
continue
if 0 < ship_i.halite:
continue
if ship_i in ship_candidates:
return ship_i
return None
def run_cancel(self, e, target_position):
"""return done, safe, spawned"""
a = self.agent
defender = None
ally_ship = self.search_empty_ally()
shipyard = a.board.shipyards[self.shipyard_id]
if ally_ship:
self.join_project(staff_ids=[ally_ship.id], role='defender_ally', forced=True)
a.moving_ship_strategy(ship=ally_ship, position=self.position, mode='cancel_without_shipyard', mine_threshold=None)
a.moving_ship_strategy(ship=e, position=target_position, mode='cancel_without_shipyard', mine_threshold=None)
a.log(id_=e.id, s=f'prj={self.project_id} cancel to {target_position}, with ally s{ally_ship.id}')
return True, True, False
elif MAX_HALITE <= a.free_halite + self.budget:
            # SPAWN is affordable
if a.flags.get(I_FLAG_NEXT_SHIP_POSITION, i=self.i, j=self.j):
a.log(loglevel='warning', s=f'prj={self.project_id} cannot spawn because someone will return type A')
                a.reserve_shipyard(shipyard, None) # someone will return here, so do not SPAWN
a.moving_ship_strategy(ship=e, position=target_position, mode='cancel_without_shipyard', mine_threshold=None)
return True, False, False
elif self.spawn_step_threshold <= a.board.step and 1 < len(a.board.current_player.shipyards):
                # spawning is no longer worth it, so keep the halite as a deterrent and pray
self.reserve_budget(MAX_HALITE - self.budget)
a.log(id_=e.id, s=f'prj={self.project_id} cancel to {target_position}, with fake SPAWN')
a.reserve_shipyard(shipyard, None)
a.moving_ship_strategy(ship=e, position=target_position, mode='cancel_without_shipyard', mine_threshold=None)
return True, False, False
else:
self.reserve_budget(-self.budget)
a.log(id_=e.id, s=f'prj={self.project_id} cancel to {target_position}, with SPAWN')
a.reserve_shipyard(shipyard, ShipyardAction.SPAWN)
a.moving_ship_strategy(ship=e, position=target_position, mode='cancel_without_shipyard', mine_threshold=None)
return True, True, True
return False, True, False
def run(self):
"""防衛担当者を決める"""
super().run()
a = self.agent
shipyard = a.board.shipyards[self.shipyard_id]
if self.shipyard_id not in self.shipyards:
self.join_project(staff_ids=[self.shipyard_id], role='defended')
        if self.priority < 0.0: # nothing to do; simply keep the project alive
self.info['safe'] = True
self.info['empty_ally_ship_id'] = None
self.info['empty_ally_ship_distance'] = None
self.info['non_empty_ally_ship_id'] = None
self.info['non_empty_ally_ship_distance'] = None
return True
if a.board.step < self.spawn_step_threshold and 0 < self.budget and MAX_HALITE <= a.free_halite + self.budget:
            # spend the budget inherited from an Expedition right away
self.reserve_budget(-self.budget)
a.reserve_shipyard(shipyard, ShipyardAction.SPAWN)
self.dismiss_project(staff_ids=list(self.ships.keys()))
return True
cell = a.board.cells[self.position]
o_min_d = self.info['opponent_distance']
e_min_d = 99999 # empty
e = None
n_min_d = 99999 # non empty ally
n = None
for ship in self.defense_shipyard_strategy_ships_generator():
d = calculate_distance(shipyard.position, ship.position)
if ship.halite == 0:
if (d < e_min_d):
e = ship
e_min_d = d
elif d < o_min_d:
if ((d < n_min_d)
or ((d == n_min_d) and (n.halite < ship.halite))):
n = ship
n_min_d = d
if e:
self.info['empty_ally_ship_id'] = e.id
self.info['empty_ally_ship_distance'] = e_min_d
if n:
self.info['non_empty_ally_ship_id'] = n.id
self.info['non_empty_ally_ship_distance'] = n_min_d
safe = False
spawned = False
        defender = None # set this to trigger the default behavior
min_d = 99999
        if n_min_d < o_min_d: # the deposit will make it in time
self.dismiss_project(staff_ids=list(self.ships.keys()))
self.join_project(staff_ids=[n.id], role='defender')
defender = n
safe = True
elif e_min_d <= o_min_d:
self.dismiss_project(staff_ids=list(self.ships.keys()))
self.join_project(staff_ids=[e.id], role='defender')
opponent_id = self.info['opponent_id']
previous_o = a.previous_board.ships.get(opponent_id, None)
o = a.board.ships.get(opponent_id, None)
if o is None:
o = a.board.shipyards.get(opponent_id, None)
# a.log(f'prj={self.project_id} p{a.player_id} self.shipyard_id={self.shipyard_id} opponent_id={opponent_id} previous_o{previous_o} o{o} o_min_d={o_min_d}')
cell_o = a.board.cells[o.position]
target_position = self.should_cancel(e=e, o=o, cell_o=cell_o, e_min_d=e_min_d, o_min_d=o_min_d)
if target_position:
                # go trade ships to counter a RestrainShipyardProject
done, safe, spawned = self.run_cancel(e=e, target_position=target_position)
else:
defender = e
safe = True
spawned = False
elif MAX_HALITE <= a.free_halite + self.budget:
someone_arrived = a.flags.get(I_FLAG_NEXT_SHIP_POSITION, i=self.i, j=self.j)
if someone_arrived:
                a.reserve_shipyard(shipyard, None) # someone will return here
safe = False
a.log(loglevel='warning', s=f'id={self.project_id} cannot spawn because someone will return type B')
elif ((self.spawn_step_threshold_final <= a.board.step)
or
(self.spawn_step_threshold <= a.board.step
and a.scores[I_SCORE_HALITE_D4, self.i, self.j] < 1000.
and 1 < len(a.board.current_player.shipyards))):
                # spawning is no longer worth it, so keep the halite as a deterrent and pray
self.reserve_budget(MAX_HALITE - self.budget)
a.reserve_shipyard(shipyard, None)
safe = False
a.log(f'prj={self.project_id} stop spawning because the shipyard is not tasty')
else:
self.reserve_budget(-self.budget)
a.reserve_shipyard(shipyard, ShipyardAction.SPAWN)
spawned = True
safe = True
a.log(f'prj={self.project_id} spawning to protect')
someone_arrived = a.flags.get(I_FLAG_NEXT_SHIP_POSITION, i=self.i, j=self.j)
if ((not spawned)
and (not someone_arrived)
and (MAX_HALITE <= a.free_halite + self.budget)):
            # a non-urgent spawn
can_spawn = a.can_spawn(shipyard, budget=self.budget)
if can_spawn:
a.log(f'prj={self.project_id} unneeded spawn freeh{a.free_halite} budget{self.budget}')
self.reserve_budget(-self.budget)
a.reserve_shipyard(shipyard, ShipyardAction.SPAWN)
spawned = True
        if defender: # default behavior
defender = self.swap_project(defender)
self.shipyard_defender_strategy(defender)
self.info['safe'] = safe
a.log(s=f'id={self.project_id} run priority={self.priority} shipyard_info={self.info} ships={self.ships} spawned={spawned}')
return True
def swap_project(self, ship):
"""shipyard上待機が原因で味方をブロックしないように役割スワップを検討する"""
a = self.agent
shipyard = a.board.shipyards[self.shipyard_id]
if shipyard.position != ship.position:
            return ship # only applies while the target ship is defending on top of the shipyard
if a.board.step - 1 <= self.last_swapped_step:
            # avoid swapping on consecutive steps, which can deadlock
return ship
to_sort = []
for k_action, cell_k in enumerate(a.neighbor_cells(a.board.cells[ship.position])):
if k_action == 0:
continue
ship_k = cell_k.ship
if not ship_k:
continue
if ship_k.player_id != a.player_id:
continue
if 0 < ship_k.halite:
continue
project_id_k = a.belonging_project.get(ship_k.id, None)
if project_id_k is None:
continue
project_k = a.projects.get(project_id_k, None)
if project_k is None:
continue
target_position = None
if project_id_k[:6] == 'escort':
if project_k.target_ship_id in [ship.id, ship_k.id]:
                    continue # leave the ship's own escort alone
target_ship = a.board.ships.get(project_k.target_ship_id, None)
if target_ship is None:
continue
target_position = target_ship.position
priority = 10.0
elif project_id_k[:4] == 'mine':
target_position = project_k.position
priority = 100.0
else:
continue
d_k = calculate_distance(ship_k.position, target_position)
d = calculate_distance(ship.position, target_position)
if d < d_k:
priority += d_k
to_sort.append((priority, ship_k.id, ship_k, project_k, target_position))
if not to_sort:
return ship
        # do the join/dismiss bookkeeping
priority, swapped_ship_id, swapped_ship, project, target_position = sorted(to_sort, reverse=True)[0]
ship_role = self.ships[ship.id]
swapped_ship_role = project.ships[swapped_ship_id]
self.dismiss_project(staff_ids=[ship.id])
project.dismiss_project(staff_ids=[swapped_ship_id])
self.join_project(staff_ids=[swapped_ship_id], role=ship_role)
project.join_project(staff_ids=[ship.id], role=swapped_ship_role)
if project.project_id[:4] == 'mine':
            # the escort entourage must move between projects too
old_escort_project_id = f'escort{swapped_ship.id}'
old_escort_project = a.projects.get(old_escort_project_id, None)
ship_ids = []
if old_escort_project:
ship_ids = list(old_escort_project.ships.keys())
old_escort_project.dismiss_project(staff_ids=ship_ids)
new_escort_project_id = f'escort{ship.id}'
new_escort_project = a.projects.get(new_escort_project_id, None)
if new_escort_project:
new_escort_project.join_project(staff_ids=ship_ids, role='defender')
                a.log(id_=ship.id, s=f'swapped. old_escort_prj={old_escort_project_id}({len(old_escort_project.ships) if old_escort_project else 0}) new_escort_prj={new_escort_project_id}({len(new_escort_project.ships)})')
                a.log(id_=swapped_ship_id, s=f'swapped. old_escort_prj={old_escort_project_id}({len(old_escort_project.ships) if old_escort_project else 0}) new_escort_prj={new_escort_project_id}({len(new_escort_project.ships)})')
        a.log(id_=ship.id, s=f'swapped. s{ship.id}(prj={a.belonging_project.get(ship.id, None)}) s{swapped_ship_id}({a.belonging_project.get(swapped_ship_id, None)}) prio{priority} target{target_position}')
        a.log(id_=swapped_ship_id, s=f'swapped. s{ship.id}(prj={a.belonging_project.get(ship.id, None)}) s{swapped_ship_id}({a.belonging_project.get(swapped_ship_id, None)}) prio{priority} target{target_position}')
self.last_swapped_step = a.board.step
return swapped_ship
class RestrainShipyardProject(Project):
"""
NOTE: best agent does not use this project
敵のshipyardの隣に1ship常駐し、敵の動向を見る
deposit防ぎつつ、相殺もされないのがベスト
無視してshipyard空けたり
depositしてくるようならタイミング見計らって特攻する
相殺してくるようなら互いに損なので適度に逃げる
メイン目的はHuntProjectの逃げ道防ぐ布石
"""
def __init__(self, shipyard_id, *args, **kwargs):
project_id = f'restrain_yd{shipyard_id}'
super().__init__(*args, project_id=project_id, **kwargs)
self.shipyard_id = shipyard_id
self.shipyard = None
self.shipyards[self.shipyard_id] = 'target_shipyard'
self.zero_halite_positions = []
self.stop_canceled_count = [0, 0]
        self.vibrate_canceled_count = [0, 0]
self.d_max_camp_positon_from_ally_shipyard = 7
def feedback(self, shipyard):
"""味方が前いた位置に相殺しに来ているかをチェック"""
a = self.agent
if a.previous_board is None:
return
for ship_id, role in self.ships.items():
ship0 = a.previous_board.ships.get(ship_id, None)
if ship0 is None:
continue
ship1 = a.board.ships.get(ship_id, None)
vibrate_canceled = 0
stop_canceled = 0
if ship1 is None:
vibrate_canceled = 1
stop_canceled = 1
cell1 = a.board.cells[ship0.position]
if cell1.ship and cell1.ship.player_id == shipyard.player_id:
stop_canceled = 1
if role == 'stop':
self.stop_canceled_count[0] += stop_canceled
self.stop_canceled_count[1] += 1
elif role == 'vibrate':
self.vibrate_canceled_count[0] += vibrate_canceled
self.vibrate_canceled_count[1] += 1
        # also look at the opponent's overall record
if 2 <= a.opponent_history[self.shipyard.player_id]['cancel_against_shipyard_attack'][0]:
self.stop_canceled_count[0] = max(1, self.stop_canceled_count[0])
self.stop_canceled_count[1] = max(1, self.stop_canceled_count[1])
def can_stop(self):
if 0 < self.stop_canceled_count[0]:
return False
if self.d_max_camp_positon_from_ally_shipyard < self.d_ally_shipyard:
return False
return True
def can_vibrate(self):
if 0 < self.vibrate_canceled_count[0]:
return False
if self.d_max_camp_positon_from_ally_shipyard < self.d_ally_shipyard:
return False
return True
def schedule(self):
super().schedule()
a = self.agent
self.shipyard = a.board.shipyards.get(self.shipyard_id, None)
if self.shipyard is None:
return False
self.feedback(self.shipyard)
        # find the direction of the ally shipyard to coordinate with in HuntProject
self.d_ally_shipyard = a.ally_shipyard_distances[self.shipyard_id]['min']
self.ally_shipyard = None
for ally_shipyard_id_k, d_ally_shipyard_k in a.ally_shipyard_distances[self.shipyard_id].items():
if ally_shipyard_id_k == 'min':
continue
if d_ally_shipyard_k == self.d_ally_shipyard:
self.ally_shipyard = a.board.shipyards.get(ally_shipyard_id_k, None)
if self.ally_shipyard:
break
if self.ally_shipyard is None:
self.priority = -1.0
return True
        # the directions an opponent ship that mined near ally_shipyard would flee toward
        self.opponent_return_directions = np.zeros(LEN_MOVE, dtype=bool)
self.dx = rotated_diff_position(self.ally_shipyard.position[0], self.shipyard.position[0])
self.dy = rotated_diff_position(self.ally_shipyard.position[1], self.shipyard.position[1])
if self.dx <= -2:
self.opponent_return_directions[I_WEST] = True
if 2 <= self.dx:
self.opponent_return_directions[I_EAST] = True
if self.dy <= -2:
self.opponent_return_directions[I_SOUTH] = True
if 2 <= self.dy:
self.opponent_return_directions[I_NORTH] = True
a.log(s=f'prj={self.project_id} ships={list(self.ships.keys())} d_ally_shipyard={self.d_ally_shipyard} op_return_dir{self.opponent_return_directions}')
self.join_project(staff_ids=[self.shipyard_id], role='target_shipyard')
# self.dismiss_project(staff_ids=list(self.ships.keys()))
self.zero_halite_positions = []
self.opponent_working = False
self.already_in_position = False
has_neighbor_opponent_shipyard = False
for x, y, d in neighbor_positions(d=2, p=self.shipyard.position):
if d == 0:
continue
cell_i = a.board.cells[x, y]
shipyard_i = cell_i.shipyard
if shipyard_i and shipyard_i.player_id != self.shipyard.player_id:
                # shipyards are packed together here, so skip
has_neighbor_opponent_shipyard = True
ship_i = cell_i.ship
if ship_i and ship_i.player_id not in [a.player_id, self.shipyard.player_id]:
                # a third-party enemy is already doing the work
self.opponent_working = True
if d == 1 and cell_i.halite < 1e-6:
self.zero_halite_positions.append(Point(x=x, y=y))
if ship_i and ship_i.player_id == a.player_id:
self.already_in_position = True
if 10 < self.d_ally_shipyard:
            self.priority = -1.0 # farther than a starting distance; not worth restraining
elif has_neighbor_opponent_shipyard:
            self.priority = -1.0 # congested area, so they already restrain each other
# elif (not self.already_in_position) and opponent_working:
# self.priority = -1.0
else:
self.priority = len(self.zero_halite_positions) + 1.
if self.priority < 0.0:
self.dismiss_project(staff_ids=list(self.ships.keys()))
a.log(id_=self.shipyard_id, s=f'prj={self.project_id} prio{self.priority} has_opyd={has_neighbor_opponent_shipyard} already_in_pos={self.already_in_position} op_working={self.opponent_working}')
return True
def restrain_ships_generator(self):
a = self.agent
for ship in a.empty_ships:
if a.determined_ships.get(ship.id, None) is not None:
continue
project_id = a.belonging_project.get(ship.id, None)
if project_id is None:
yield ship
elif project_id == self.project_id:
yield ship
def calculate_target_position(self):
a = self.agent
p0 = self.ally_shipyard.position
p1 = self.shipyard.position
positions = [p0]
p = p0
for j in range(22):
q = preference_move_to(p, p1)
action = np.argmax(q)
p = Point(x=mod_map_size_x(p[0] + DX[action]), y=mod_map_size_x(p[1] + DY[action]))
positions.append(p)
# a.log(s=f'prj={self.project_id} p{p} q{q} positions{positions}')
if p == p1:
break
i = min(len(positions) - 2, self.d_max_camp_positon_from_ally_shipyard)
a.log(s=f'prj={self.project_id} positions{positions} i{i}')
return positions[i]
def run(self):
super().run()
if self.priority < 0.0:
return True
a = self.agent
        # skip this when we have few ships
len_ships = len(a.board.current_player.ships)
len_restrain_ships = 0
len_staffs = len(self.ships)
restrain_ships = []
for staff_id, project_id in a.belonging_project.items():
if project_id is None:
continue
            if staff_id not in a.board.current_player.ship_ids:
continue
if project_id[:8] == 'restrain':
len_restrain_ships += 1
                restrain_ships.append((staff_id, project_id))
len_restrain_ships_allowed = max(0, (len_ships - 15) // 3)
len_diff = len_restrain_ships_allowed - len_restrain_ships
if len_diff <= 0:
max_len_ships = len_staffs
else:
max_len_ships = max(0, min(1, len_diff))
if max_len_ships <= 0:
return True
target_position = self.calculate_target_position()
a.log(s=f'prj={self.project_id} {target_position} len_ships{len_ships} len_restrain_ships{len_restrain_ships} len_staffs{len_staffs} len_restrain_ships_allowed{len_restrain_ships_allowed} len_diff{len_diff} max_len_ships{max_len_ships} restrain_ships{restrain_ships}')
to_sort = []
opponent_ships = []
for opponent_ship in a.board.players[self.shipyard.player_id].ships:
if opponent_ship.halite == 0:
continue
dx_opponent = rotated_diff_position(opponent_ship.position[0], self.shipyard.position[0])
dy_opponent = rotated_diff_position(opponent_ship.position[1], self.shipyard.position[1])
            directions = np.zeros(LEN_MOVE, dtype=bool)
if 0 < dy_opponent and self.opponent_return_directions[I_NORTH]:
directions[I_NORTH] = True
if 0 < dx_opponent and self.opponent_return_directions[I_EAST]:
directions[I_EAST] = True
if dy_opponent < 0 and self.opponent_return_directions[I_SOUTH]:
directions[I_SOUTH] = True
if dx_opponent < 0 and self.opponent_return_directions[I_WEST]:
directions[I_WEST] = True
if np.any(directions):
opponent_ships.append((opponent_ship, dx_opponent, dy_opponent, directions))
a.log(s=f'prj={self.project_id}{self.shipyard.position} test_op{opponent_ship.id}{opponent_ship.position} dxo{dx_opponent} dyo{dy_opponent} dirs{directions}')
for ship in self.restrain_ships_generator():
            # is there a ship that could block?
d = calculate_distance(ship.position, target_position)
            # positive: ship to the west, shipyard to the east
dx = rotated_diff_position(ship.position[0], self.shipyard.position[0])
            # positive: ship to the south, shipyard to the north
dy = rotated_diff_position(ship.position[1], self.shipyard.position[1])
score = 0
target_opponent_ship = None
next_direction = None
can_stop = a.board.cells[ship.position].halite < 3.99
condition = ''
for opponent_ship, dx_opponent, dy_opponent, directions in opponent_ships:
d_min_shipyard = a.opponent_shipyard_distances[opponent_ship.id]['min']
d_shipyard = a.opponent_shipyard_distances[opponent_ship.id][self.shipyard_id]
if d_min_shipyard < d_shipyard:
continue
                # positive: ally to the west, opponent to the east
dx_ships = dx - dx_opponent
                # positive: ally to the south, opponent to the north
dy_ships = dy - dy_opponent
condition = f'dxs{dx_ships} dys{dy_ships}'
abs_dx_ships = abs(dx_ships)
abs_dy_ships = abs(dy_ships)
                if directions[I_NORTH] and 0 < dy < dy_opponent:
                    # north-to-south layout: shipyard, ally, opponent
                    if abs_dx_ships <= abs_dy_ships: # aligning on the x axis is still possible in time
                        score = 1
                        target_opponent_ship = opponent_ship
                        condition += f' NORTH('
                        if abs_dx_ships == 0:
                            condition += f' abs_dx_ships0'
                            if 1 < abs_dy_ships:
                                condition += f' 1<abs_dy_ships'
                                next_direction = I_SOUTH
                            elif can_stop:
                                condition += f' can_stop'
                                next_direction = I_MINE
                            elif dx < 0: # shipyard to the west
                                condition += f' dx<0'
                                next_direction = I_WEST
                            elif 0 < dx: # shipyard to the east
                                condition += f' 0<dx'
                                next_direction = I_EAST
                            elif 1 < dy:
                                condition += f' 1<dy'
                                next_direction = I_NORTH
                            else: # down to luck
                                condition += f' good_luck'
                                next_direction = I_EAST
                        elif dx_ships < 0: # opponent to the west
                            condition += f' dx_ships<0'
                            next_direction = I_WEST
                        else:
                            condition += f' 0<dx_ships'
                            next_direction = I_EAST
                        condition += f')'
                        break
                if directions[I_SOUTH] and dy_opponent < dy < 0:
                    # north-to-south layout: opponent, ally, shipyard
                    if abs_dx_ships <= abs_dy_ships: # aligning on the x axis is still possible in time
                        score = 1
                        target_opponent_ship = opponent_ship
                        condition += f' SOUTH('
                        if abs_dx_ships == 0:
                            condition += f' abs_dx_ships0'
                            if 1 < abs_dy_ships:
                                condition += f' 1<abs_dy_ships'
                                next_direction = I_NORTH
                            elif can_stop:
                                condition += f' can_stop'
                                next_direction = I_MINE
                            elif dx < 0: # shipyard to the west
                                condition += f' dx<0'
                                next_direction = I_WEST
                            elif 0 < dx: # shipyard to the east
                                condition += f' 0<dx'
                                next_direction = I_EAST
                            elif dy < -1:
                                condition += f' dy<-1'
                                next_direction = I_SOUTH
                            else: # down to luck
                                condition += f' good_luck'
                                next_direction = I_EAST
                        elif dx_ships < 0: # opponent to the west
                            condition += f' dx_ships<0'
                            next_direction = I_WEST
                        else:
                            condition += f' 0<dx_ships'
                            next_direction = I_EAST
                        condition += f')'
                        break
                if directions[I_WEST] and dx_opponent < dx < 0:
                    # east-to-west layout: opponent, ally, shipyard
                    if abs_dy_ships <= abs_dx_ships: # aligning on the y axis is still possible in time
                        score = 1
                        target_opponent_ship = opponent_ship
                        condition += f' WEST('
                        if abs_dy_ships == 0:
                            condition += f' abs_dy_ships0'
                            if 1 < abs_dx_ships:
                                condition += f' 1<abs_dx_ships'
                                next_direction = I_EAST
                            elif can_stop:
                                condition += f' can_stop'
                                next_direction = I_MINE
                            elif dy < 0: # shipyard to the south
                                condition += f' dy<0'
                                next_direction = I_SOUTH
                            elif 0 < dy: # shipyard to the north
                                condition += f' 0<dy'
                                next_direction = I_NORTH
                            elif dx < -1:
                                condition += f' dx<-1'
                                next_direction = I_WEST
                            else: # down to luck
                                condition += f' good_luck'
                                next_direction = I_NORTH
                        elif dy_ships < 0: # opponent to the south
                            condition += f' dy_ships<0'
                            next_direction = I_SOUTH
                        else: # opponent to the north
                            condition += f' 0<dy_ships'
                            next_direction = I_NORTH
                        condition += f')'
                        break
                if directions[I_EAST] and 0 < dx < dx_opponent:
                    # east-to-west layout: shipyard, ally, opponent
                    if abs_dy_ships <= abs_dx_ships: # aligning on the y axis is still possible in time
                        score = 1
                        target_opponent_ship = opponent_ship
                        condition += f' EAST('
                        if abs_dy_ships == 0:
                            condition += f' abs_dy_ships0'
                            if 1 < abs_dx_ships:
                                condition += f' 1<abs_dx_ships'
                                next_direction = I_WEST
                            elif can_stop:
                                condition += f' can_stop'
                                next_direction = I_MINE
                            elif dy < 0: # shipyard to the south
                                condition += f' dy<0'
                                next_direction = I_SOUTH
                            elif 0 < dy: # shipyard to the north
                                condition += f' 0<dy'
                                next_direction = I_NORTH
                            elif 1 < dx:
                                condition += f' 1<dx'
                                next_direction = I_EAST
                            else: # down to luck
                                condition += f' good_luck'
                                next_direction = I_NORTH
                        elif dy_ships < 0: # opponent to the south
                            condition += f' dy_ships<0'
                            next_direction = I_SOUTH
                        else: # opponent to the north
                            condition += f' 0<dy_ships'
                            next_direction = I_NORTH
                        condition += f')'
                        break
to_sort.append((-score, d, dx, dy, ship.id, ship, target_opponent_ship, next_direction, condition))
if len(to_sort) == 0:
return True
to_sort = sorted(to_sort)
negative_score, d, dx, dy, ship_id, ship, target_opponent_ship, next_direction, condition = to_sort[0]
score = -negative_score
opponent_ship_id = None if target_opponent_ship is None else target_opponent_ship.id
a.log(id_=ship_id, s=f'prj={self.project_id} {target_position} score{score} d{d} dx{dx} dy{dy}, op{opponent_ship_id} next_dir{next_direction} op_return_dir{self.opponent_return_directions} {condition}')
self.dismiss_project(staff_ids=list(self.ships.keys()))
role = 'escape'
        if next_direction is None: # move toward target_position
mode = 'escape'
a.moving_ship_strategy(ship=ship, position=target_position, mode=mode, mine_threshold=None)
else:
mode = 'cancel'
position = calculate_next_position(ship.position, next_direction)
a.moving_ship_strategy(ship=ship, position=position, mode=mode, mine_threshold=None)
self.join_project(staff_ids=[ship_id], role=role)
return True
class EscortProject(Project):
"""
対象のshipを守りながら移動
対象自体は別Projectに属しているならそちら優先
None ならまとめて管理 (defender に逃げ道潰されるのを防ぐため)
"""
def __init__(self, target_ship_id, *args, defender_ship_id=None, **kwargs):
self.target_ship_id = target_ship_id
        self.shipyard = None # the shipyard the target ship plans to return to
        self.nearest_shipyard_id = None # same as above
self.defender_ship_id = defender_ship_id
self.is_final = False
super().__init__(*args, project_id=f'escort{self.target_ship_id}', **kwargs)
def has_myself(self):
return self.target_ship_id in self.ships.keys()
def schedule(self):
super().schedule()
self.shipyard = None
a = self.agent
target_ship = a.board.ships.get(self.target_ship_id, None)
if not target_ship:
return False
i_target, j_target = position_to_ij(target_ship.position)
project_id = a.belonging_project.get(self.target_ship_id, None)
is_project_mine = ((project_id is not None) and (project_id[:4] == 'mine'))
self.can_safely_deposit = False
if 0 == target_ship.halite: # normally no escort needed
if is_project_mine:
self.priority = 0.001
else:
self.priority = -1.0
self.can_safely_deposit = True
elif self.ships.get(self.target_ship_id, None): # the target itself belongs here = it wants to return home
self.priority = (1.0 + target_ship.halite * 200)
else: # escort only
self.priority = (1.0 + target_ship.halite * 100) / 1000
self.shipyard, self.d_target_staff = a.find_nearest_ally_shipyard(target_ship.position)
condition = ''
if self.shipyard:
self.target_staff = self.shipyard
target_staff_id = self.target_staff.id if self.target_staff else None
condition += 'shipyard'
else:
self.target_staff, self.d_target_staff = a.find_leader_ship(target_ship.position)
condition += 'leader_ship'
target_staff_id = self.target_staff.id if self.target_staff else None
a.log(s=f'prj={self.project_id} {condition} target_staff_id={target_staff_id}')
if self.shipyard and (not is_project_mine):
# Check whether the target can make it home in time; when the goal is mining,
# it may be in time now but no longer after mining a while more.
d_to_shipyard = calculate_distance(target_ship.position, self.shipyard.position)
i_shipyard, j_shipyard = position_to_ij(self.shipyard.position)
opponent_reach_shipyard = int(1e-6 + a.scores[I_SCORE_OPPONENT_REACH, i_shipyard, j_shipyard])
i_target, j_target = position_to_ij(target_ship.position)
opponent_reach_target = int(1e-6 + a.scores[I_SCORE_OPPONENT_REACH, i_target, j_target])
if d_to_shipyard < opponent_reach_shipyard:
# it can make it in time
self.can_safely_deposit = True
elif d_to_shipyard <= 6 and d_to_shipyard < opponent_reach_target + opponent_reach_shipyard:
# looks like the far side of the shipyard, so roughly safe
a.log(id_=self.target_ship_id, s=f'{target_ship.position} EscortProject: target can ALMOST safely deposit to {self.shipyard.id}{self.shipyard.position}')
self.can_safely_deposit = True
original_defender_ship_id = self.defender_ship_id
defender_ship = a.board.ships.get(self.defender_ship_id, None)
if defender_ship: # validate the incumbent defender
defender_ship_project_id = a.belonging_project.get(defender_ship.id, None)
d = calculate_distance(defender_ship.position, target_ship.position)
if (5 < d
or 0 < defender_ship.halite
or ((defender_ship_project_id is not None) and defender_ship_project_id != self.project_id)):
# unsuitable or cannot be hired
self.dismiss_project(staff_ids=[self.defender_ship_id])
elif self.defender_ship_id:
self.dismiss_project(staff_ids=[self.defender_ship_id])
if self.schedule_final(target_ship):
self.is_final = True
self.priority = 9999999.0 + target_ship.halite
else:
self.is_final = False
if self.can_safely_deposit:
# disband for now, lower the priority, and wait and see
self.priority *= 0.01
self.dismiss_project(staff_ids=list(self.ships.keys()))
elif self.priority < 0.0:
self.dismiss_project(staff_ids=list(self.ships.keys()))
# a.log(id_=original_defender_ship_id, s=f'prj={self.project_id} prio{self.priority:.3f} def{self.defender_ship_id} belong{a.belonging_project.get(original_defender_ship_id, None)}')
return True
def update_defender_ship_id(self):
self.defender_ship_id = None
for ship_id, role in self.ships.items():
if role == 'defender':
self.defender_ship_id = ship_id
def join_project(self, *args, **kwargs):
super().join_project(*args, **kwargs)
self.update_defender_ship_id()
def dismiss_project(self, *args, **kwargs):
super().dismiss_project(*args, **kwargs)
self.update_defender_ship_id()
def schedule_final(self, target_ship):
a = self.agent
if a.board.step <= 359:
return False
if a.determined_ships.get(self.target_ship_id, None) is not None:
return False
# expect each step forward to take roughly twice as long
# a last-moment escape loses 2 moves, so reserve 1 extra step
steps = 1 + self.d_target_staff + max(0, self.d_target_staff - 3)
if a.board.step + steps < 399:
return False
return True
def run(self):
super().run()
a = self.agent
target_project_id = a.belonging_project.get(self.target_ship_id, None)
is_defense_shipyard_project = ((target_project_id is not None) and (target_project_id[:10] == 'defense_yd'))
if self.priority < 0.0 or is_defense_shipyard_project:
self.dismiss_project(staff_ids=list(self.ships.keys()))
return True
if self.defender_ship_id and a.determined_ships.get(self.defender_ship_id, None):
return True # a MineProject may already have run it
target_ship = a.board.ships[self.target_ship_id]
determined = a.determined_ships.get(self.target_ship_id, None)
if determined is None:
# if target_ship has not acted yet, enroll it as well
self.join_project(staff_ids=[self.target_ship_id], role='target')
defender = None
if self.can_safely_deposit: # no defender needed
if self.defender_ship_id is not None:
self.dismiss_project(staff_ids=[self.defender_ship_id])
else:
if self.defender_ship_id is not None:
# it may have been taken by a HuntProject, so verify
defender_ship_project_id = a.belonging_project.get(self.defender_ship_id, None)
if ((defender_ship_project_id is not None)
and defender_ship_project_id != self.project_id):
self.defender_ship_id = None
if self.defender_ship_id is not None:
defender = a.board.ships[self.defender_ship_id]
else:
to_sort = []
for ship in self.ships_generator(with_free=True):
if 0 < ship.halite:
continue
d = calculate_distance(ship.position, target_ship.position)
if 5 < d:
continue
to_sort.append((ship, d, ship.id))
if not to_sort: # nobody was available
if not self.is_final:
return True
else:
to_sort = sorted(to_sort, key=itemgetter(1, 2))
defender = to_sort[0][0]
self.defender_ship_id = defender.id
if defender:
self.join_project(staff_ids=[defender.id], role='defender')
# move target_ship first
if determined is None:
mode = 'escape'
mine_threshold = 3.99
if self.is_final:
if target_ship.halite == 0 and 5 <= len(a.board.current_player.ships):
# shipyard_attack
opponents = []
for player_id in range(PLAYERS):
if player_id != a.player_id:
opponents.append(player_id)
opponent_shipyard, d_opponent_shipyard = a.find_nearest_shipyard(
target_ship.position,
player_ids=opponents)
if opponent_shipyard:
target_position = opponent_shipyard.position
mode = 'cancel'
mine_threshold = None
else:
target_position = target_ship.position
mode = 'escape'
mine_threshold = None
else:
target_position = self.target_staff.position
if 0 < target_ship.halite and self.d_target_staff <= 3:
mode = 'merge'
mine_threshold = None
elif self.shipyard:
target_position = self.shipyard.position
elif defender:
target_position = defender.position
else:
target_position = target_ship.position
if 398 <= a.board.step and 500 < target_ship.halite and 1 < self.d_target_staff:
a.reserve_ship(target_ship, ShipAction.CONVERT)
a.log(id_=target_ship.id, s=f'h{target_ship.halite} convert at last turn')
else:
a.moving_ship_strategy(
target_ship, position=target_position,
mode=mode, mine_threshold=mine_threshold)
a.log(id_=target_ship.id, s=f'{target_ship.position}->{target_position} by myself in EscortProject is_final={self.is_final} mode={mode}')
else:
target_position = None
if defender:
a.log(id_=defender.id, s=f'{defender.position} escort to {target_ship.id}{target_ship.position} is_final={self.is_final}')
a.log(id_=target_ship.id, s=f'{target_ship.position} escorted by {defender.id}{defender.position} is_final={self.is_final}')
qs = np.ones((LEN_MOVE, LEN_MOVE), dtype=np.float32)
mode = 'escape'
if determined is None and 0 < target_ship.halite:
# it is heading home, so mutual destruction (cancel) is acceptable
mode = 'cancel_without_shipyard'
for i_action, cell_i in enumerate(a.neighbor_cells(a.board.cells[target_ship.position])):
# cell_i: target_ship's destination next step
# target_position_i: target_ship's goal position
if target_position:
target_position_i = target_position
else:
target_position_i = cell_i.position
# d_i = calculate_distance(defender.position, cell_i.position)
target_d_i = calculate_distance(target_position_i, cell_i.position)
# aim for a spot that gets ahead of target_position
to_sort_k = []
for k_action, cell_k in enumerate(a.neighbor_cells(cell_i)):
target_d_k = calculate_distance(target_position_i, cell_k.position)
d_k = calculate_distance(defender.position, cell_k.position)
to_sort_k.append((target_d_k, d_k, k_action, cell_k))
target_d_k, d_k, k_action, cell_k = sorted(to_sort_k)[0]
if d_k <= 1: # when reaching the goal we are already ahead, so cancelling out is fine
mode_i = mode
else: # when far away, cancelling has little effect
mode_i = 'escape'
q_i, forced = a.calculate_moving_ship_preference(
ship=defender, position=cell_k.position,
mode=mode_i, mine_threshold=None)
a.log(id_=defender.id, s=f'[{i_action}{cell_i.position}] ka{k_action}{cell_k.position} tdk{target_d_k} dk{d_k} mode={mode_i}')
qs[i_action, :] = q_i
a.reserve_ship_by_q(
ship=defender, q=qs, depend_on=target_ship)
return True
MINE_PRIORITY_BY_DISTANCE = [100.0 * (0.8**t) for t in range(99)]
def mine_priority_by_distance(d):
if 0 <= d < len(MINE_PRIORITY_BY_DISTANCE):
return MINE_PRIORITY_BY_DISTANCE[d]
return 0.0
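# Example of the decay: mine_priority_by_distance(0) == 100.0,
# mine_priority_by_distance(1) == 80.0, and mine_priority_by_distance(5)
# == 100.0 * 0.8**5 ~= 32.77; for d >= 99 (or d < 0) it falls back to 0.0.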
class MineProject(Project):
"""
Mine at the specified position.
"""
def __init__(self, position, *args, **kwargs):
super().__init__(*args, project_id=f'mine_{position[0]}_{position[1]}', **kwargs)
self.position = position
self.i, self.j = position_to_ij(position)
self.cell = None
self.halite = 0.0
self.halite_threshold = 80.0
self.ally_reach = None
self.empty_ally_reach = None
self.ally_reach_v2 = None
self.opponent_reach = None
self.empty_opponent_reach = None
self.opponent_reach_v2 = None
self.elapsed_steps = 0
self.neighbor_empty_opponent_counter = 0
self.early_game_threshold = 50 # number of turns during which the project works without a defender
self.last_swapped_step = -1
self.last_mined_player_id = self.agent.player_id
def run_miner(self, miner, forced=False):
a = self.agent
overwrite = False
determined = a.determined_ships.get(miner.id, None)
if determined is not None:
overwrite = True
if (not forced) or (determined != 'reserved'):
a.log(loglevel='warning', id_=miner.id, s=f'prj={self.project_id} run_miner failed because determined{determined} or not forced')
exit()
return
priority, ship_id, q, ship, forced_, depend_on = a.reserving_ships[miner.id]
del a.reserving_ships[miner.id]
else:
priority = None
depend_on = None
previous_project_id = a.belonging_project.get(miner.id, None)
if previous_project_id is None:
self.join_project(staff_ids=[miner.id], role='miner')
elif previous_project_id != self.project_id:
self.join_project(staff_ids=[miner.id], role='miner', forced=True)
miner_d = calculate_distance(miner.position, self.position)
target_cell = a.board.cells[self.position]
miner_cell = a.board.cells[miner.position]
p_success = 1.0
if miner_d == 0:
mode = 'mine'
q, forced_ = a.calculate_moving_ship_preference(
miner, position=self.position,
mode=mode, mine_threshold=self.mining_halite_threshold)
elif miner_d == 1: # decide whether to charge in, accepting mutual destruction
opponent_exists = np.zeros(PLAYERS, dtype=np.int32)
opponent_mining = False
wait_opponent_mining = False
for k_action, cell_k in enumerate(a.neighbor_cells(target_cell)):
if cell_k.ship is None:
continue
ship_k = cell_k.ship
if ship_k.player_id == a.player_id:
continue
if ship_k.halite < miner.halite: # hopeless
a.log(s=f'prj={self.project_id} k{k_action} ship_k{ship_k.id} h{ship_k.halite}')
self.d_halite = 0.0
return True
if ship_k.halite == miner.halite:
if k_action == 0: # they got there first; wait and drive them out
opponent_mining = True
if miner_cell.halite <= 3.99 or (100. < miner_cell.halite < self.halite): # if both keep mining, we come out ahead
wait_opponent_mining = True
a.log(id_=miner.id, loglevel='info', s=f'prj={self.project_id} k{k_action} ship_k{ship_k.id} th{self.halite} miner_cellh{miner_cell.halite} op_mining={opponent_mining} wait_op_mining={wait_opponent_mining}')
opponent_exists[ship_k.player_id] = 1
p_stay = np.ones(PLAYERS, dtype=np.float32)
# estimate the probability the opponent charges in; if it looks higher than the Nash equilibrium, play the equilibrium, otherwise charge
p_nash = both_move_to_halite_nash_equilibrium(target_cell.halite)
for player_id in range(PLAYERS):
if not opponent_exists[player_id]:
continue
t, u = a.opponent_history[player_id].get('cancel_both_move_to_mine', [0, 0])
if u == 0: # no data yet: wait and see at first
p_stay[player_id] = 0.0
else:
p_stay[player_id] = (u - t) / u
p_opponent_go = 1.0 - np.prod(p_stay)
if opponent_mining: # let them keep mining
p_success = 0.0
elif p_opponent_go < 1e-6 + p_nash: # the opponent is conservative, so push a little harder
p_success = 2.0 * p_nash
else: # stick to the Nash equilibrium
p_success = p_nash
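# Decision-rule summary, restating the code above: each opponent's stay
# probability is estimated from its cancel_both_move_to_mine history as
# p_stay = (u - t) / u, and p_opponent_go = 1 - prod(p_stay). When the
# opponents look more conservative than the mixed-strategy equilibrium
# (p_opponent_go < p_nash), we exploit by charging with probability
# 2 * p_nash; otherwise we randomize at the equilibrium rate p_nash.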
if np.random.rand() < p_success:
mode = 'cancel'
else:
mode = 'escape'
if np.any(opponent_exists):
a.log(id_=miner.id, s=f'prj={self.project_id} h{miner.halite} p_nash{p_nash:.6f} p_stay{p_stay} p_opponent_go{p_opponent_go:.6f} p_success{p_success:.6f} mode={mode} op_mining={opponent_mining} wait_op_mining={wait_opponent_mining}')
if wait_opponent_mining: # wait next door, accepting possible mutual destruction
mode = 'cancel'
q, forced_ = a.calculate_moving_ship_preference(
miner, position=miner.position,
mode=mode, mine_threshold=3.99)
else:
q, forced_ = a.calculate_moving_ship_preference(
miner, position=self.position,
mode=mode, mine_threshold=self.mining_halite_threshold)
elif miner_d == 2 and self.empty_opponent_reach == 0:
# charge in, accepting mutual destruction, and drive them off quickly
mode = 'cancel'
q, forced_ = a.calculate_moving_ship_preference(
miner, position=self.position,
mode=mode, mine_threshold=None if miner.halite == 0 else 300.0)
else:
mode = 'escape'
q, forced_ = a.calculate_moving_ship_preference(
miner, position=self.position,
mode=mode, mine_threshold=None if miner.halite == 0 else 300.0)
a.reserve_ship_by_q(miner, q=q, forced=False, priority=priority, depend_on=depend_on)
self.update_score()
a.log(id_=miner.id, s=f'run_miner h{miner.halite} {miner.position}->{self.position} h{self.halite} prj={previous_project_id}->{self.project_id} prio{self.priority:.1f} noc{self.neighbor_empty_opponent_counter} h_thre{self.halite_threshold} mh_thre{self.mining_halite_threshold} mode={mode}')
def swap_mine_project(self, miner, defender, escort_project):
"""
Save turns by swapping roles.
Returns: miner, reserved, the project the old miner joins, and the escort_project for the new miner.
"""
a = self.agent
if a.board.step - 1 <= self.last_swapped_step:
# swapping on consecutive steps can deadlock, so avoid it
a.log(s=f'swap failed. last_swapped_step{self.last_swapped_step}')
return miner, False, self, escort_project # miner, reserved, old_miner_project, escort_project
defender_ship_id = defender.id if defender else None
to_sort = []
d0 = calculate_distance(miner.position, self.position)
condition = f'ground{self.position} miner_id={miner.id}{miner.position} d0={d0}'
for k_action, cell_k in enumerate(a.neighbor_cells(a.board.cells[miner.position])):
if k_action == 0:
continue
ship_k = cell_k.ship
if not ship_k:
continue
condition += f' [{k_action}] {ship_k.id}'
determined = a.determined_ships.get(ship_k.id)
reserved = None
if determined is None:
condition += f' not_reserved'
reserved = False
elif determined == 'reserved':
condition += f' reserved'
reserved = True
else:
condition += f' determined.'
continue
if ship_k.player_id != a.player_id:
condition += f' p_diff.'
continue
if miner.halite != ship_k.halite:
condition += f' h_diff.'
continue # can_offer may not agree, so restrict to ships carrying the same amount
project_id_k = a.belonging_project.get(ship_k.id, None)
condition += f' prj={project_id_k}'
if project_id_k is None:
continue
project_k = a.projects.get(project_id_k, None)
if project_k is None:
condition += f' prj_not_found.'
continue
target_position = None
new_mine_project_k = None
if project_id_k == f'escort{miner.id}':
# only swap with the defender that is escorting this very miner
if (defender is None):
condition += f' WARN_defender=None.'
continue # it should have been set by now
if (project_k.defender_ship_id is not None and project_k.defender_ship_id != defender_ship_id):
condition += f' WARN_defender_ship_id={project_k.defender_ship_id}!={defender_ship_id}.'
continue # it should have been set by now
target_ship = a.board.ships.get(project_k.target_ship_id, None)
if target_ship is None or target_ship.id != miner.id:
condition += f' target_ship_not_found.'
continue
target_position = target_ship.position
condition += f' target_ship{target_ship.id}{target_position}'
priority = 10.0
elif project_id_k[:4] == 'mine':
target_position = project_k.position
condition += f' {target_position}'
priority = 100.0
new_mine_project_k = project_k
else:
condition += f' unsuitable_prj'
continue
d_k = calculate_distance(ship_k.position, target_position)
d0_k = calculate_distance(ship_k.position, self.position)
d = calculate_distance(miner.position, target_position)
condition += f' d0{d0} dk{d_k} d{d} d0k{d0_k}'
if d0 + d_k < d + d0_k:
condition += f' far.'
priority += 1000.
continue # skip if the combined distance grows after the swap
elif d0 + d_k == d + d0_k and max(d0, d_k) <= max(d, d0_k):
condition += f' imbal.'
continue # we want to shrink the larger of the two distances
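# Worked example of the swap criterion above: with d0=4, d_k=0 (current
# total 4) against d=2, d0_k=2 (post-swap total 4), the totals tie but
# max(d0, d_k)=4 > max(d, d0_k)=2, so the swap is allowed and the longer
# leg shrinks from 4 to 2. With d0=3, d_k=2 vs d=2, d0_k=3 the totals tie
# and the max does not improve, so the swap is skipped.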
priority += d_k
to_sort.append((priority, ship_k.id, ship_k, new_mine_project_k, target_position, reserved))
if not to_sort:
a.log(id_=miner.id, s=f'prj={self.project_id} swap no_candidates. cond=({condition})')
return miner, False, self, escort_project
# perform the join/dismiss bookkeeping
priority, swapped_ship_id, swapped_ship, new_mine_project, target_position, reserved = sorted(to_sort, reverse=True)[0]
ship_role = self.ships[miner.id]
self.dismiss_project(staff_ids=[miner.id])
if new_mine_project is None: # swapped with the defender
if defender and swapped_ship_id == defender_ship_id:
# switch to the escort_project whose target_ship is swapped_ship
escort_project = a.projects.get(f'escort{swapped_ship_id}', None)
if escort_project:
escort_project.join_project(staff_ids=[miner.id], role='defender')
condition += f' join_to_escort_prj={escort_project.project_id}'
return defender, False, escort_project, escort_project
else:
condition += f' WARN_new_mine_project=None swapped_ship_id{swapped_ship_id} defender_ship_id{defender_ship_id}.'
a.log(id_=miner.id, s=f'prj={self.project_id} swap_failed. cond=({condition})')
return miner, False, self, escort_project
# swap between two miners
condition += ' minerXminer'
swapped_ship_escort_project = a.projects.get(f'escort{swapped_ship_id}', None)
swapped_defender = None
swapped_defender_ship_id = None # ensure it is defined even without an escort project
if swapped_ship_escort_project:
swapped_defender_ship_id = swapped_ship_escort_project.defender_ship_id
swapped_defender = a.board.ships.get(swapped_defender_ship_id, None)
if swapped_defender:
condition += f' has_swapped_defender{swapped_defender_ship_id}_{a.belonging_project.get(swapped_defender_ship_id, None)}'
else:
condition += 'no_swapped_defender'
if defender_ship_id:
# if we have an escort, the other side must also have one confirmed, or only their project disbands and the trip is wasted
condition += f'has_defender{defender_ship_id}'
if not swapped_defender:
condition += f' no_swapped_defender'
a.log(id_=miner.id, s=f'prj={self.project_id} swap_failed. cond=({condition})')
return miner, False, self, escort_project
elif swapped_defender_ship_id:
# if we have no escort, only swap when the other side has none either
return miner, False, self, escort_project
swapped_ship_role = new_mine_project.ships[swapped_ship_id]
new_mine_project.dismiss_project(staff_ids=[swapped_ship_id])
self.join_project(staff_ids=[swapped_ship_id], role=ship_role)
new_mine_project.join_project(staff_ids=[miner.id], role=swapped_ship_role)
condition += f' join_to_prj={new_mine_project.project_id}'
a.log(id_=miner.id, s=f'prj={self.project_id} swapped. s{miner.id}(prj={a.belonging_project.get(miner.id, None)}) s{swapped_ship_id}({a.belonging_project.get(swapped_ship_id, None)}) d0{d0} dk{d_k} target{target_position} cond({condition})')
a.log(id_=swapped_ship_id, s=f'prj={self.project_id} swapped. s{miner.id}(prj={a.belonging_project.get(miner.id, None)}) s{swapped_ship_id}({a.belonging_project.get(swapped_ship_id, None)}) d0{d0} dk{d_k} target{target_position} cond({condition})')
self.last_swapped_step = a.board.step
return swapped_ship, reserved, new_mine_project, swapped_ship_escort_project
def mine_project_ships_generator(self, max_d):
a = self.agent
for ship in a.sorted_ships:
if a.determined_ships.get(ship.id, None) is not None:
continue
d = calculate_distance(ship.position, self.position)
if max_d < d:
continue
if d == 0 or 0 < ship.halite:
neighbor_opponent_count = 0
for k_action, cell_k in enumerate(a.neighbor_cells(a.board.cells[ship.position])):
i_k, j_k = position_to_ij(cell_k.position)
if 0.5 < a.scores[I_SCORE_EMPTY_OPPONENT_D2, i_k, j_k]:
neighbor_opponent_count += 1
if 3 <= neighbor_opponent_count: # about to be surrounded
continue
if self.shipyard:
nearest_shipyard, d_shipyard = a.find_nearest_shipyard(ship.position)
if nearest_shipyard is not None and nearest_shipyard.id != self.shipyard.id:
if len(a.ships_by_shipyard[nearest_shipyard.id]) <= 2:
continue # do not pull ships from understaffed areas
project_id = a.belonging_project.get(ship.id, None)
if project_id is None:
yield ship, d
continue
project = a.projects.get(project_id, None)
if project is None:
yield ship, d
elif project_id == self.project_id:
yield ship, d
elif project_id[:4] == 'mine':
# allow an override when the distance is close and the halite is sufficiently larger
d_other = calculate_distance(ship.position, project.position)
dd = d - d_other
threshold = 99999.0
if d <= d_other:
threshold = 1.0
elif d <= d_other + 1:
threshold = 70.0
elif d <= d_other + 2:
threshold = 140.0
else:
threshold = 99999.0
if project.d_halite + threshold < self.d_halite:
a.log(f'prj={self.project_id} other_prj={project_id} d{d} d_other{d_other} dh{self.d_halite} dh_other{project.d_halite}')
yield ship, d
def update_score(self):
a = self.agent
if 0 == len(self.ships):
return
f = a.flags
neighbor_positions = NEIGHBOR_POSITIONS[3][self.i, self.j]
for i_k, j_k, d_k in neighbor_positions:
if d_k < 2:
f.set(I_FLAG_MINE_D2, i=i_k, j=j_k)
if d_k < 3:
f.set(I_FLAG_MINE_D3, i=i_k, j=j_k)
if d_k < 4:
f.set(I_FLAG_MINE_D4, i=i_k, j=j_k)
def update_halite_threshold(self):
a = self.agent
step = a.board.step
if a.previous_board: # check whether the cell was mined
previous_cell = a.previous_board.cells[self.position]
cell = a.board.cells[self.position]
if cell.halite + 1e-6 < previous_cell.halite:
if previous_cell.ship:
self.last_mined_player_id = previous_cell.ship.player_id
ally_d = 99999
opponent_d = 99999
condition = f'last_p{self.last_mined_player_id}'
for shipyard in a.board.shipyards.values():
d = calculate_distance(self.position, shipyard.position)
if shipyard.player_id == a.player_id:
if d < ally_d:
ally_d = d
elif d < opponent_d:
opponent_d = d
danger_zone = a.scores[I_SCORE_DANGER_ZONE, self.i, self.j]
hunt_zone = a.scores[I_SCORE_HUNT_ZONE_IN, self.i, self.j]
condition += f' ally_d{ally_d} op_d{opponent_d} danger_zone{danger_zone:.0f} hunt_zone{hunt_zone:.0f}'
nearest_ally_shipyard = a.nearest_ally_shipyard[self.i][self.j]
if nearest_ally_shipyard:
len_ships = len(a.ships_by_shipyard.get(nearest_ally_shipyard.id, []))
else:
len_ships = 0
if ally_d <= opponent_d and danger_zone < 0.5:
# our territory
if 3 <= len_ships and 0.5 < hunt_zone and self.last_mined_player_id == a.player_id:
if step < 20:
self.halite_threshold = 200.
elif step < 270:
self.halite_threshold = 200.0
else:
self.halite_threshold = 20.0
elif ally_d + 1 < opponent_d and ally_d <= 2 and self.last_mined_player_id == a.player_id:
# right next to a shipyard: safe, so raise the threshold
if step < 20:
self.halite_threshold = 140.
elif step < 270:
self.halite_threshold = 160.0 if ally_d <= 1 else 100.0
else:
self.halite_threshold = 20.0
else:
if step < 100:
self.halite_threshold = 120.
elif step < 200:
self.halite_threshold = 70.0
elif step < 270:
self.halite_threshold = 50.0
else:
self.halite_threshold = 20.0
else:
# enemy territory, so drain it dry
if step < 100:
self.halite_threshold = 100.
elif step < 270:
self.halite_threshold = 30.0
else:
self.halite_threshold = 20.0
# once mining starts, mine one extra time
self.mining_halite_threshold = self.halite_threshold * 0.7
condition += f' h_thre{self.halite_threshold} mh_thre{self.mining_halite_threshold}'
return condition
def calculate_minable_halite(self, advantage):
a = self.agent
halite = self.halite
disadvantage = -min(0, advantage)
remaining_halite = self.halite * (0.75**disadvantage)
if self.halite_threshold < halite:
return halite - self.mining_halite_threshold, 'enough_h'
elif self.mining_halite_threshold < halite:
# once mining starts, mine it out properly
ship = a.board.cells[self.position].ship
if ship and ship.player_id == a.player_id and 0 < ship.halite:
return halite - self.mining_halite_threshold, 'thre_h_ok'
else:
return 0.0, 'thre_h_ng'
else:
return 0.0, 'short_h'
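# Worked example for calculate_minable_halite: with halite=200,
# halite_threshold=160 and mining_halite_threshold=112 (0.7 * 160), the
# first branch fires and (88.0, 'enough_h') is returned. With halite=130
# only the second branch can fire: if one of our laden ships is already on
# the cell we keep mining down to 112 ('thre_h_ok'), otherwise
# (0.0, 'thre_h_ng'). Note that remaining_halite above is computed but not
# used below.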
def schedule(self):
super().schedule()
a = self.agent
self.maintain_dead_staffs()
self.cell = a.board.cells[self.position]
self.halite = self.cell.halite
if self.halite < 1e-6:
return False
condition = ''
self.priority = 1.0
self.shipyard, self.d_shipyard = a.find_nearest_shipyard(self.position)
condition += self.update_halite_threshold()
len_ships = len(a.board.current_player.ships)
# do not mine if we cannot get back in time
if self.shipyard is None:
condition += f' shipyard=None'
if 389 < a.board.step:
condition += f' 389<step.'
self.priority = -1.0
elif len_ships <= 1 and a.board.current_player.halite < 2 * MAX_HALITE: # establishing a base is no longer possible
condition += f' escape_only len_ships{a.len_ships}.'
# a.log(loglevel='WARNING', s=f'cond({condition})')
self.priority = -1.0
elif len_ships <= 1 and a.board.current_player.halite < 2 * MAX_HALITE: # better to just flee (NOTE: unreachable, same guard as the branch above)
# condition += f' escape_only_with_yd len_ships{a.len_ships}.'
# a.log(loglevel='WARNING', s=f'cond({condition})')
self.priority = -1.0
elif 399 < a.board.step + self.d_shipyard:
condition += f' 399<step+{self.d_shipyard}.'
self.priority = -1.0
elif not a.flags.get(I_FLAG_GO_HOME_STRAIGHT, i=self.i, j=self.j):
# lower the priority if a detour is needed
condition += f' must_detour'
self.priority *= 0.1
pass
else:
d_straight = calculate_distance(self.position, self.shipyard.position)
detour_advantage = a.scores[I_SCORE_DETOUR_ADVANTAGE, self.i, self.j]
condition += f' d_straight{d_straight} detour_adv{detour_advantage:.0f}'
if 2 < d_straight and detour_advantage < -3.5:
# avoid spots where the enemy is strong
condition += ' detour_disadv'
self.priority *= 0.1
# self.priority = -1.0
if self.priority < -1e-6:
self.reset_project()
a.log(f'prj={self.project_id} cond=({condition})')
return True
# if there is no shipyard, one may yet be built somewhere, so keep going
if 20 < self.elapsed_steps: # timeout; possibly a stalemate
condition += ' timeout'
self.reset_project()
# race against the opponent
self.ally_reach = int(1e-6 + a.scores[I_SCORE_ALLY_REACH, self.i, self.j])
self.empty_ally_reach = int(1e-6 + a.scores[I_SCORE_EMPTY_ALLY_REACH, self.i, self.j])
self.ally_reach_v2 = min(self.ally_reach + 1, self.empty_ally_reach)
self.opponent_reach = int(1e-6 + a.scores[I_SCORE_OPPONENT_REACH, self.i, self.j])
self.empty_opponent_reach = int(1e-6 + a.scores[I_SCORE_EMPTY_OPPONENT_REACH, self.i, self.j])
self.opponent_reach_v2 = min(self.opponent_reach + 1, self.empty_opponent_reach)
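# reach_v2 presumably treats a laden ship as arriving one step later than an
# empty one: min(reach + 1, empty_reach) for both sides, so a carrier only
# counts if it beats the empty-ship reach even with the one-step penalty.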
condition += f' a{self.ally_reach}_ea{self.empty_ally_reach}_o{self.opponent_reach}_eo{self.empty_opponent_reach}'
if self.opponent_reach_v2 <= self.ally_reach_v2:
# if the opponent arrives at the same time or earlier, retake the spot with an empty_ship after it is mined
advantage = -max(1, self.empty_ally_reach - 1 - self.opponent_reach)
condition += f' op_territory'
else:
advantage = 0
self.d_halite, condition_t = self.calculate_minable_halite(advantage)
condition += f' adv{advantage} hth{self.halite_threshold} mth{self.mining_halite_threshold} dh{int(self.d_halite)} {condition_t}'
if self.d_halite < 1e-6:
self.priority = -1.0
else:
self.priority *= self.d_halite
# the closer the spot, the higher the priority
self.priority *= mine_priority_by_distance(self.ally_reach)
if a.board.step < 2:
self.priority = -1.0
condition += ' too_early_game'
has_neighbor_empty_opponent = False
for i_action, cell_i in enumerate(a.neighbor_cells(self.cell)):
shipyard = cell_i.shipyard
if shipyard and shipyard.player_id != a.player_id:
# lower the priority next to an enemy shipyard
self.priority *= 0.1
ship = cell_i.ship
if not ship:
continue
if ship.player_id == a.player_id:
if i_action != 0:
continue
project_id = a.belonging_project.get(ship.id, None)
# if project_id == self.project_id or project_id is None:
# raise the priority if we can already mine (so other moves do not get in the way;
# since reserving_ships was implemented this alone is not enough)
# pass # ally_reach already raises it
elif i_action == 0: # the opponent got there first
condition += ' op_reach'
# self.priority = -1.0
elif ship.halite == 0:
condition += ' op_neighbor'
has_neighbor_empty_opponent = True
if has_neighbor_empty_opponent:
self.neighbor_empty_opponent_counter += 1
# if 3 <= self.neighbor_empty_opponent_counter:
# self.priority = -1.0 # avoid a stand-off stalemate
else:
self.neighbor_empty_opponent_counter = 0
if self.priority < 0.0:
condition += ' failed'
self.reset_project()
a.log(f'prj={self.project_id} schedule prio{self.priority:.1f} cond=({condition}) h_thre{self.halite_threshold}')
return True
def reset_project(self):
self.elapsed_steps = 0
if self.ships:
self.dismiss_project(staff_ids=list(self.ships.keys()))
def can_offer(self, ship_halite, d):
"""あんまりhalite量が多いならjoinさせない"""
a = self.agent
if ship_halite == 0:
advantage = self.opponent_reach_v2 - d
self.d_halite, condition = self.calculate_minable_halite(advantage=advantage)
# do not let it join if it is too far away
return d <= 8 and 4.9 < self.d_halite
if 1000. < ship_halite and 0 < d:
return False
advantage = self.opponent_reach - d - 1
if 0 < advantage:
self.d_halite = self.halite * (1.0 - (0.75 ** advantage))
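# Mining collects 25% of the cell per step, so over `advantage` uncontested
# steps a ship gathers halite * (1 - 0.75**advantage); e.g. advantage=2
# yields about 43.75% of the cell.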
return True
self.d_halite = self.halite * 0.25
if d == 0: # check in a bit more detail
opponent_halite = int(1e-6 + a.scores[I_SCORE_MIN_NEIGHBOR_OPPONENT_HALITE, self.i, self.j])
return ship_halite < opponent_halite
elif d == 1:
opponent_halite = 99999
for x_k, y_k, d_k in neighbor_positions(d=2, p=self.position):
cell_k = a.board.cells[x_k, y_k]
ship_k = cell_k.ship
if ship_k is None:
continue
if ship_k.player_id == a.player_id:
continue
opponent_halite = min(ship_k.halite, opponent_halite)
return ship_halite < opponent_halite
return False
def is_current_ship_mining(self):
a = self.agent
cell = a.board.cells[self.position]
if (cell.ship and cell.ship.player_id == a.player_id):
determined = a.determined_ships.get(cell.ship.id, None)
a.log(s=f'prj={self.project_id} is_current_ship_mining s{cell.ship.id} determined={determined}')
if determined is None:
pass
elif determined == 'reserved':
q = a.reserving_ships[cell.ship.id][2]
if len(q.shape) == 2:
max_i_action = np.argmax(q[0])
else:
max_i_action = np.argmax(q)
if max_i_action == 0:
return True
else:
next_action = determined[0]
if (next_action is None) or (next_action == I_MINE):
return True
return False
def should_have_defender(self, d_shipyard):
a = self.agent
# o0 = a.previous_len_opponent_ships // 5
d_threshold = [99999, 6, 5, 4, 3]
o1 = min(max(0, a.len_opponent_ships - 30) // 5, len(d_threshold) - 1)
a.log(f'prj={self.project_id} should_have_defender d_yd{d_shipyard} o1{o1} d_thre{d_threshold[o1]}')
return d_threshold[o1] < d_shipyard
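# Worked example: with a.len_opponent_ships = 47, o1 = min((47 - 30) // 5, 4)
# = 3, so a defender is required once the shipyard is more than
# d_threshold[3] = 4 away. With 30 or fewer opponent ships, o1 = 0 and the
# 99999 threshold effectively disables the defender requirement.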
def run(self):
super().run()
a = self.agent
if self.priority < 0.0:
return True
# release everyone for now
self.dismiss_project(staff_ids=list(self.ships.keys()))
# another Project (e.g. DefenseShipyardProject) may already be mining here
if self.is_current_ship_mining():
a.log(s=f'prj={self.project_id} is_current_ship_mining is True')
return True
len_ships = len(a.board.current_player.ships)
if len_ships < 4:
i_flag = I_FLAG_MINE_D4
max_d = [8, 1]
elif len_ships < 8:
i_flag = I_FLAG_MINE_D4
max_d = [12, 2]
elif len_ships < 10:
i_flag = I_FLAG_MINE_D4
max_d = [16, 2]
elif len_ships < 20:
i_flag = I_FLAG_MINE_D3
max_d = [22, 1]
else:
i_flag = I_FLAG_MINE_D2
max_d = [22, 1]
if a.flags.get(i_flag, i=self.i, j=self.j):
i_max_d = 1
else:
i_max_d = 0
to_sort = []
for ship, d in self.mine_project_ships_generator(max_d[i_max_d]):
escorted_count = 0
escort_project = a.projects.get(f'escort{ship.id}', None)
if escort_project:
for ship_id in escort_project.ships:
if ship_id == ship.id:
continue
escorted_count += 1
to_sort.append((ship, d, -escorted_count, -ship.halite, ship.id))
miner = None
miner_d = 99999
defender = None
for i, (ship, d, negative_escorted_count, negative_halite, ship_id) in enumerate(sorted(to_sort, key=itemgetter(1, 2, 3, 4))):
halite = abs(negative_halite)
if not self.can_offer(ship_halite=halite, d=d):
continue
if miner is None:
miner = ship
miner_d = d
elif halite == 0:
if d <= miner_d + 3: # an escort candidate that is too far away is as good as none
defender = ship
break
if not miner:
# a.log(s=f'prj={self.project_id} no miner max_d[{i_max_d}]{max_d[i_max_d]} len(to_sort){len(to_sort)}')
self.d_halite = 0.0
return True # could not secure personnel
shipyard, d_shipyard = a.find_nearest_shipyard(self.position)
if shipyard is None:
defender = None
escort_project = None
elif not self.should_have_defender(d_shipyard):
defender = None
escort_project = None
else: # from midgame on, expeditions require a defender
escort_project = a.projects[f'escort{miner.id}']
if escort_project.defender_ship_id is None:
if not defender:
a.log(s=f'prj={self.project_id} miner{miner.id} no defender')
self.d_halite = 0.0
return True
escort_project.defender_ship_id = defender.id
escort_project.join_project(staff_ids=[defender.id], role='defender')
else:
defender = a.board.ships[escort_project.defender_ship_id]
defender_id = defender.id if defender else None
a.log(id_=miner.id, s=f'd_yd={d_shipyard} defender{defender_id}')
self.join_project(staff_ids=[miner.id], role='miner')
old_miner = miner
miner, reserved, old_miner_project, escort_project = self.swap_mine_project(miner, defender, escort_project)
if old_miner_project.project_id == self.project_id:
# nothing changed
self.run_miner(miner)
elif escort_project and old_miner_project.project_id == escort_project.project_id:
# swapped with the escort
defender = old_miner
self.run_miner(miner)
else: # swapped between two MineProjects
self.run_miner(miner, forced=True)
if reserved:
old_miner_project.run_miner(old_miner)
else:
pass # leave old_miner to the other project's run
if escort_project: # also move the defender that is assigned to old_miner
escort_project.schedule() # must be executed after the miner has joined the project
escort_project.run()
defender_ship_id = escort_project.defender_ship_id
defender = a.board.ships.get(defender_ship_id, None)
a.log(f'miner={miner.id} old_miner={old_miner.id} defender={defender_id}')
if defender:
a.log(id_=defender_ship_id, s=f'{defender.position} hired by prj={self.project_id}, old_miner{old_miner.id}{old_miner.position} escort_project{escort_project.project_id} prio{escort_project.priority} defprj={a.belonging_project.get(defender_ship_id, None)}')
a.log(id_=old_miner.id, s=f'{old_miner.position} hire defender{defender_ship_id} to escort prio{escort_project.priority} defprj={a.belonging_project.get(defender_ship_id, None)}')
self.elapsed_steps += 1
return True
# MineProject end
class ExpeditionProject(Project):
"""
Expedition to a lucrative area.
Currently assumes a CONVERT at the destination.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.target_position = None
self.early_game_threshold = 50
self.halite_threshold = None
self.mine_threshold_before_convert = 50.0
self.last_converted_position = None
self.ship_per_neighbor_positions = [None] * (LEN_MOVE + 1) # sized as in run(), since leader_ship_strategy may index I_CONVERT
self.shipyard_halite_threshold = 1500.
def schedule(self):
super().schedule()
a = self.agent
self.maintain_dead_staffs()
len_ships = len(a.board.current_player.ships)
len_shipyards = len(a.board.current_player.shipyards)
max_len_opponent_ships = np.max([len(a.board.players[player_id].ships) for player_id in range(PLAYERS)])
if self.last_converted_position:
shipyard = a.board.cells[self.last_converted_position].shipyard
if shipyard and shipyard.player_id == a.player_id:
# 前ターンにこの ExpeditionProject によって作った
project_id = f'defense_yd{shipyard.id}'
project = a.projects.get(project_id, None)
if project:
budget = self.budget
self.reserve_budget(-self.budget)
project.reserve_budget(min(a.free_halite, MAX_HALITE))
a.log(id_=shipyard.id, s=f'hand over budget {self.project_id}={budget} -> {project_id}={project.budget}')
else:
a.log(loglevel='warning', s=f'prj={self.project_id} hand over failed. {project_id} not found yd{shipyard.id}{shipyard.position}')
pass
# disband for now
return False
if a.board.step < 2 or 300 <= a.board.step:
return False
if a.board.step < a.last_shipyard_attacked_step + 50:
self.priority = -1.0 # a shipyard just fell, so hold back
elif (2 <= a.board.step < self.early_game_threshold) and (len_shipyards <= 1):
self.priority = 1e8
elif (2 <= a.board.step < 200) and (len_shipyards <= 2) and (len_shipyards * 7 <= len_ships):
self.priority = 1e8
elif (self.early_game_threshold <= a.board.step) and (len_shipyards * 6 <= len_ships) and (25000. < a.world_halite or 300.0 < a.halite_per_ship):
# happens in midgame when the opponents are all hunter types
self.priority = 1e8
elif len_shipyards <= 10 and len_shipyards * 4 < len_ships and max_len_opponent_ships <= len_ships:
self.priority = 1e6
elif self.ships:
self.priority = 1e6
else:
self.priority = -1.0
# if 0.0 < self.priority and 3 <= len_shipyards:
# be conservative from the 4th shipyard on
# self.reserve_budget(-self.budget)
# self.priority = 100.0
# self.halite_threshold = max(1400., 0.8 * a.best_scores[I_SCORE_HALITE_D4])
return True
def is_mining_convert_position(self):
# has mining started at the planned convert position?
mining_convert_position = False
a = self.agent
if self.target_position and a.previous_board:
cell = a.board.cells[self.target_position]
if cell.ship and cell.ship.player_id == a.player_id:
previous_cell = a.previous_board.cells[self.target_position]
mining_convert_position = (cell.halite < previous_cell.halite)
return mining_convert_position
def expedition_ships_generator(self):
a = self.agent
for ship in a.sorted_ships:
if a.determined_ships.get(ship.id, None) is not None:
continue
project_id = a.belonging_project.get(ship.id, None)
if project_id is None:
yield ship
continue
elif project_id[:4] == 'hunt':
continue
elif project_id[:10] == 'defense_yd':
continue
yield ship
def search_target_position(self):
"""
Set self.target_position.
"""
a = self.agent
queue = Queue()
visited = np.zeros((2, COLS, ROWS), dtype=bool)
for ship in self.expedition_ships_generator():
is_empty_ship = 1 if ship.halite == 0 else 0
queue.put((ship.position, 0, is_empty_ship, ship))
budget_sufficient = (MAX_HALITE * 2 <= a.free_halite + self.budget)
# breadth-first search
sup_d = 11
best_score = 1.0
best_position = None
best_ship = None
best_d = 0
min_best_d = None
while not queue.empty():
position, d, is_empty_ship, ship = queue.get()
if sup_d <= d:
break
if visited[is_empty_ship, position[0], position[1]]:
continue
visited[:is_empty_ship+1, position[0], position[1]] = True
score = self.calculate_shipyard_position_score(position, min_best_d, d, is_empty_ship)
if best_score < score:
best_score = score
best_position = position
best_ship = ship
best_d = d
if min_best_d is None:
min_best_d = d
a.log(f'prj={self.project_id} score_updated. d{best_d} score{score:.1f} {position} s{best_ship.id} h{best_ship.halite}')
for x2, y2, d2 in neighbor_positions(1, position):
d3 = d + d2
if (sup_d <= d3) or visited[is_empty_ship, x2, y2]:
continue
queue.put(((x2, y2), d3, is_empty_ship, ship))
self.set_target_position(best_position)
if best_ship:
best_ship_id = best_ship.id
self.join_project(staff_ids=[best_ship.id], role='leader', forced=True)
else:
best_ship_id = None
self.d_leader = best_d
a.log(s=f'search_target_position={best_position}, leader={best_ship_id} d={best_d} score={best_score}')
def set_target_position(self, target_position=None):
self.target_position = target_position
def calculate_shipyard_position_score(self, position, min_best_d, d, is_empty_ship):
a = self.agent
i, j = position_to_ij(position)
if (not is_empty_ship) or a.board.step < self.early_game_threshold:
opponent_reach = int(1e-6 + a.scores[I_SCORE_OPPONENT_REACH, i, j])
if opponent_reach - 2 <= d:
return 0.0 # only distances at which we can CONVERT and SPAWN without enemy interference
if 0.5 < a.scores[I_SCORE_ALLY_SHIPYARD_D7, i, j]:
return 0.0 # avoid building near our own shipyards
if 0 < a.len_shipyards:
if 0.5 < a.scores[I_SCORE_OPPONENT_SHIPYARD_D6, i, j]:
return 0.0 # avoid building near an enemy shipyard
score_candidates = a.scores[I_SCORE_SHIPYARD_CANDIDATES, i, j]
future_hunt_zone = a.scores[I_SCORE_FUTURE_HUNT_ZONE, i, j]
score_hunt_zone = 1.0 + 0.15 * future_hunt_zone
if min_best_d is None:
score_gamma = 1.0
else:
score_gamma = 0.9 ** (d - min_best_d)
score_opponent_shipyard_d4 = 1 / max(1.0, a.scores[I_SCORE_OPPONENT_SHIPYARD_D4, i, j])
score = score_candidates * score_hunt_zone * score_gamma * score_opponent_shipyard_d4
# a.log(f'prj={self.project_id} {position} d{d} sc_c{score_candidates:.0f} sc_hz{score_hunt_zone:.1f} sc_g{score_gamma:.5f} sc_opyd{score_opponent_shipyard_d4:.0f} sc{score:.1f}')
if score < 0.6 * self.shipyard_halite_threshold:
return 0.0 # avoid it if the surrounding halite is insufficient
elif score < self.shipyard_halite_threshold:
# borderline case
if 200 < a.board.step:
return 0.0
if 0.5 < a.scores[I_SCORE_OPPONENT_SHIPYARD_D4, i, j]:
return 0.0 # avoid building near an enemy shipyard
if -10.5 < a.scores[I_SCORE_DETOUR_ADVANTAGE, i, j]:
return 0.0 # keep it to expeditions only
elif 0.5 < a.scores[I_SCORE_OPPONENT_SHIPYARD_D3, i, j]:
return 0.0 # avoid building near an enemy shipyard
return score
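# Worked example of the score above: score_candidates=1800 with
# future_hunt_zone=4 gives score_hunt_zone=1.6; two steps past the first
# hit (d - min_best_d = 2) gives score_gamma=0.81; with no opponent
# shipyard within distance 4, score_opponent_shipyard_d4=1.0, so
# score = 1800 * 1.6 * 0.81 = 2332.8, which clears
# shipyard_halite_threshold=1500 without the borderline checks.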
def leader_ship_strategy(self, ship, d):
a = self.agent
a.log(id_=ship.id, s=f'prj={self.project_id} leader d{d}')
condition = None
if d == 0: # at the planned convert position
i_, j_ = position_to_ij(ship.position)
opponent_reach = int(1e-6 + a.scores[I_SCORE_OPPONENT_REACH, i_, j_])
safely_convert_without_spawn = 0
for ship_id_c in self.ships.keys():
if ship_id_c == ship.id:
continue
d_c = a.ally_ship_distances[ship.id][ship_id_c]
if (d_c < opponent_reach) or (
(d_c == opponent_reach) and (0 == a.board.ships[ship_id_c].halite)):
safely_convert_without_spawn = 1
break
sufficient_budget = ((2 - safely_convert_without_spawn) * MAX_HALITE <= self.budget + ship.halite)
if ((a.board.cells[ship.position].halite < self.mine_threshold_before_convert
or a.scores[I_SCORE_REACH_ADVANTAGE, i_, j_] < 2.5)
and sufficient_budget):
self.reserve_budget(-max(0, MAX_HALITE - ship.halite))
a.reserve_ship(ship, ShipAction.CONVERT)
self.last_converted_position = ship.position
condition = 'convert'
else:
a.moving_ship_strategy(ship, position=self.target_position, mode='mine', mine_threshold=4.0)
condition = 'target_reached_mine'
else:
a.moving_ship_strategy(ship, position=self.target_position, mode='escape', mine_threshold=None)
condition = 'move_to_target'
if self.last_converted_position:
i_action = I_CONVERT
else:
i_action = I_MINE
self.ship_per_neighbor_positions[i_action] = ship
a.log(id_=ship.id, s=f'prj={self.project_id} leader {self.target_position} a{i_action} cond({condition})')
def coworker_ship_strategy(self, ship, d, leader_ship):
a = self.agent
best_position = None
to_sort = []
for k_action, cell_k in enumerate(a.neighbor_cells(a.board.cells[self.target_position])):
if self.ship_per_neighbor_positions[k_action] is not None:
continue
d_k = calculate_distance(ship.position, cell_k.position)
steps_k = d_k - d + 2 # the farther away, the fewer steps remain for mining
mine_threshold_k = None
halite_k = None
priority = None
if cell_k.halite < 1e-6:
priority = 0.0
else:
mine_project = a.projects.get(f'mine_{cell_k.position[0]}_{cell_k.position[1]}', None)
if mine_project:
mine_threshold_k = mine_project.halite_threshold
if mine_project.ships:
priority = -100.0
mine_threshold_k = None
else:
mine_threshold_k = 40.0
if mine_threshold_k:
halite_k = cell_k.halite - mine_threshold_k
priority = halite_k / steps_k
if k_action == 0:
priority = -1.0
if self.last_converted_position: # the leader converts this step
if 0 < ship.halite:
priority = 10003.0 # want to deposit at the same time as the convert
else: # simultaneous guard needed?
i_, j_ = position_to_ij(cell_k.position)
opponent_reach = int(1e-6 + a.scores[I_SCORE_OPPONENT_REACH, i_, j_])
if d == 1 and opponent_reach <= 1:
priority = 10001.0
elif d <= opponent_reach and (self.budget + leader_ship.halite < 2 * MAX_HALITE):
priority = 10002.0
a.log(id_=ship.id, s=f'prj={self.project_id} coworker k{k_action}{cell_k.position} prio{priority} h{cell_k.halite} halite_k{halite_k} steps_k{steps_k} budget{self.budget}')
to_sort.append([priority, k_action, mine_threshold_k, cell_k])
if not to_sort:
priority = None
target_position = self.target_position
k_action = I_MINE
mine_threshold_k = None
else:
priority, k_action, mine_threshold_k, cell_k = sorted(to_sort, key=itemgetter(0, 1), reverse=True)[0]
target_position = cell_k.position
self.ship_per_neighbor_positions[k_action] = ship
if (ship.position == target_position and mine_threshold_k is not None):
mode = 'mine'
else:
mode = 'escape'
preference, forced = a.calculate_moving_ship_preference(
ship, position=target_position, mode=mode, mine_threshold=mine_threshold_k)
q = np.ones(LEN_MOVE) * preference
a.reserve_ship_by_q(ship=ship, q=q, forced=forced, depend_on=leader_ship)
a.log(id_=ship.id, s=f'prj={self.project_id} coworker {self.target_position}{k_action}->{target_position} prio{priority}')
def run(self):
super().run()
self.last_converted_position = None
if self.priority < 0.0:
return False
a = self.agent
# secure the budget (CONVERT & SPAWN) and the personnel
# if len(a.board.current_player.shipyards) <= 2:
self.reserve_budget(min(a.free_halite, 2 * MAX_HALITE - self.budget))
# else: # conservative from the 4th shipyard on
# self.reserve_budget(-self.budget)
# if a.free_halite < 4 * MAX_HALITE:
# return True
self.dismiss_project(staff_ids=list(self.ships.keys()))
self.search_target_position()
if self.target_position is None:
return False
len_staffs = len(self.ships)
to_sort = []
for ship in self.ships_generator(with_free=True):
d = calculate_distance(self.target_position, ship.position)
if 0 < d and 0 < ship.halite and self.early_game_threshold <= a.board.step:
continue # from midgame on, empty ships only as a rule
if self.d_leader + 5 < d: # skip if it is too far away
continue
to_sort.append((d, -ship.halite, ship.id, ship))
max_staffs = 1 if a.board.step < 50 else 2
to_sort = sorted(to_sort)[:max_staffs - len_staffs]
staff_ids = list(map(itemgetter(2), to_sort))
a.log(s=f'prj={self.project_id} target_position={self.target_position} ships={list(self.ships.keys())}+{staff_ids}')
# from here, actually execute the plan
leader_ship = None
for ship_id, role in self.ships.items():
if role == 'leader':
leader_ship = a.board.ships.get(ship_id, None)
self.leader_ship_strategy(ship=leader_ship, d=self.d_leader)
break
a.log(s=f'prj={self.project_id} leader{leader_ship.id} staff_ids{staff_ids}')
self.join_project(staff_ids=staff_ids, role='coworker')
self.ship_per_neighbor_positions = [None] * (LEN_MOVE + 1)
for i, (d, negative_halite, ship_id, ship) in enumerate(to_sort):
self.coworker_ship_strategy(ship=ship, d=d, leader_ship=leader_ship)
return True
class HuntProject(Project):
"""狩り"""
def __init__(self, target_ship_id, *args, **kwargs):
super().__init__(*args, project_id=f'hunt{target_ship_id}', **kwargs)
self.target_ship_id = target_ship_id
self.max_staffs = 6
self.max_d = 3
self.center_direction = []
def schedule(self):
super().schedule()
a = self.agent
self.target_ship = a.board.ships.get(self.target_ship_id, None)
if self.target_ship is None: # already dead
return False
self.priority = 1e4
if self.target_ship.halite == 0:
self.priority = -1.0 # the project itself persists as long as the target lives
self.opponent_safe_direction = 0x1F
p0 = self.target_ship.position
cell = a.board.cells[p0]
cells = a.neighbor_cells(cell)
for k_action, cell_k in enumerate(cells): # does the opponent have any safe move?
for l_action, cell_l in enumerate(a.neighbor_cells(cell_k)):
ship_l = cell_l.ship
if (ship_l is None) or ship_l.player_id == self.target_ship.player_id:
continue
if ship_l.halite < self.target_ship.halite:
self.opponent_safe_direction &= ~(1 << k_action)
break
if self.opponent_safe_direction < 0: # NOTE: never true for this non-negative bitmask, so the block below is effectively disabled
defender_count = 0 # give up if there are many escorts around
for x_k, y_k, d_k in neighbor_positions(d=2, p=self.target_ship.position):
cell_k = a.board.cells[x_k, y_k]
ship_k = cell_k.ship
if (ship_k is None) or (ship_k.player_id != self.target_ship.player_id) or (0 < ship_k.halite):
continue
defender_count += 1
if 2 <= defender_count:
self.priority = -1.0
self.dx_limit = [-self.max_d, self.max_d]
self.dy_limit = [-self.max_d, self.max_d]
# for our ship guarding each index, which way the target tends to drift
# e.g. if self.opponent_tend_to_move[I_SOUTH] == I_WEST, picture an enemy shipyard to the south-southwest
self.opponent_tend_to_move = [None] * LEN_MOVE
for shipyard in a.board.players[self.target_ship.player_id].shipyards:
dx = rotated_diff_position(self.target_ship.position[0], shipyard.position[0])
dy = rotated_diff_position(self.target_ship.position[1], shipyard.position[1])
abs_dx = abs(dx)
abs_dy = abs(dy)
if abs_dx <= abs_dy:
if dy <= 0 and self.dy_limit[0] <= dy + 1:
# the target wants to go south
self.dy_limit[0] = dy + 1
if dx < 0:
self.opponent_tend_to_move[I_SOUTH] = I_WEST
elif 0 < dx:
self.opponent_tend_to_move[I_SOUTH] = I_EAST
if 0 <= dy and dy - 1 <= self.dy_limit[1]:
# the target wants to go north
self.dy_limit[1] = dy - 1
if dx < 0:
self.opponent_tend_to_move[I_NORTH] = I_WEST
elif 0 < dx:
self.opponent_tend_to_move[I_NORTH] = I_EAST
if abs_dy <= abs_dx:
if dx <= 0 and self.dx_limit[0] <= dx + 1:
# the target wants to go west
self.dx_limit[0] = dx + 1
if dy < 0:
self.opponent_tend_to_move[I_WEST] = I_SOUTH
elif 0 < dy:
self.opponent_tend_to_move[I_WEST] = I_NORTH
if 0 <= dx and dx - 1 <= self.dx_limit[1]:
# the target wants to go east
self.dx_limit[1] = dx - 1
if dy < 0:
self.opponent_tend_to_move[I_EAST] = I_SOUTH
elif 0 < dy:
self.opponent_tend_to_move[I_EAST] = I_NORTH
if not self.assign_candidates(): # rebuild the assignment
self.priority = -1.0
if self.priority < 0.0:
self.dismiss_project(staff_ids=list(self.ships.keys()))
return True
def assign_candidates(self):
if self.priority < 0.0:
return False
previous_roles = copy.deepcopy(self.ships)
def get_ship_info_for_debug(a_):
return list(map(lambda u: f'{u.id}{u.position}', a_))
def get_ship_info_for_debug_2(a_):
a2 = []
for t in a_:
a2.append(get_ship_info_for_debug(t))
return a2
def get_ship_info_for_debug_3_1(a_):
return list(map(lambda u: f'{u[0].id}{u[0].position}', a_))
def get_ship_info_for_debug_3(a_):
a2 = []
for t in a_:
a2.append(get_ship_info_for_debug_3_1(t))
return a2
a = self.agent
self.dismiss_project(staff_ids=list(self.ships.keys()))
p0 = self.target_ship.position
count = 0
dp_index = [0] * 9
candidates = [[] for _ in range(9)]
# can we still catch up if the enemy flees at full speed in one direction for the limit turns?
north_position = Point(
x=p0[0],
y=mod_map_size_x(p0[1] + self.dy_limit[1]))
east_position = Point(
x=mod_map_size_x(p0[0] + self.dx_limit[1]),
y=p0[1])
south_position = Point(
x=p0[0],
y=mod_map_size_x(p0[1] + self.dy_limit[0]))
west_position = Point(
x=mod_map_size_x(p0[0] + self.dx_limit[0]),
y=p0[1])
a.log(id_=self.target_ship_id, s=f'{p0} n{north_position} e{east_position} s{south_position} w{west_position} dx_limit{self.dx_limit} dy_limit{self.dy_limit}')
for ship in a.sorted_ships:
# hunt assignment currently has top priority, so ignore other projects' circumstances
if a.determined_ships.get(ship.id, None):
continue # if it has already acted, do not override that
project_id = a.belonging_project.get(ship.id, None)
if project_id and project_id != self.project_id and project_id[:4] == 'hunt':
continue # belongs to another hunt project
if self.target_ship.halite <= ship.halite:
continue
dx = rotated_diff_position(p0[0], ship.position[0])
dy = rotated_diff_position(p0[1], ship.position[1])
abs_dx = abs(dx)
abs_dy = abs(dy)
m = 0
d_north = calculate_distance(north_position, ship.position)
if d_north <= self.dy_limit[1]:
m |= (1 << I_NORTH)
d_east = calculate_distance(east_position, ship.position)
if d_east <= self.dx_limit[1]:
m |= (1 << I_EAST)
d_south = calculate_distance(south_position, ship.position)
if d_south <= abs(self.dy_limit[0]):
m |= (1 << I_SOUTH)
d_west = calculate_distance(west_position, ship.position)
if d_west <= abs(self.dx_limit[0]):
m |= (1 << I_WEST)
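# m is a direction bitmask: bit k is set when this ship can still cut off
# the target's full-speed escape toward k, i.e. it reaches the corresponding
# extreme cell (north/east/south/west_position) within the dx/dy limit.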
previous_role = int(previous_roles.get(ship.id, -1))
original_m = m
im = None
if 0 < m:
# respect the coverage area the ship held last time
if I_NORTH <= previous_role and (m & (1 << previous_role)):
m = (1 << previous_role)
im = DIRECTION_MAPPING[m]
if im < I_NORTH_EAST:
dp_index[im] = 1
else:
dp_index[im] = min(2, dp_index[im] + 1)
candidates[im].append(ship)
count += 1
# a.log(id_=self.target_ship_id, s=f'assign_candidates 0 s{ship.id}{ship.position} dn{d_north} de{d_east} ds{d_south} dw{d_west} m{m} origm{original_m} prerole{previous_role} im{im}')
# a.log(id_=self.target_ship_id, s=f'assign_candidates 1 count{count} candidates{get_ship_info_for_debug_2(candidates)}')
if count < 5:
return False
# assigning directions to diagonal ships is very fiddly, so it is precomputed in HUNT_DP
diag_distribution = HUNT_DP[dp_index[1], dp_index[2], dp_index[3], dp_index[4], dp_index[5], dp_index[6], dp_index[7], dp_index[8]]
# a.log(id_=self.target_ship_id, s=f'assign_candidates 2 diag_distribution{diag_distribution}')
if diag_distribution[0] == HUNT_IMPOSSIBLE:
return False
# reduce from 8 directions to 4
candidates2 = [[] for _ in range(LEN_MOVE)]
for direction, ships in enumerate(candidates):
if not ships:
continue
ships_d = [(ship, calculate_distance(p0, ship.position)) for ship in ships]
if direction < I_NORTH_EAST:
candidates2[direction] += ships_d
continue
diag = direction - I_NORTH_EAST
strategy = diag_distribution[diag]
if strategy == 5: # both directions
if len(ships) <= 1:
a.log(loglevel='warning', s=f'strategy == 5. len(ships)={len(ships)}')
candidates2[DIAG_DIRECTIONS[diag][0]].append(ships_d[0])
candidates2[DIAG_DIRECTIONS[diag][1]] += ships_d[1:]
else:
candidates2[strategy] += ships_d
# finalize the [5, 8] ships
key_fn = lambda t: t[1] * 10000 + t[0].halite
min_d = 99999
min_direction = None
to_sort = []
for direction in range(1, LEN_MOVE):
c = sorted(candidates2[direction], key=key_fn)
len_c = len(c)
if 0 == len_c:
c1 = get_ship_info_for_debug_2(candidates)
c2 = get_ship_info_for_debug_3(candidates2)
a.log(loglevel='warning', s=f'direction={direction} ship candidate not found target{self.target_ship.position} c1={c1} c2={c2} diag_distribution={diag_distribution} dp_index={dp_index}')
continue
for j in range(2):
if len_c <= j:
break
ship_j, d_j = c[j]
pre_project = a.belonging_project.get(ship_j.id, None)
self.join_project(staff_ids=[ship_j.id], role=str(direction), forced=True)
post_project = a.belonging_project.get(ship_j.id, None)
# a.log(id_=self.target_ship_id, s=f'assign_candidates 3 {ship_j.id}{ship_j.position} prerole{previous_roles.get(ship_j.id, -1)} role{self.ships.get(ship_j.id)} preprj={pre_project} postprj={post_project}')
to_sort.append([ship_j, d_j, ship_j.id])
if 0 < j: # attack toward the center from the side the farther ship can cover
if d_j < min_d:
min_d = d_j
min_direction = direction
if min_direction is None:
c1 = get_ship_info_for_debug_2(candidates)
c2 = get_ship_info_for_debug_3(candidates2)
a.log(loglevel='warning', s=f'min_ship not found. c1={c1} c2={c2}')
return False
self.center_direction = int(min_direction)
self.sorted_ships = sorted(to_sort, key=itemgetter(1, 2))
def ships_to_log():
a_ = []
for ship_id, role in self.ships.items():
a_.append(f'{ship_id}{a.board.ships[ship_id].position}={role}')
return a_
a.log(id_=self.target_ship_id, s=f'assign_candidates 4 target{self.target_ship.id}{self.target_ship.position} ships{ships_to_log()} center{self.center_direction}')
return True
def run(self):
super().run()
if self.priority < 0.0:
return True
a = self.agent
p0 = self.target_ship.position
cell = a.board.cells[p0]
cells = a.neighbor_cells(cell)
center_ship_id = None
opponent_safe_direction = self.opponent_safe_direction
positions = []
checkmate_count = 0
for ship, d, ship_id in self.sorted_ships:
role_s = self.ships.get(ship_id, None)
if role_s is None: # the ship may have been taken by an EscortProject final
continue
role = int(role_s)
i, j = position_to_ij(ship.position)
cell_r = cells[role]
get_halite = int((1e-6 + cell_r.halite) * 0.25)
last_stop = self.target_ship.halite <= ship.halite + get_halite
p1 = cell_r.position # goal position for a mate-in-one
p2 = p1 # goal position otherwise
p_checkmate = cell_r.position # p1 may only have one axis aligned, so it cannot be used for the checkmate test
should_challenge = False
# tend = self.opponent_tend_to_move[role]
mask_ns = (1 << I_NORTH) | (1 << I_SOUTH)
mask_ew = (1 << I_EAST) | (1 << I_WEST)
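# opponent_safe_direction is a 5-bit mask over (stay, N, E, S, W); 0x1F means
# every move is safe for the target. mask_ns / mask_ew isolate one axis, so
# e.g. (opponent_safe_direction & mask_ew) == (1 << I_EAST) reads as "east is
# the target's only safe east-west option".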
if role == I_NORTH:
if ship.position[0] == p1[0]: # the x coordinate already matches
if ship.position[1] != p1[1] or 0 == get_halite or (0 < ship.halite and (not last_stop)):
pass # simply closing in on the goal or stopping is fine
elif (opponent_safe_direction & mask_ew) == (1 << I_EAST): # east is safe, so the target will likely go east
p2 = cell_r.east.position
elif (opponent_safe_direction & mask_ew) == (1 << I_WEST): # west is safe, so the target will likely go west
p2 = cell_r.west.position
elif self.dy_limit[1] <= 1: # we are the ones being cornered
should_challenge = True
else: # fall back and buy time
p2 = cell_r.north.position
else: # align the x coordinate first
p1 = Point(x=p0[0], y=ship.position[1])
p2 = p1
elif role == I_EAST:
if ship.position[1] == p1[1]: # the y coordinate already matches
if ship.position[0] != p1[0] or 0 == get_halite or (0 < ship.halite and (not last_stop)):
pass # simply closing in on the goal or stopping is fine
elif (opponent_safe_direction & mask_ns) == (1 << I_NORTH): # north is safe, so the target will likely go north
p2 = cell_r.north.position
elif (opponent_safe_direction & mask_ns) == (1 << I_SOUTH): # south is safe, so the target will likely go south
p2 = cell_r.south.position
elif self.dx_limit[1] <= 1: # we are the ones being cornered
should_challenge = True
else: # fall back and buy time
p2 = cell_r.east.position
else: # align the y coordinate first
p1 = Point(x=ship.position[0], y=p0[1])
p2 = p1
elif role == I_SOUTH:
if ship.position[0] == p1[0]: # the x coordinate already matches
if ship.position[1] != p1[1] or 0 == get_halite or (0 < ship.halite and (not last_stop)):
pass # simply closing in on the goal or stopping is fine
elif (opponent_safe_direction & mask_ew) == (1 << I_EAST): # east is safe, so the target will likely go east
p2 = cell_r.east.position
elif (opponent_safe_direction & mask_ew) == (1 << I_WEST): # west is safe, so the target will likely go west
p2 = cell_r.west.position
elif -1 <= self.dy_limit[0]: # we are the ones being cornered
should_challenge = True
else: # fall back and buy time
p2 = cell_r.south.position
else: # align the x coordinate first
p1 = Point(x=p0[0], y=ship.position[1])
p2 = p1
elif role == I_WEST:
if ship.position[1] == p1[1]: # the y coordinate already matches
if ship.position[0] != p1[0] or 0 == get_halite or (0 < ship.halite and (not last_stop)):
pass # simply closing in on the goal or stopping is fine
elif (opponent_safe_direction & mask_ns) == (1 << I_NORTH): # north is safe, so the target will likely go north
p2 = cell_r.north.position
elif (opponent_safe_direction & mask_ns) == (1 << I_SOUTH): # south is safe, so the target will likely go south
p2 = cell_r.south.position
elif -1 <= self.dx_limit[0]: # we are the ones being cornered
should_challenge = True
else: # fall back and buy time
p2 = cell_r.west.position
else: # align the y coordinate first
p1 = Point(x=ship.position[0], y=p0[1])
p2 = p1
if (center_ship_id is None) and (role == self.center_direction):
center_ship_id = ship.id
if ship.position == p1:
p1 = p0
p2 = p0
p_checkmate = p0
self.ships[ship.id] = str(I_MINE)
d_r = calculate_distance(ship.position, p_checkmate)
if d_r <= 1:
bit_flag = (1 << int(self.ships[ship.id]))
checkmate_count |= bit_flag
# a.log(id_=self.target_ship_id, s=f'ship{ship.id}{ship.position} bit_flag{bit_flag} checkmate{checkmate_count}')
positions.append((ship, p1, p2, d_r, last_stop, should_challenge))
for ship, p1, p2, d_r, last_stop, should_challenge in positions:
if 0x1F == checkmate_count or should_challenge:
p = p1
mine_threshold = 3.99
else:
p = p2
if d_r <= 1 and 0 < ship.halite and (not last_stop):
mine_threshold = 3.99
else:
mine_threshold = None
a.log(id_=self.target_ship_id, s=f'p0{p0} s{ship.id}{ship.position}->p1{p1}/p2{p2} checkmate{checkmate_count} p{p} role{self.ships[ship.id]} center{self.center_direction} center_ship{center_ship_id} should_challenge={should_challenge} safe_dir{opponent_safe_direction}')
a.moving_ship_strategy(ship, position=p, mode='cancel_without_shipyard', mine_threshold=mine_threshold)
return True
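# Aside (a sketch, not part of the original agent): the checkmate test above uses
# a 5-bit mask with one bit per role, so 0x1F means every role has a ship within
# distance 1 of the target. The role constants are assumed here to be 0..4
# (I_NORTH..I_MINE), as the indexing above suggests.
if __name__ == "__main__":
    _mask = 0
    for _role in range(5):  # I_NORTH .. I_MINE, assumed to be 0..4
        _mask |= (1 << _role)
    assert _mask == 0x1F  # surrounded on all four sides plus the centre itself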
class MyAgent(object):
def __init__(self, player_id, *args, verbose, **kwargs):
self.player_id = player_id
self.verbose = verbose
self.flags = FlagsManager()
self.scores = np.zeros((N_SCORE_TYPES, ROWS, COLS), dtype=np.float32)
self.best_scores = np.zeros(N_SCORE_TYPES, dtype=np.float32)
self.best_score_cells = [None] * N_SCORE_TYPES
self.initial_phase_step = 20
self.spawn_step_threshold = 200 # + 50 * self.player_id
self.greedily_spawn_step_threshold = 50 # + 50 * self.player_id
self.len_ships_threshold = 57
        self.len_ships_threshold_weak = 99  # 3  # for the 'weak' opponent setting
self.opponent_history = [
{
'defense_against_shipyard_attack': [0, 0],
'deposit_against_shipyard_attack': [0, 0],
'cancel_against_shipyard_attack': [0, 0],
'stop_shipyard_neighbor': [0, 0],
                # cancel (mutual-destruction) stats are only counted for empty ships FUGA
                'cancel_any': [0, 0],
                'cancel_both_move_to_mine': [0, 0],  # whether they charge into a rich cell with no enemy directly above it
                'cancel_move_to_mining_opponent': [0, 0],  # charge at a cell where an opponent is mining
                'cancel_to_mine_here': [0, 0],  # accept a possible cancel in order to mine here
                'cancel_with_rob_chance': [0, 0],  # apparently canceled as a result of prioritizing a rob
                'shipyard_attacked': [0, 0],  # number of times a shipyard was actually destroyed
} for i in range(4)]
self.last_shipyard_attacked_step = -999
self.opponent_history_queue = []
self.board = None
self.previous_board = None
self.previous_len_opponent_ships = 3
self.belonging_project = {} # key is ship_id or shipyard_id, value is project_id
self.projects = {} # key is project_id, value is Project
self.log_step = -1
self.logs = {}
self.reserving_ships = {}
def dismiss_project(self, project_id, staff_ids):
assert isinstance(project_id, str)
project = self.projects.get(project_id, None)
if project is None:
return
# staffを解雇
for staff_id in staff_ids:
assert isinstance(staff_id, str)
self.belonging_project[staff_id] = None
if staff_id in project.ships:
del project.ships[staff_id]
if staff_id in project.shipyards:
del project.shipyards[staff_id]
def join_project(self, project_id, staff_ids, role='no_role', forced=False):
assert isinstance(project_id, str)
project = self.projects.get(project_id, None)
if project is None:
return
for staff_id in staff_ids:
assert isinstance(staff_id, str)
previous_project_id = self.belonging_project.get(staff_id, None)
if (previous_project_id is not None) and (project_id != previous_project_id):
if not forced:
self.log(loglevel='warning', s=f'p{self.player_id} staff_id={staff_id} join to {project_id} but it already belongs to {self.belonging_project[staff_id]}')
previous_project = self.projects.get(previous_project_id, None)
if previous_project:
previous_project.dismiss_project(staff_ids=[staff_id])
self.belonging_project[staff_id] = project_id
maybe_ship = self.board.ships.get(staff_id, None)
if maybe_ship:
project.ships[staff_id] = role
maybe_shipyard = self.board.shipyards.get(staff_id, None)
if maybe_shipyard:
project.shipyards[staff_id] = role
def log(self, s, step=None, id_=None, indent=0, loglevel='DEBUG'):
if not self.verbose:
return
level = getattr(logging, loglevel.upper())
if level < logging.DEBUG:
return
prefix = ''
if 0 < indent:
prefix += ' ' * indent
if step is None:
step = self.board.step
prefix += f'step{step} '
if self.log_step != step:
self.logs.clear()
self.log_step = step
if id_ is not None:
prefix += f'id{id_} '
if id_ not in self.logs:
self.logs[id_] = []
if (self.verbose & 4) == 4:
self.logs[id_].append(f'{prefix}{s}')
if ((self.verbose & 4) == 4) or ((self.verbose & 1) == 1 and logging.DEBUG < level):
easy_log(f'{prefix}{s}', loglevel=loglevel)
def update_opponent_history(self):
if self.previous_board is None:
self.log(step=self.board.step, id_=None, s=f'update_opponent_history: previous_board is None')
            return  # nothing to do on the first step
# defense_against_shipyard_attack, deposit_against_shipyard_attack
# stop_shipyard_neighbor
for previous_shipyard in self.previous_board.shipyards.values():
player_id = previous_shipyard.player_id
position = previous_shipyard.position
previous_cell = self.previous_board.cells[position]
cell = self.board.cells[position]
shipyard = cell.shipyard
            result = None  # 0: not defended, 1: defended, None: not applicable
            depositor_result = None  # 0: did not deposit, 1: forced a deposit, None: not applicable or no depositor
            cancel_result = None  # 0: could have canceled safely but did not, 1: canceled, None: anything else
            shipyard_attacked_result = None  # 0 or 1: number of shipyards destroyed by a shipyard attack
attackers = []
empty_attackers = []
dead_empty_attackers = []
defender_candidates = []
defenders = []
depositors = []
dead_depositors = []
min_attacker_halite = 99999
for previous_cell_i in self.neighbor_cells(previous_cell):
ship = previous_cell_i.ship
if ship is None:
pass
elif ship.player_id == player_id:
defender_candidates.append(ship)
else:
attackers.append(ship.id)
current_ship = self.board.ships.get(ship.id, None)
if ship.halite == 0:
empty_attackers.append(ship.id)
if current_ship:
attacker_result = 1 if (current_ship.position == ship.position) else 0
self.opponent_history[ship.player_id]['stop_shipyard_neighbor'][0] += attacker_result
self.opponent_history[ship.player_id]['stop_shipyard_neighbor'][1] += 1
else:
dead_empty_attackers.append(ship.id)
min_attacker_halite = min(ship.halite, min_attacker_halite)
            if 0 == len(attackers):  # there was no threat at all
continue
for candidate in defender_candidates:
if candidate.halite <= min_attacker_halite:
defenders.append(candidate.id)
else:
depositors.append(candidate.id)
if self.board.ships.get(candidate.id, None) is None:
dead_depositors.append(candidate.id)
if shipyard is None:
result = 0
if dead_depositors:
                    depositor_result = 1  # not exact, but most likely the case
elif depositors:
depositor_result = 0
shipyard_attacked_result = 1
            elif dead_empty_attackers:
                # a cancel happened; the question is which side attacked
                if cell.ship:  # confirmed that the shipyard side attacked
                    cancel_result = 1
                    # if the depositors simply returned home, it was not a defense
                    if cell.ship.id in depositors:
                        result = 0
                        depositor_result = 1
                    else:  # the defense was solid as well
                        result = 1
                        if depositors:
                            depositor_result = 0
                else:  # unclear where the cancel happened and its meaning differs, so leave it
                    pass
            elif cell.ship:
                # the attacker did not move: either a stalemate or a forced deposit
                if cell.ship.id in depositors:
                    result = 0
                    depositor_result = 1
                else:  # the ship that returned was a defender
                    result = 1
                    if depositors:  # only add to the stats when there was a deposit chance
                        depositor_result = 0
            else:  # the attacker did not move and the shipyard is undefended
                result = 0
                if depositors:
                    depositor_result = 0  # at least they did not deposit
if (len(defenders) == 0) and self.previous_board.players[player_id].halite < MAX_HALITE:
                # when halite is insufficient they obviously cannot spawn, so ignore this case
result = None
if result is not None:
                # numerator: number of times they defended
self.opponent_history[player_id]['defense_against_shipyard_attack'][0] += result
                # denominator: number of trials
self.opponent_history[player_id]['defense_against_shipyard_attack'][1] += 1
if depositor_result is not None:
self.opponent_history[player_id]['deposit_against_shipyard_attack'][0] += depositor_result
self.opponent_history[player_id]['deposit_against_shipyard_attack'][1] += 1
if cancel_result is not None:
self.opponent_history[player_id]['cancel_against_shipyard_attack'][0] += cancel_result
self.opponent_history[player_id]['cancel_against_shipyard_attack'][1] += 1
if shipyard_attacked_result is not None:
self.opponent_history[player_id]['shipyard_attacked'][0] += shipyard_attacked_result
self.opponent_history[player_id]['shipyard_attacked'][1] += 1
if player_id == self.player_id:
self.last_shipyard_attacked_step = self.board.step
for ship_id, previous_ship in self.previous_board.ships.items():
ship = self.board.ships.get(ship_id, None)
player_id = previous_ship.player_id
if ship is None:
self.log(id_=ship_id, s=f'{previous_ship.position} h{previous_ship.halite} p{player_id} dead')
pass
if 0 < previous_ship.halite:
                continue  # for now, only examine empty ships
            # the denominator counts cases where a cancel could have occurred (merge and shipyard_attack are not considered)
            can_cancel = [False] * LEN_MOVE
            can_rob = [False] * LEN_MOVE
            ground_halite = np.zeros(LEN_MOVE, dtype=np.float32)
            empty_opponent = np.zeros(LEN_MOVE, dtype=bool)
            move_to_cancel_position = False
            move_to_rob_position = False
            mine_here = False
            i_action = None
            i_action_candidates = np.zeros(LEN_MOVE, dtype=bool)  # directions where a cancel target could have been once the ship has disappeared
for k_action, cell_k0 in enumerate(self.neighbor_cells(self.previous_board.cells[previous_ship.position])):
ground_halite[k_action] = cell_k0.halite
for l_action, cell_l0 in enumerate(self.neighbor_cells(self.previous_board.cells[cell_k0.position])):
if not cell_l0.ship:
continue
if cell_l0.ship.player_id == player_id:
continue
if cell_l0.ship.halite == 0:
can_cancel[k_action] = True
if l_action == 0:
empty_opponent[k_action] = True
else:
can_rob[k_action] = True
ship_l1 = self.board.ships.get(cell_l0.ship.id, None)
if ship_l1 is None:
i_action_candidates[k_action] = True
                # current board
cell_k1 = self.board.cells[cell_k0.position]
if k_action == 0:
if (not cell_k0.shipyard) and (cell_k1.shipyard):
i_action = I_CONVERT
elif cell_k0.ship and cell_k0.ship.player_id != player_id and cell_k0.ship.halite == 0:
empty_opponent[k_action] = True
if cell_k1.ship and cell_k1.ship.id == ship_id:
i_action = k_action
if i_action == I_CONVERT:
continue
if ship is None:
if not any(i_action_candidates):
                    continue  # probably a merge or a shipyard attack (including a spawn cancel)
                # there are several candidates and the exact one is unknown, so take a conservative view:
                # assume the ship went to cancel somewhere it gained some benefit
move_to_cancel_position = True
move_to_rob_position = any(can_rob)
else:
i_action_candidates[:] = False
i_action_candidates[i_action] = True
move_to_cancel_position = can_cancel[i_action]
move_to_rob_position = can_rob[i_action]
for k_action in range(LEN_MOVE):
                # only interested in cells where a cancel is possible
if not can_cancel[k_action]:
can_rob[k_action] = False
ground_halite[k_action] = 0.0
if any(can_cancel):
self.opponent_history[player_id]['cancel_any'][1] += 1
if move_to_cancel_position:
# self.log(id_=ship_id, s=f'can_cancel{can_cancel} a{i_action}')
self.opponent_history[player_id]['cancel_any'][0] += 1
if any(can_rob):
self.opponent_history[player_id]['cancel_with_rob_chance'][1] += 1
if move_to_cancel_position:
# self.log(id_=ship_id, s=f'can_rob{can_rob} a{i_action}')
self.opponent_history[player_id]['cancel_with_rob_chance'][0] += 1
if 1e-6 < ground_halite[I_MINE]:
self.opponent_history[player_id]['cancel_to_mine_here'][1] += 1
if i_action_candidates[I_MINE]:
self.opponent_history[player_id]['cancel_to_mine_here'][0] += 1
            ground_halite[I_MINE] = 0.0  # only moves remain from here on, so this entry is no longer needed
            # keep the halite threshold for growing the denominator conservative
            has_ground_halite = 200.0 < np.array(ground_halite, dtype=np.float32)
# Copyright 2020-present, Netherlands Institute for Sound and Vision (<NAME>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import cv2
import matplotlib.pyplot as plt
import numpy as np
def mask_clusters(arr, delta_angle):
"""
    Creates a boolean array in which clusters of similar angles are delimited: elements are True while they stay within delta_angle of the current cluster start, and False where a new cluster begins
Parameters:
------------------------
arr: np array
contains sorted (angle) values, shape [#num_lines, 1]
delta_angle: int
what is the furthest angle to yield a match? Note that the matches are first filtered on angle and only then on length
Returns:
    bool_arr: np.array
a boolean array with the same shape as arr
"""
    arr = list(arr)  # work with a plain list of the array's rows
bool_arr = []
start_cluster_val = arr[0] # first element starts the cluster
for cluster_element in arr:
if abs(cluster_element - start_cluster_val) <= delta_angle:
bool_arr.append(True)
else:
bool_arr.append(False)
start_cluster_val = cluster_element
    bool_arr = np.array(bool_arr).reshape(-1, 1)
    return bool_arr
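# A minimal usage sketch (illustrative values, not part of the original module):
# the input angles must be sorted; False marks where a new cluster begins.
if __name__ == "__main__":
    demo_angles = np.array([0.0, 0.5, 0.9, 45.0, 45.2, 90.0]).reshape(-1, 1)
    print(mask_clusters(demo_angles, delta_angle=1).ravel())
    # -> [ True  True  True False  True False]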
def apply_hough(img, threshold=100, minLineLength=150, maxLineGap=30):
"""
Applies the probabilistic Hough transform on the given img with given parameters.
    Parameters have to be adjusted for the resolution and characteristics of the input image.
    High-res images should be scaled down to no more than 1500 pixels on the longest dimension, as the transform does not perform well on higher-res images.
    Example parameters that proved to work in different settings:
    params_low_res_hough_prob = {"threshold": 100, "minLineLength": 150, "maxLineGap": 30}  # for images around 500 pixels in width/length
    params_high_res_hough_prob = {"threshold": 200, "minLineLength": 150, "maxLineGap": 25}  # for large images (around 1000 pixels in width/length)
Parameters:
----------------------------
img: np.array
img in the form of a numpy array
    threshold: int
        The minimum number of intersections in Hough space to "detect" a line. higher -> fewer lines
    minLineLength: int
        the minimum number of points that can form a line. Lines with fewer than this number of points are disregarded. higher -> fewer lines
    maxLineGap: int
        The maximum gap between two points to be considered in the same line. higher -> more lines
Returns:
hough_lines: np.array
        A 3 dimensional array of format [num_lines, 1, 4]. The last dimension stores x_start, y_start, x_end, y_end of a line.
"""
rho_resolution = 1 # the discretization parameter for rho: not recommended to change
theta_resolution = np.pi / 180 # the discretization parameter for theta: not recommended to change
    blurred_image = cv2.GaussianBlur(img, (5, 5), 0)  # suppress weak edges; bigger kernels apply more blur -> fewer lines
    edges_image = cv2.Canny(blurred_image, 50, 120, None, apertureSize=3)  # applying Canny on the blurred image.
hough_lines = cv2.HoughLinesP(edges_image, rho_resolution, theta_resolution, threshold, None, minLineLength,
maxLineGap)
return hough_lines
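# A minimal usage sketch (synthetic input, not part of the original module): a
# black image with one thick white diagonal should yield at least one segment.
if __name__ == "__main__":
    demo_img = np.zeros((400, 400), dtype=np.uint8)
    cv2.line(demo_img, (50, 50), (350, 350), 255, 3)
    demo_lines = apply_hough(demo_img, threshold=50, minLineLength=100,
                             maxLineGap=10)
    print(None if demo_lines is None else demo_lines.shape)  # e.g. (1, 1, 4)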
def get_angle(hough_lines, radians=False):
"""
Calculates the angle of each line wrt. to the image plain.
Parameters:
-------------------
hough_lines: np.array
        A 3 dimensional array of format [num_lines, 1, 4]. The last dimension stores x_start, y_start, x_end, y_end of a line.
radians: bool
Whether to use radians. If False, uses degrees (better for numerical stability).
Returns:
-------------------
out: np.array
        Array of shape [num_lines, 1] storing the angle of each line. Note that the angles fall in the range [-90, 90] degrees.
"""
if radians:
return np.arctan2(hough_lines[:, :, 3] - hough_lines[:, :, 1], hough_lines[:, :, 2] - hough_lines[:, :, 0])
else:
return np.arctan2(hough_lines[:, :, 3] - hough_lines[:, :, 1],
hough_lines[:, :, 2] - hough_lines[:, :, 0]) * 180 / np.pi # better for numerical stability
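# A minimal usage sketch: a single segment from (0, 0) to (10, 10) lies at 45
# degrees in image coordinates.
if __name__ == "__main__":
    demo_seg = np.array([[[0, 0, 10, 10]]])
    print(get_angle(demo_seg))  # -> [[45.]]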
def plot_lines(img, hough_lines):
"""
Plots the hough_lines on the orginal image and displays it next to the orginal image.
Used in Jupyter Notebook for inspection. Can be modified to save the figure.
Parameters:
-------------------
img: np.array
img in the form of a numpy array
hough_lines: np.array
        A 3 dimensional array of format [num_lines, 1, 4]. The last dimension stores x_start, y_start, x_end, y_end of a line.
Returns: none
"""
original_image_with_hough_lines = img.copy()
    cmap = "gray" if img.ndim == 2 else None  # a gray colormap is only needed for single-channel images
for i in range(0, len(hough_lines)):
l = hough_lines[i][0]
cv2.line(original_image_with_hough_lines, (l[0], l[1]), (l[2], l[3]), (0, 0, 255), 3, cv2.LINE_AA)
plt.figure(figsize=(15, 10))
plt.subplot(121), plt.imshow(img)
plt.subplot(122), plt.imshow(original_image_with_hough_lines, cmap=cmap)
class matchingObjects:
"""
Stores frequently accessed information about the images and contains methods useful for line fitting.
Attributes:
--------------------
path: str
if file is read from directory, path stores that directory
img: np.array
stores the image in the RGB fashion
margin: int
defines how much the borders should be clipped. Useful for old images that have black borders resulting in fake line detections
shape: np.array
stores shape of the image
scale: float
the scaling factor compared to the original image
lines: np.array
strores the x_start,y_start, x_end, y_end corrdinates of the line in the image with a given scale and margin (not the orignal image)
angle: np.array
stores the angle of the detcted lines corresponding to the lines at the same index as in the "lines" atrribute
slope: np.array
stores the slope of the detcted lines corresponding to the lines at the same index as in the "lines" atrribute. Not used -> commented out.
length:
stores the length of the detcted lines corresponding to the lines at the same index as in the "lines" atrribute
Methods:
-------------------
hough_lines(self, radians = False, **kwargs):
        Applies the probabilistic Hough transform and calculates the characteristics of the found lines
rank_and_pick_lines(self, delta_angle = 1, max_lines = None):
        Filters out lines having similar angles (keeping the longest line out of the "similar" ones) to later limit the dimensionality of the database of lines.
"""
def __init__(self, path=None, margin=50, img=None, scale=1):
"""
Parameters:
-----------------
path: str
if file is read from directory, path stores that directory. If supplied, img is expected to be none.
margin: int
defines how much the borders should be clipped. Useful for old images that have black borders resulting in fake line detections.
            Remember to adjust your margin with the scale!
scale: float
the scaling factor compared to the original image
img: np.array
stores the image in the RGB fashion. If supplied, path is expected to be none.
"""
self.scale = scale
if img is None: # read in the image from path
self.path = path
if margin > 0: # deals with black borders that result in many fake line detections
self.img = plt.imread(self.path)[margin:-margin, margin:-margin]
else:
self.img = plt.imread(self.path)
elif path is None: # np array image is provided
self.path = None
if margin > 0: # deals with black borders that result in many fake line detections
self.img = img[margin:-margin, margin:-margin]
else:
self.img = img
self.shape = self.img.shape
if scale != 1:
            self.img = cv2.resize(self.img, (int(self.shape[1] * self.scale), int(self.shape[0] * self.scale)))  # cv2.resize expects (width, height)
self.shape = self.img.shape
def hough_lines(self, radians=False, **kwargs):
"""
Applies probabilistic hough transform to find lines. Additionally, for each line, it determines the angle, slope and length.
Parameters:
-----------------
radians: bool
            Determines whether to use radians. False is preferred due to possible numerical underflow problems later.
        **kwargs: dict, optional
            Additional arguments specifying the parameters of the Hough transform. Useful as the defaults have been optimized for archival, medium-resolution photos.
"""
self.lines = apply_hough(self.img, **kwargs)
if self.lines is not None: # if hough found something
            self.angle = get_angle(self.lines, radians=False)  # degrees preferred for numerical stability
x_diff = self.lines[:, :, 2] - self.lines[:, :, 0] # if 0 then slope will be -inf -> vertical line
y_diff = self.lines[:, :, 3] - self.lines[:, :, 1]
# self.slope = y_diff/x_diff # can be calculated if needed.
self.length = np.sqrt(x_diff ** 2 + y_diff ** 2)
def rank_and_pick_lines(self, delta_angle = 1, max_lines = None):
"""
        Filters out lines having similar angles (keeping the longest line out of the "similar" ones) to later limit the dimensionality of the database of lines.
Parameters:
-----------------
        delta_angle: float
            defines how close two angles have to be to be considered 'similar'
        max_lines: int
            specifies how many lines should be kept after filtering. The longest max_lines lines are kept.
"""
        if self.lines is not None:
            initial_max = np.max(self.length)
lst0 = self.lines
order = np.arange(0, len(lst0)).reshape(-1,1)
lst1 = self.angle
lst2 = self.length
merged = np.concatenate([lst1, lst2, order], axis = 1)
new_order = np.lexsort((lst2, lst1), axis = 0) # sorts first by angle then by length
            merged_new = merged[new_order]
grouping_mask = mask_clusters(merged[new_order][:,:,0], delta_angle)
            accum = []  # stores the longest line from each found cluster
            temp = []  # scratch list for bookkeeping within a cluster of similar lines
#print(grouping_mask)
for i in range(len(grouping_mask)):
if grouping_mask[i] == True:
temp.append(merged_new[i,:,:])
else:
                    accum.append(np.array(temp)[np.argmax(np.array(temp)[:, :, 1])])  # keep the longest line of the cluster (length is column 1)
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""fMRI Simulator
Simulate fMRI data for a single subject.
This code provides a set of functions necessary to produce realistic
simulations of fMRI data. There are two main steps: characterizing the
signal and generating the noise model, which are then combined to simulate
brain data. Tools are included to support the creation of different types
of signal, such as region specific differences in univariate
activity. To create the noise model the parameters can either be set
manually or can be estimated from real fMRI data with reasonable accuracy (
works best when fMRI data has not been preprocessed)
Functions:
generate_signal
Create a volume with activity, of a specified shape and either multivariate
or univariate pattern, in a specific region to represent the signal in the
neural data.
generate_stimfunction
Create a timecourse of the signal activation. This can be specified using event
onsets and durations from a timing file. This is the time course before
convolution and therefore can be at any temporal precision.
export_3_column:
Generate a three column timing file that can be used with software like FSL
to represent event onsets and durations
export_epoch_file:
Generate an epoch file from the time course which can be used as an input to
brainiak functions
convolve_hrf
Convolve the signal timecourse with the HRF to model the expected evoked
activity
apply_signal
Combine the signal volume with the HRF, thus giving the signal the temporal
properties of the HRF (such as smoothing and lag)
calc_noise
Estimate the noise properties of a given fMRI volume. Prominently, estimate
the smoothing and SFNR of the data
generate_noise
Create the noise for this run. This creates temporal, spatial, task and white
noise. Various parameters can be tuned depending on need
mask_brain
Create a mask volume that has similar contrast as an fMRI image. Defaults to
use an MNI grey matter atlas but any image can be supplied to create an
estimate.
plot_brain
Display the brain, timepoint by timepoint, with above threshold voxels
highlighted against the outline of the brain.
Authors:
<NAME> (Princeton) 2016-2017
<NAME> (Princeton) 2016-2017
"""
import logging
from itertools import product
import nitime.algorithms.autoregressive as ar
import math
import numpy as np
import numpy.matlib  # needed for np.matlib.repmat in apply_signal
from pkg_resources import resource_stream
from scipy import stats
from scipy import signal
import scipy.ndimage as ndimage
__all__ = [
"generate_signal",
"generate_stimfunction",
"export_3_column",
"export_epoch_file",
"convolve_hrf",
"apply_signal",
"calc_noise",
"generate_noise",
"mask_brain",
"plot_brain",
]
logger = logging.getLogger(__name__)
def _generate_feature(feature_type,
feature_size,
signal_magnitude,
thickness=1):
"""Generate features corresponding to signal
Generate a single feature, that can be inserted into the signal volume.
A feature is a region of activation with a specific shape such as cube
or ring
Parameters
----------
feature_type : str
What shape signal is being inserted? Options are 'cube',
'loop' (aka ring), 'cavity' (aka hollow sphere), 'sphere'.
feature_size : int
How big is the signal in diameter?
signal_magnitude : float
Set the signal size, a value of 1 means the signal is one standard
deviation of the noise
thickness : int
How thick is the surface of the loop/cavity
Returns
----------
signal : 3 dimensional array
The volume representing the signal
"""
# If the size is equal to or less than 2 then all features are the same
if feature_size <= 2:
feature_type = 'cube'
# What kind of signal is it?
if feature_type == 'cube':
# Preset the size of the signal
signal = np.ones((feature_size, feature_size, feature_size))
elif feature_type == 'loop':
# First make a cube of zeros
signal = np.zeros((feature_size, feature_size, feature_size))
# Make a mesh grid of the space
seq = np.linspace(0, feature_size - 1,
feature_size)
xx, yy = np.meshgrid(seq, seq)
# Make a disk corresponding to the whole mesh grid
xxmesh = (xx - ((feature_size - 1) / 2)) ** 2
yymesh = (yy - ((feature_size - 1) / 2)) ** 2
disk = xxmesh + yymesh
# What are the limits of the rings being made
outer_lim = disk[int((feature_size - 1) / 2), 0]
inner_lim = disk[int((feature_size - 1) / 2), thickness]
# What is the outer disk
outer = disk <= outer_lim
# What is the inner disk
inner = disk <= inner_lim
# Subtract the two disks to get a loop
loop = outer != inner
# Check if the loop is a disk
        if not np.any(inner):
            logger.warning('Loop feature reduces to a disk because the loop '
                           'is too thick')
        # If there is complete overlap then make the signal just the
        # outer one
        if not np.any(loop):
            loop = outer
# store the loop
signal[0:feature_size, 0:feature_size, int(np.round(feature_size /
2))] = loop
elif feature_type == 'sphere' or feature_type == 'cavity':
# Make a mesh grid of the space
seq = np.linspace(0, feature_size - 1,
feature_size)
xx, yy, zz = np.meshgrid(seq, seq, seq)
# Make a disk corresponding to the whole mesh grid
signal = ((xx - ((feature_size - 1) / 2)) ** 2 +
(yy - ((feature_size - 1) / 2)) ** 2 +
(zz - ((feature_size - 1) / 2)) ** 2)
# What are the limits of the rings being made
outer_lim = signal[int((feature_size - 1) / 2), int((feature_size -
1) / 2), 0]
inner_lim = signal[int((feature_size - 1) / 2), int((feature_size -
1) / 2),
thickness]
# Is the signal a sphere or a cavity?
if feature_type == 'sphere':
signal = signal <= outer_lim
else:
# Get the inner and outer sphere
outer = signal <= outer_lim
inner = signal <= inner_lim
# Subtract the two disks to get a loop
signal = outer != inner
# Check if the cavity is a sphere
            if not np.any(inner):
                logger.warning('Cavity feature reduces to a sphere because '
                               'the cavity is too thick')
            # If there is complete overlap then make the signal just the
            # outer one
            if not np.any(signal):
                signal = outer
# Assign the signal magnitude
signal = signal * signal_magnitude
# Return the signal
return signal
def _insert_idxs(feature_centre, feature_size, dimensions):
"""Returns the indices of where to put the signal into the signal volume
Parameters
----------
feature_centre : list, int
List of coordinates for the centre location of the signal
feature_size : list, int
How big is the signal's diameter.
dimensions : 3 length array, int
What are the dimensions of the volume you wish to create
Returns
----------
x_idxs : tuple
The x coordinates of where the signal is to be inserted
y_idxs : tuple
The y coordinates of where the signal is to be inserted
z_idxs : tuple
The z coordinates of where the signal is to be inserted
"""
# Set up the indexes within which to insert the signal
x_idx = [int(feature_centre[0] - (feature_size / 2)) + 1,
int(feature_centre[0] - (feature_size / 2) +
feature_size) + 1]
y_idx = [int(feature_centre[1] - (feature_size / 2)) + 1,
int(feature_centre[1] - (feature_size / 2) +
feature_size) + 1]
z_idx = [int(feature_centre[2] - (feature_size / 2)) + 1,
int(feature_centre[2] - (feature_size / 2) +
feature_size) + 1]
# Check for out of bounds
# Min Boundary
if 0 > x_idx[0]:
x_idx[0] = 0
if 0 > y_idx[0]:
y_idx[0] = 0
if 0 > z_idx[0]:
z_idx[0] = 0
# Max Boundary
if dimensions[0] < x_idx[1]:
x_idx[1] = dimensions[0]
if dimensions[1] < y_idx[1]:
y_idx[1] = dimensions[1]
if dimensions[2] < z_idx[1]:
z_idx[1] = dimensions[2]
# Return the idxs for data
return x_idx, y_idx, z_idx
def generate_signal(dimensions,
feature_coordinates,
feature_size,
feature_type,
signal_magnitude=[1],
signal_constant=1,
):
"""Generate volume containing signal
Generate signal, of a specific shape in specific regions, for a single
volume. This will then be convolved with the HRF across time
Parameters
----------
dimensions : 1d array, ndarray
What are the dimensions of the volume you wish to create
feature_coordinates : multidimensional array
What are the feature_coordinates of the signal being created.
Be aware of clipping: features far from the centre of the
brain will be clipped. If you wish to have multiple features
then list these as a features x 3 array. To create a feature of
a unique shape then supply all the individual
feature_coordinates of the shape and set the feature_size to 1.
    feature_size : list, int
        How big is the signal. If there is one feature then only one value is
        accepted; if there are m features then either one value or m values
        must be supplied
    feature_type : list, string
        What feature_type of signal is being inserted? Options are cube,
        loop, cavity, sphere. If there is one feature then only one value is
        accepted; if there are m features then either one value or m values
        must be supplied
signal_magnitude : list, float
What is the (average) magnitude of the signal being generated? A
value of 1 means that the signal is one standard deviation from the
noise
signal_constant : list, bool
Is the signal constant across the feature (for univariate activity)
or is it a random pattern of a given magnitude across the feature (for
multivariate activity)
Returns
----------
volume_signal : 3 dimensional array, float
Creates a single volume containing the signal
"""
# Preset the volume
volume_signal = np.zeros(dimensions)
feature_quantity = round(feature_coordinates.shape[0])
# If there is only one feature_size value then make sure to duplicate it
# for all signals
if len(feature_size) == 1:
feature_size = feature_size * feature_quantity
# Do the same for feature_type
if len(feature_type) == 1:
feature_type = feature_type * feature_quantity
if len(signal_magnitude) == 1:
signal_magnitude = signal_magnitude * feature_quantity
# Iterate through the signals and insert in the data
for signal_counter in range(feature_quantity):
# What is the centre of this signal
if len(feature_size) > 1:
feature_centre = np.asarray(feature_coordinates[signal_counter, ])
else:
feature_centre = np.asarray(feature_coordinates)[0]
# Generate the feature to be inserted in the volume
signal = _generate_feature(feature_type[signal_counter],
feature_size[signal_counter],
signal_magnitude[signal_counter],
)
# If the signal is a random noise pattern then multiply these ones by
# a noise mask
if signal_constant == 0:
signal = signal * np.random.random([feature_size[signal_counter],
feature_size[signal_counter],
feature_size[signal_counter]])
# Pull out the idxs for where to insert the data
x_idx, y_idx, z_idx = _insert_idxs(feature_centre,
feature_size[signal_counter],
dimensions)
# Insert the signal into the Volume
volume_signal[x_idx[0]:x_idx[1], y_idx[0]:y_idx[1], z_idx[0]:z_idx[
1]] = signal
return volume_signal
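# A minimal usage sketch (toy sizes, not part of the original module): a single
# 3-voxel cube of unit magnitude centred in a 10 x 10 x 10 volume.
if __name__ == "__main__":
    demo_volume = generate_signal(dimensions=np.array([10, 10, 10]),
                                  feature_coordinates=np.array([[5, 5, 5]]),
                                  feature_size=[3],
                                  feature_type=['cube'])
    print(int(demo_volume.sum()))  # -> 27, i.e. 3 ** 3 voxels of magnitude 1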
def generate_stimfunction(onsets,
event_durations,
total_time,
weights=[1],
timing_file=None,
temporal_resolution=100.0,
):
"""Return the function for the timecourse events
When do stimuli onset, how long for and to what extent should you
resolve the fMRI time course. There are two ways to create this, either
by supplying onset, duration and weight information or by supplying a
timing file (in the three column format used by FSL).
Parameters
----------
onsets : list, int
What are the timestamps (in s) for when an event you want to
generate onsets?
event_durations : list, int
What are the durations (in s) of the events you want to
generate? If there is only one value then this will be assigned
to all onsets
total_time : int
How long (in s) is the experiment in total.
weights : list, float
What is the weight for each event (how high is the box car)? If
there is only one value then this will be assigned to all onsets
timing_file : string
The filename (with path) to a three column timing file (FSL) to
make the events. Still requires total_time to work
temporal_resolution : float
How many elements per second are you modeling for the
timecourse. This is useful when you want to model the HRF at an
arbitrarily high resolution (and then downsample to your TR later).
Returns
----------
stim_function : 1 by timepoint array, float
The time course of stimulus evoked activation. This has a temporal
resolution of temporal resolution / 1.0 elements per second
"""
# If the timing file is supplied then use this to acquire the
if timing_file is not None:
# Read in text file line by line
with open(timing_file) as f:
            text = f.readlines()  # Pull out the file as an array of lines
# Preset
onsets = list()
event_durations = list()
weights = list()
# Pull out the onsets, weights and durations, set as a float
for line in text:
onset, duration, weight = line.strip().split()
# Check if the onset is more precise than the temporal resolution
upsampled_onset = float(onset) * temporal_resolution
# Because of float precision, the upsampled values might
            # not round as expected.
            # E.g. float('1.001') * 1000 = 1000.99
            if not np.allclose(upsampled_onset, np.round(upsampled_onset)):
warning = 'Your onset: ' + str(onset) + ' has more decimal ' \
'points than the ' \
'specified temporal ' \
'resolution can ' \
'resolve. This means' \
' that events might' \
' be missed. ' \
'Consider increasing' \
' the temporal ' \
'resolution.'
logging.warning(warning)
onsets.append(float(onset))
event_durations.append(float(duration))
weights.append(float(weight))
# If only one duration is supplied then duplicate it for the length of
# the onset variable
if len(event_durations) == 1:
event_durations = event_durations * len(onsets)
if len(weights) == 1:
weights = weights * len(onsets)
# Check files
if np.max(onsets) > total_time:
raise ValueError('Onsets outside of range of total time.')
# Generate the time course as empty, each element is a millisecond by
# default
stimfunction = np.zeros((int(round(total_time * temporal_resolution)), 1))
# Cycle through the onsets
for onset_counter in list(range(len(onsets))):
# Adjust for the resolution
onset_idx = int(np.floor(onsets[onset_counter] * temporal_resolution))
# Adjust for the resolution
offset_idx = int(np.floor((onsets[onset_counter] + event_durations[
onset_counter]) * temporal_resolution))
# Store the weights
stimfunction[onset_idx:offset_idx, 0] = [weights[onset_counter]]
# Shorten the data if it's too long
if stimfunction.shape[0] > total_time * temporal_resolution:
stimfunction = stimfunction[0:int(total_time * temporal_resolution), 0]
return stimfunction
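# A minimal usage sketch (illustrative onsets): two 5 s events in a 60 s run,
# modelled at the default 100 samples per second.
if __name__ == "__main__":
    demo_stim = generate_stimfunction(onsets=[10, 30], event_durations=[5],
                                      total_time=60)
    print(demo_stim.shape)  # -> (6000, 1)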
def export_3_column(stimfunction,
filename,
temporal_resolution=100.0
):
""" Output a tab separated three column timing file
This produces a three column tab separated text file, with the three
columns representing onset time (s), event duration (s) and weight,
respectively. Useful if you want to run the simulated data through FEAT
analyses. In a way, this is the reverse of generate_stimfunction
Parameters
----------
stimfunction : timepoint by 1 array
The stimulus function describing the time course of events. For
instance output from generate_stimfunction.
filename : str
The name of the three column text file to be output
temporal_resolution : float
How many elements per second are you modeling with the
stimfunction?
"""
# Iterate through the stim function
stim_counter = 0
event_counter = 0
while stim_counter < stimfunction.shape[0]:
# Is it an event?
if stimfunction[stim_counter, 0] != 0:
# When did the event start?
event_onset = str(stim_counter / temporal_resolution)
# The weight of the stimulus
weight = str(stimfunction[stim_counter, 0])
# Reset
event_duration = 0
# Is the event still ongoing?
            while stim_counter < stimfunction.shape[0] and \
                    stimfunction[stim_counter, 0] != 0:
# Add one millisecond to each duration
event_duration = event_duration + 1
# Increment
stim_counter = stim_counter + 1
# How long was the event in seconds
event_duration = str(event_duration / temporal_resolution)
# Append this row to the data file
with open(filename, "a") as file:
file.write(event_onset + '\t' + event_duration + '\t' +
weight + '\n')
# Increment the number of events
event_counter = event_counter + 1
# Increment
stim_counter = stim_counter + 1
def export_epoch_file(stimfunction,
filename,
tr_duration,
temporal_resolution=100.0
):
""" Output an epoch file, necessary for some inputs into brainiak
This takes in the time course of stimulus events and outputs the epoch
file used in Brainiak. The epoch file is a way to structure the timing
information in fMRI that allows you to flexibly input different stimulus
sequences. This is a list with each entry a 3d matrix corresponding to a
participant. The dimensions of the 3d matrix are condition by epoch by time
Parameters
----------
stimfunction : list of timepoint by condition arrays
The stimulus function describing the time course of events. Each
list entry is from a different participant, each row is a different
timepoint (with the given temporal precision), each column is a
different condition. export_epoch_file is looking for differences in
the value of stimfunction to identify the start and end of an
epoch. If epochs in stimfunction are coded with the same weight and
there is no time between blocks then export_epoch_file won't be able to
label them as different epochs
filename : str
The name of the three column text file to be output
tr_duration : float
How long is each TR in seconds
temporal_resolution : float
How many elements per second are you modeling with the
stimfunction?
"""
# Cycle through the participants, different entries in the list
epoch_file = [0] * len(stimfunction)
for participant_counter in range(len(stimfunction)):
# What is the time course for the participant (binarized)
stimfunction_ppt = np.abs(stimfunction[participant_counter]) > 0
# Cycle through conditions
conditions = stimfunction_ppt.shape[1]
for condition_counter in range(conditions):
# Down sample the stim function
stride = tr_duration * temporal_resolution
stimfunction_temp = stimfunction_ppt[:, condition_counter]
stimfunction_temp = stimfunction_temp[::int(stride)]
if condition_counter == 0:
# Calculates the number of event onsets (max of all
# conditions). This uses changes in value to reflect
# different epochs. This might be false in some cases (the
# weight is supposed to unfold over an epoch or there is no
# break between identically weighted epochs). In such cases
# this will not work
weight_change = (np.diff(stimfunction_temp, 1, 0) != 0)
epochs = int(np.max(np.sum(weight_change, 0)) / 2)
# Get other information
trs = stimfunction_temp.shape[0]
# Make a timing file for this participant
epoch_file[participant_counter] = np.zeros((conditions,
epochs, trs))
epoch_counter = 0
tr_counter = 0
while tr_counter < stimfunction_temp.shape[0]:
# Is it an event?
if stimfunction_temp[tr_counter] == 1:
# Add a one for this TR
epoch_file[participant_counter][condition_counter,
epoch_counter,
tr_counter] = 1
# Find the next non event value
end_idx = np.where(stimfunction_temp[tr_counter:] == 0)[
0][0]
tr_idxs = list(range(tr_counter, tr_counter + end_idx))
# Add ones to all the trs within this event time frame
epoch_file[participant_counter][condition_counter,
epoch_counter,
tr_idxs] = 1
# Start from this index
tr_counter += end_idx
# Increment
epoch_counter += 1
# Increment the counter
tr_counter += 1
# Save the file
np.save(filename, epoch_file)
def _double_gamma_hrf(response_delay=6,
undershoot_delay=12,
response_dispersion=0.9,
undershoot_dispersion=0.9,
response_scale=1,
undershoot_scale=0.035,
temporal_resolution=100.0,
):
"""Create the double gamma HRF with the timecourse evoked activity.
Default values are based on Glover, 1999 and Walvaert, Durnez,
Moerkerke, Verdoolaege and Rosseel, 2011
Parameters
----------
response_delay : float
How many seconds until the peak of the HRF
undershoot_delay : float
How many seconds until the trough of the HRF
response_dispersion : float
How wide is the rising peak dispersion
undershoot_dispersion : float
How wide is the undershoot dispersion
response_scale : float
How big is the response relative to the peak
    undershoot_scale : float
        How big is the undershoot relative to the trough
temporal_resolution : float
How many elements per second are you modeling for the stimfunction
Returns
----------
hrf : multi dimensional array
A double gamma HRF to be used for convolution.
"""
hrf_length = 30 # How long is the HRF being created
# How many seconds of the HRF will you model?
hrf = [0] * int(hrf_length * temporal_resolution)
# When is the peak of the two aspects of the HRF
response_peak = response_delay * response_dispersion
undershoot_peak = undershoot_delay * undershoot_dispersion
for hrf_counter in list(range(len(hrf) - 1)):
# Specify the elements of the HRF for both the response and undershoot
resp_pow = math.pow((hrf_counter / temporal_resolution) /
response_peak, response_delay)
resp_exp = math.exp(-((hrf_counter / temporal_resolution) -
response_peak) /
response_dispersion)
response_model = response_scale * resp_pow * resp_exp
undershoot_pow = math.pow((hrf_counter / temporal_resolution) /
undershoot_peak,
undershoot_delay)
        undershoot_exp = math.exp(-((hrf_counter / temporal_resolution) -
                                    undershoot_peak) /
                                  undershoot_dispersion)
undershoot_model = undershoot_scale * undershoot_pow * undershoot_exp
# For this time point find the value of the HRF
hrf[hrf_counter] = response_model - undershoot_model
return hrf
def convolve_hrf(stimfunction,
tr_duration,
hrf_type='double_gamma',
scale_function=True,
temporal_resolution=100.0,
):
""" Convolve the specified hrf with the timecourse.
The output of this is a downsampled convolution of the stimfunction and
the HRF function. If temporal_resolution is 1 / tr_duration then the
output will be the same length as stimfunction. This time course assumes
that slice time correction has occurred and all slices have been aligned
to the middle time point in the TR.
Be aware that if scaling is on and event durations are less than the
duration of a TR then the hrf may or may not come out as anticipated.
This is because very short events would evoke a small absolute response
after convolution but if there are only short events and you scale then
this will look similar to a convolution with longer events. In general
scaling is useful, which is why it is the default, but be aware of this
edge case and if it is a concern, set the scale_function to false.
Parameters
----------
stimfunction : timepoint by timecourse array
What is the time course of events to be modelled in this
experiment. This can specify one or more timecourses of events.
The events can be weighted or binary
tr_duration : float
How long (in s) between each volume onset
hrf_type : str or list
Takes in a string describing the hrf that ought to be created.
Can instead take in a vector describing the HRF as it was
specified by any function. The default is 'double_gamma' in which
an initial rise and an undershoot are modelled.
scale_function : bool
Do you want to scale the function to a range of 1
temporal_resolution : float
How many elements per second are you modeling for the stimfunction
Returns
----------
signal_function : timepoint by timecourse array
The time course of the HRF convolved with the stimulus function.
This can have multiple time courses specified as different
columns in this array.
"""
# How will stimfunction be resized
stride = int(temporal_resolution * tr_duration)
duration = int(stimfunction.shape[0] / stride)
# Generate the hrf to use in the convolution
if hrf_type == 'double_gamma':
hrf = _double_gamma_hrf(temporal_resolution=temporal_resolution)
elif isinstance(hrf_type, list):
hrf = hrf_type
# How many timecourses are there
list_num = stimfunction.shape[1]
# Create signal functions for each list in the stimfunction
for list_counter in range(list_num):
# Perform the convolution
signal_temp = np.convolve(stimfunction[:, list_counter], hrf)
# Down sample the signal function so that it only has one element per
# TR. This assumes that all slices are collected at the same time,
# which is often the result of slice time correction. In other
# words, the output assumes slice time correction
signal_temp = signal_temp[:duration * stride]
signal_vox = signal_temp[int(stride / 2)::stride]
# Scale the function so that the peak response is 1
if scale_function:
signal_vox = signal_vox / np.max(signal_vox)
# Add this function to the stack
if list_counter == 0:
signal_function = np.zeros((len(signal_vox), list_num))
signal_function[:, list_counter] = signal_vox
return signal_function
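# A minimal usage sketch continuing the generate_stimfunction example above:
# convolving the boxcar with the default double-gamma HRF and downsampling to
# a 2 s TR leaves one value per TR.
if __name__ == "__main__":
    demo_stim = generate_stimfunction(onsets=[10, 30], event_durations=[5],
                                      total_time=60)
    demo_signal_func = convolve_hrf(demo_stim, tr_duration=2.0)
    print(demo_signal_func.shape)  # -> (30, 1), i.e. 60 s / 2 s per TR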
def apply_signal(signal_function,
volume_signal,
):
"""Combine the signal volume with its timecourse
Apply the convolution of the HRF and stimulus time course to the
volume.
Parameters
----------
signal_function : timepoint by timecourse array, float
The timecourse of the signal over time. If there is only one column
then the same timecourse is applied to all non-zero voxels in
volume_signal. If there is more than one column then each column is
paired with a non-zero voxel in the volume_signal (a 3d numpy array
generated in generate_signal).
volume_signal : multi dimensional array, float
The volume containing the signal to be convolved with the same
dimensions as the output volume. The elements in volume_signal
indicate how strong each signal in signal_function are modulated by
in the output volume
Returns
----------
signal : multidimensional array, float
The convolved signal volume with the same 3d as volume signal and
the same 4th dimension as signal_function
"""
# How many timecourses are there within the signal_function
timepoints = signal_function.shape[0]
timecourses = signal_function.shape[1]
# Preset volume
signal = np.zeros([volume_signal.shape[0], volume_signal.shape[
1], volume_signal.shape[2], timepoints])
# Find all the non-zero voxels in the brain
idxs = np.where(volume_signal != 0)
if timecourses == 1:
# If there is only one time course supplied then duplicate it for
# every voxel
signal_function = np.matlib.repmat(signal_function, 1, len(idxs[0]))
elif len(idxs[0]) != timecourses:
raise IndexError('The number of non-zero voxels in the volume and '
'the number of timecourses does not match. Aborting')
# For each coordinate with a non zero voxel, fill in the timecourse for
# that voxel
for idx_counter in range(len(idxs[0])):
x = idxs[0][idx_counter]
y = idxs[1][idx_counter]
z = idxs[2][idx_counter]
# Pull out the function for this voxel
signal_function_temp = signal_function[:, idx_counter]
# Multiply the voxel value by the function timecourse
signal[x, y, z, :] = volume_signal[x, y, z] * signal_function_temp
return signal
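# A minimal end-to-end sketch (toy sizes, not part of the original module):
# every non-zero voxel of the cube receives the same convolved timecourse.
if __name__ == "__main__":
    demo_volume = generate_signal(dimensions=np.array([10, 10, 10]),
                                  feature_coordinates=np.array([[5, 5, 5]]),
                                  feature_size=[3], feature_type=['cube'])
    demo_stim = generate_stimfunction(onsets=[10], event_durations=[5],
                                      total_time=60)
    demo_signal_func = convolve_hrf(demo_stim, tr_duration=2.0)
    demo_brain = apply_signal(demo_signal_func, demo_volume)
    print(demo_brain.shape)  # -> (10, 10, 10, 30)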
def _calc_fwhm(volume,
mask,
voxel_size=[1.0, 1.0, 1.0],
):
""" Calculate the FWHM of a volume
Estimates the FWHM (mm) of a volume's non-masked voxels
Parameters
----------
volume : 3 dimensional array
Functional data to have the FWHM measured.
mask : 3 dimensional array
A binary mask of the brain voxels in volume
voxel_size : length 3 list, float
Millimeters per voxel for x, y and z.
Returns
-------
fwhm : float, list
Returns the FWHM of each TR in mm
"""
# What are the dimensions of the volume
dimensions = volume.shape
# Iterate through the TRs, creating a FWHM for each TR
# Preset
v_count = 0
v_sum = 0
v_sq = 0
d_sum = [0.0, 0.0, 0.0]
d_sq = [0.0, 0.0, 0.0]
d_count = [0, 0, 0]
# Pull out all the voxel coordinates
coordinates = list(product(range(dimensions[0]),
range(dimensions[1]),
range(dimensions[2])))
# Find the sum of squared error for the non-masked voxels in the brain
for i in list(range(len(coordinates))):
# Pull out this coordinate
x, y, z = coordinates[i]
# Is this within the mask?
if mask[x, y, z] > 0:
            # Find the volume sum and squared values
v_count += 1
v_sum += np.abs(volume[x, y, z])
v_sq += volume[x, y, z] ** 2
# Get the volume variance
v_var = (v_sq - ((v_sum ** 2) / v_count)) / (v_count - 1)
for i in list(range(len(coordinates))):
# Pull out this coordinate
x, y, z = coordinates[i]
# Is this within the mask?
if mask[x, y, z] > 0:
# For each xyz dimension calculate the squared
# difference of this voxel and the next
in_range = (x < dimensions[0] - 1)
in_mask = in_range and (mask[x + 1, y, z] > 0)
included = in_mask and (~np.isnan(volume[x + 1, y, z]))
if included:
d_sum[0] += volume[x, y, z] - volume[x + 1, y, z]
d_sq[0] += (volume[x, y, z] - volume[x + 1, y, z]) ** 2
d_count[0] += 1
in_range = (y < dimensions[1] - 1)
in_mask = in_range and (mask[x, y + 1, z] > 0)
included = in_mask and (~np.isnan(volume[x, y + 1, z]))
if included:
d_sum[1] += volume[x, y, z] - volume[x, y + 1, z]
d_sq[1] += (volume[x, y, z] - volume[x, y + 1, z]) ** 2
d_count[1] += 1
in_range = (z < dimensions[2] - 1)
in_mask = in_range and (mask[x, y, z + 1] > 0)
included = in_mask and (~np.isnan(volume[x, y, z + 1]))
if included:
d_sum[2] += volume[x, y, z] - volume[x, y, z + 1]
d_sq[2] += (volume[x, y, z] - volume[x, y, z + 1]) ** 2
d_count[2] += 1
# Find the variance
    d_var = np.divide((d_sq - np.divide(np.power(d_sum, 2), d_count)),
                      np.subtract(d_count, 1))
"""Parse CaffeModel.
Helped by caffe2theano, MarcBS's Caffe2Keras module.
Author: <NAME>
Email : <EMAIL>
"""
from __future__ import print_function
from collections import OrderedDict
import numpy as np
from scipy.io import loadmat
from transcaffe import caffe_pb2, utils
from google.protobuf.text_format import Merge
from keras.models import Model
from transcaffe import layers as L
v1_map = {0: 'NONE', 1: 'ACCURACY', 2: 'BNLL', 3: 'CONCAT', 4: 'CONVOLUTION',
5: 'DATA', 6: 'DROPOUT', 7: 'EUCLIDEANLOSS', 8: 'FLATTEN',
9: 'HDF5DATA', 10: 'HDF5OUTPUT', 11: 'IM2COL', 12: 'IMAGEDATA',
13: 'INFOGAINLOSS', 14: 'INNERPRODUCT', 15: 'LRN',
16: 'MULTINOMIALLOGISTICLOSS', 17: 'POOLING', 18: 'RELU',
19: 'SIGMOID', 20: 'SOFTMAX', 21: 'SOFTMAXWITHLOSS', 22: 'SPLIT',
23: 'TANH', 24: 'WINDOWDATA', 25: 'ELTWISE', 26: 'POWER',
27: 'SIGMOIDCROSSENTROPYLOSS', 28: 'HINGELOSS', 29: 'MEMORYDATA',
30: 'ARGMAX', 31: 'THRESHOLD', 32: 'DUMMY_DATA', 33: 'SLICE',
34: 'MVN', 35: 'ABSVAL', 36: 'SILENCE', 37: 'CONTRASTIVELOSS',
38: 'EXP', 39: 'DECONVOLUTION'}
def load(model_def, model_bin, target_lib="keras"):
"""Load a Caffe model and convert to target library.
Parameters
----------
model_def : string
absolute path of a given .protobuf text
model_bin : string
absolute path of a given .caffemodel binary
target_lib : string
target library, currently only Keras is supported.
In planning: Lasagne, TensorFlow
Returns
-------
model : keras.models.model
a loaded model.
"""
print ("[MESSAGE] Target model is loading...")
net_param = parse_protobuf(model_def)
layers, version = get_layers(net_param)
input_dim = get_input_size(net_param)
model = get_model(layers, 1, tuple(input_dim[1:]), net_param.name)
print ("[MESSAGE] Printing converted model...")
model.summary()
print ("[MESSAGE] The model is built.")
print ("[MESSAGE] Parsing network parameters...")
param_layers, _ = parse_caffemodel(model_bin)
net_weights = get_network_weights(param_layers, version)
print ("[MESSAGE] Loading parameters into network...")
build_model(model, net_weights)
print ("[MESSAGE] The model is loaded successfully.")
return model
def parse_caffemodel(filename):
"""Parse a given caffemodel.
Parameters
----------
filename : string
absolute path of a given .caffemodel
Returns
-------
layers : list
The list representation of the network
version : string
        pretrained network version
"""
utils.file_checker(filename)
net_param = caffe_pb2.NetParameter()
f = open(filename, mode="rb")
contents = f.read()
f.close()
net_param.ParseFromString(contents)
return get_layers(net_param)
def parse_mean_file(filename, mode="proto"):
"""Parse a mean file by given path.
TODO: complete more options based on different Caffe Models
Parameters
----------
filename : string
absolute path of the mean file
mode : string
"proto" for .binaryproto file
"mat" for MAT binary file
Returns
-------
mean_mat : numpy.ndarray
an array that contains the mean values
"""
utils.file_checker(filename)
if mode == "proto":
tp = caffe_pb2.TransformationParameter()
f = open(filename, mode="rb")
mean_contents = f.read()
f.close()
tp.ParseFromString(mean_contents)
mean_mat = np.array(tp.mean_value).reshape((3,
tp.crop_size,
tp.crop_size))
mean_mat = np.transpose(mean_mat, (1, 2, 0))
elif mode == "mat":
# based on VGG's Mat file.
mean_contents = loadmat(filename)
mean_mat = mean_contents["image_mean"]
print(mean_mat.shape)
return mean_mat
def parse_protobuf(filename):
"""Parse a given protobuf file.
Parameters
----------
filename : string
absolute path of .prototxt file
Returns
-------
net_param : caffe_pb2.NetParameter
The parsed .prototxt structure.
"""
utils.file_checker(filename)
f = open(filename, mode="rb")
net_param = caffe_pb2.NetParameter()
net_def = f.read()
# append quotes around type information if needed.
    # it seems not to work, perhaps because of a newer definition?
# net_def = f.read().split("\n")
# for i, line in enumerate(net_def):
# l = line.strip().replace(" ", "").split('#')[0]
# if len(l) > 6 and l[:5] == 'type:' and l[5] != "\'" and l[5] != '\"':
# type_ = l[5:]
# net_def[i] = ' type: "' + type_ + '"'
#
# net_def = '\n'.join(net_def)
# Check before Merge? For V1?
Merge(net_def, net_param)
f.close()
return net_param
def get_layers(net_param):
"""Get layers information.
Parameters
----------
net_param : caffe_pb2.NetParameter
        A pretrained network description.
Returns
-------
layers : list
description of the layers.
version : string
version information of the pretrained model.
"""
if len(net_param.layers) > 0:
return net_param.layers[:], "V1"
elif len(net_param.layer) > 0:
return net_param.layer[:], "V2"
else:
raise Exception("Couldn't find layers!")
def get_layer_type(layer):
"""Get a given layer type.
Parameters
----------
layer : caffe_pb2.V1LayerParameter
a given layer in the network
Returns
-------
type : int or string
type of the layer.
"""
if type(layer.type) == int:
return str(v1_map[layer.type]).lower()
else:
return str(layer.type).lower()
def get_input_size(net_param):
"""Get input parameters, or guess one at least.
Parameters
----------
net_param : caffe_pb2.NetParameter
structure that contains all the network parameters
Returns
-------
in_size : tuple
tuple that defines the input size
"""
if len(net_param.input_dim) != 0:
return net_param.input_dim
elif len(net_param.input_shape) != 0:
return net_param.input_shape
else:
print("[MESSAGE] Couldn't find Input shape in the Network Parameters."
"The returned shape is inferenced from the network name")
# try:
# scale = layer.transform_param.scale
# scale = 1 if scale <= 0 else scale
# except AttributeError:
# pass
return []
def check_phase(layer, phase):
"""Check if the layer matches with the target phase.
Parameters
----------
layer : caffe_pb2.V1LayerParameter
A given layer.
phase : int
0 : train
1 : test
"""
try:
return True if layer.include[0].phase == phase else False
except IndexError:
return True
def get_network(layers, phase):
"""Get structure of the network.
Parameters
----------
layers : list
list of layers parsed from network parameters
phase : int
0 : train
1 : test
"""
num_layers = len(layers)
network = OrderedDict()
for i in xrange(num_layers):
layer = layers[i]
if check_phase(layer, phase):
layer_id = "trans_layer_"+str(i)
if layer_id not in network:
network[layer_id] = []
prev_blobs = map(str, layer.bottom)
next_blobs = map(str, layer.top)
for blob in prev_blobs+next_blobs:
if blob not in network:
network[blob] = []
for blob in prev_blobs:
network[blob].append(layer_id)
network[layer_id].extend(next_blobs)
network = remove_loops(network)
network = remove_blobs(network)
return network
def remove_loops(network):
"""Remove potential loops from the network.
Parameters
----------
network : OrderedDict
given network dictionary
    Returns
    -------
    network : OrderedDict
        a loop-free altered network.
"""
for e in network:
if e.startswith("trans_layer_"):
continue
idx = 0
while idx < len(network[e]):
next_e = network[e][idx]
if e in network[next_e]:
new_e = e+"_"+str(idx)
network[e].remove(next_e)
network[new_e] = network[e]
network[e] = [next_e]
network[next_e] = [new_e]
for n in network[new_e]:
if network[n] == [e]:
network[n] = [new_e]
e = new_e
idx = 0
else:
idx += 1
return network
def remove_blobs(network):
"""Remove blobs from network.
Parameters
----------
network : OrderedDict
given network dictionary
Returns
-------
new_network : OrderedDict
blobs removed network dictionary
"""
new_network = OrderedDict()
def get_idx(x): return int(x[12:])
for e in network:
if e.startswith("trans_layer_"):
idx = get_idx(e)
if idx not in new_network:
new_network[idx] = []
for next_e in network[e]:
next_es = map(get_idx, network[next_e])
new_network[idx].extend(next_es)
return new_network
def reverse_net(network):
"""Reverse a network.
Parameters
----------
network : OrderedDict
A parsed network
Returns
-------
rev : OrderedDict
reversed network
"""
rev = OrderedDict()
for node in network.keys():
rev[node] = []
for node in network.keys():
for n in network[node]:
rev[n].append(node)
return rev
def get_input_layers(network):
"""Get input layers (layers with zero in-order).
Parameters
----------
network : OrderedDict
A parsed network
Returns
-------
in_layers : list
a list of input layers
"""
return get_output_layers(reverse_net(network))
def get_output_layers(network):
"""Get output layers (layers with zero out-order).
Parameters
----------
network : OrderedDict
A parsed network
Returns
-------
out_layers : list
a list of out layers
"""
out_layers = []
for idx in network:
if network[idx] == []:
out_layers.append(idx)
return out_layers
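# Example (illustrative; layer/blob names are hypothetical). A simple chain
# "data -> trans_layer_0 -> conv1 -> trans_layer_1 -> out" collapses to a
# graph over layer indices, from which the input/output layers follow:
# >>> network = OrderedDict([("data", ["trans_layer_0"]),
# ...                        ("trans_layer_0", ["conv1"]),
# ...                        ("conv1", ["trans_layer_1"]),
# ...                        ("trans_layer_1", ["out"]),
# ...                        ("out", [])])
# >>> remove_blobs(network)
# OrderedDict([(0, [1]), (1, [])])
# >>> get_input_layers({0: [1], 1: []}), get_output_layers({0: [1], 1: []})
# ([0], [1])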
def get_model(layers, phase, input_dim, model_name, lib_type="keras"):
"""Get a model by given network parameters.
Parameters
----------
layers : list
        list of layers parsed from the network parameters.
phase : int
0 : train
1 : test
input_dim : list
the input dimension
model_name : string
the name of the given model.
lib_type : string
currently only Keras is supported.
"""
network = get_network(layers, phase)
if len(network) == 0:
raise Exception("No valid network is parsed!")
in_layers = get_input_layers(network)
out_layers = get_output_layers(network)
rev_network = reverse_net(network)
    def data_layer(x): return get_layer_type(x) in ['data', 'imagedata', 'memorydata',
                                                    'hdf5data', 'windowdata']
# remove the link from input to output.
for in_idx in in_layers:
for out_idx in out_layers:
if out_idx in network[in_idx] and data_layer(layers[in_idx]):
                network[in_idx].remove(out_idx)
net = [None]*(max(network)+1)
for layer_id in network:
layer = layers[layer_id]
layer_name = layer.name
layer_type = get_layer_type(layer)
if layer_id in in_layers:
net[layer_id] = L.input_layer(input_dim, layer_name)
else:
layer_in = [None]*(len(rev_network[layer_id]))
for l in xrange(len(rev_network[layer_id])):
layer_in[l] = net[rev_network[layer_id][l]]
if layer_type in ["relu", "sigmoid", "softmax", "softmaxwithloss",
"split", "tanh"]:
net[layer_id] = L.activation(act_type=layer_type,
name=layer_name)(layer_in)
elif layer_type == "batchnorm":
epsilon = layer.batchnorm_param.eps
axis = layer.scale_param.axis
net[layer_id] = L.batch_norm(epsilon=epsilon, axis=axis,
name=layer_name)(layer_in)
elif layer_type == "lrn":
alpha = layer.lrn_param.alpha
k = layer.lrn_param.k
beta = layer.lrn_param.beta
n = layer.lrn_param.local_size
net[layer_id] = L.lrn(alpha, k, beta, n, layer_name)(layer_in)
elif layer_type == "scale":
axis = layer.scale_param.axis
net[layer_id] = L.scale(axis, layer_name)(layer_in)
elif layer_type == "dropout":
prob = layer.dropout_param.dropout_ratio
net[layer_id] = L.dropout(prob, name=layer_name)(layer_in)
elif layer_type == "flatten":
net[layer_id] = L.flatten(name=layer_name)(layer_in)
elif layer_type == "concat":
axis = layer.concat_param.axis
                net[layer_id] = L.merge(layer_in, mode='concat',
                                        concat_axis=axis, name=layer_name)
elif layer_type == "eltwise":
axis = layer.scale_param.axis
op = layer.eltwise_param.operation
if op == 0:
mode = "mul"
elif op == 1:
mode = "sum"
elif op == 2:
mode == "max"
else:
raise NotImplementedError("Operation is not implemented!")
net[layer_id] = L.merge(layer_in, mode=mode, concat_axis=axis,
name=layer_name)
elif layer_type == "innerproduct":
output_dim = layer.inner_product_param.num_output
if len(layer_in[0]._keras_shape[1:]) > 1:
layer_in = L.flatten(name=layer_name+"_flatten")(layer_in)
net[layer_id] = L.dense(output_dim, name=layer_name)(layer_in)
elif layer_type == "convolution":
has_bias = layer.convolution_param.bias_term
nb_filter = layer.convolution_param.num_output
nb_col = (layer.convolution_param.kernel_size or
[layer.convolution_param.kernel_h])[0]
nb_row = (layer.convolution_param.kernel_size or
[layer.convolution_param.kernel_w])[0]
stride_h = (layer.convolution_param.stride or
[layer.convolution_param.stride_h])[0] or 1
stride_w = (layer.convolution_param.stride or
[layer.convolution_param.stride_w])[0] or 1
pad_h = (layer.convolution_param.pad or
[layer.convolution_param.pad_h])[0]
pad_w = (layer.convolution_param.pad or
[layer.convolution_param.pad_w])[0]
if pad_h + pad_w > 0:
layer_in = L.zeropadding(padding=(pad_h, pad_w),
name=layer_name)(layer_in)
net[layer_id] = L.convolution(nb_filter, nb_row, nb_col,
bias=has_bias,
subsample=(stride_h, stride_w),
name=layer_name)(layer_in)
elif layer_type == "pooling":
kernel_h = layer.pooling_param.kernel_size or \
layer.pooling_param.kernel_h
kernel_w = layer.pooling_param.kernel_size or \
layer.pooling_param.kernel_w
stride_h = layer.pooling_param.stride or \
layer.pooling_param.stride_h or 1
stride_w = layer.pooling_param.stride or \
layer.pooling_param.stride_w or 1
pad_h = layer.pooling_param.pad or layer.pooling_param.pad_h
pad_w = layer.pooling_param.pad or layer.pooling_param.pad_w
if pad_h + pad_w > 0:
layer_in = L.zeropadding(padding=(pad_h, pad_w),
name=layer_name)(layer_in)
net[layer_id] = L.pooling(pool_size=(kernel_h, kernel_w),
strides=(stride_h, stride_w),
pool_type=layer.pooling_param.pool,
name=layer_name)(layer_in)
in_l = [None]*(len(in_layers))
out_l = [None]*(len(out_layers))
for i in xrange(len(in_layers)):
in_l[i] = net[in_layers[i]]
for i in xrange(len(out_layers)):
out_l[i] = net[out_layers[i]]
return Model(input=in_l, output=out_l, name=model_name)
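# Typical end-to-end usage (a sketch; the file path and input size are
# hypothetical, and `L`/`Model` must be importable as used above):
# >>> net_param = parse_protobuf("/path/to/deploy.prototxt")
# >>> layers, version = get_layers(net_param)
# >>> model = get_model(layers, phase=1, input_dim=[3, 224, 224],
# ...                   model_name="converted_net")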
def get_network_weights(layers, version):
"""Parse network weights.
Parameters
----------
layers : list
List of parameter layers from caffemodel
version : "string"
"V1" or "V2"
Return
------
net_weights : OrderedDict
network's weights
"""
net_weights = OrderedDict()
for layer in layers:
layer_type = get_layer_type(layer)
if layer_type == "innerproduct":
blobs = layer.blobs
if (version == "V1"):
num_filters = blobs[0].num
num_channels = blobs[0].channels
num_col = blobs[0].height
num_row = blobs[0].width
elif (version == "V2"):
if (len(blobs[0].shape.dim) == 4):
num_filters = int(blobs[0].shape.dim[0])
num_channels = int(blobs[0].shape.dim[1])
num_col = int(blobs[0].shape.dim[2])
num_row = int(blobs[0].shape.dim[3])
else:
num_filters = 1
num_channels = 1
num_col = int(blobs[0].shape.dim[0])
num_row = int(blobs[0].shape.dim[1])
else:
raise Exception("Can't recognize the version %s" % (version))
W = np.array(blobs[0].data).reshape(num_filters, num_channels,
num_col, num_row)[0, 0, :, :]
W = W.T
b = np.array(blobs[1].data)
layer_weights = [W.astype(dtype=np.float32),
b.astype(dtype=np.float32)]
net_weights[layer.name] = layer_weights
elif layer_type == "convolution":
blobs = layer.blobs
if (version == "V1"):
num_filters = blobs[0].num
num_channels = blobs[0].channels
num_col = blobs[0].height
num_row = blobs[0].width
elif (version == "V2"):
num_filters = int(blobs[0].shape.dim[0])
num_channels = int(blobs[0].shape.dim[1])
num_col = int(blobs[0].shape.dim[2])
num_row = int(blobs[0].shape.dim[3])
else:
raise Exception("Can't recognize the version %s" % (version))
num_group = layer.convolution_param.group
num_channels *= num_group
W = np.zeros((num_filters, num_channels, num_col, num_row))
if layer.convolution_param.bias_term:
b = np.array(blobs[1].data)
else:
b = None
group_ds = len(blobs[0].data) // num_group
ncs_group = num_channels // num_group
nfs_group = num_filters // num_group
for i in range(num_group):
group_weights = W[i*nfs_group: (i+1)*nfs_group,
i*ncs_group: (i+1)*ncs_group, :, :]
group_weights[:] = np.array(
blobs[0].data[i*group_ds:
(i+1)*group_ds]).reshape(group_weights.shape)
for i in range(W.shape[0]):
for j in range(W.shape[1]):
W[i, j] = np.rot90(W[i, j], 2)
if b is not None:
layer_weights = [W.astype(dtype=np.float32),
b.astype(dtype=np.float32)]
else:
layer_weights = [W.astype(dtype=np.float32)]
net_weights[layer.name] = layer_weights
elif layer_type == "batchnorm":
blobs = layer.blobs
if (version == "V2"):
num_kernels = int(blobs[0].shape.dim[0])
else:
raise NotImplementedError("Batchnorm is not "
"implemented in %s" % (version))
W_mean = np.array(blobs[0].data)
            W_std = np.array(blobs[1].data)
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import numpy as np
import pandas as pd
import cloudpickle
from ... import opcodes as OperandDef
from ...config import options
from ...serialize import StringField, AnyField, BoolField, \
ListField, Int64Field, Float64Field, BytesField
from ...tensor.utils import normalize_chunk_sizes
from ..operands import DataFrameOperand, DataFrameOperandMixin, ObjectType
from ..utils import parse_index
class DataFrameReadSQLTable(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = OperandDef.READ_SQL_TABLE
_table_name = StringField('table_name')
_con = StringField('con')
_schema = StringField('schema')
_index_col = AnyField('index_col')
_coerce_float = BoolField('coerce_float')
_parse_dates = AnyField('parse_dates')
_columns = ListField('columns')
_chunksize = Int64Field('chunksize')
_engine_kwargs = BytesField('engine_kwargs', on_serialize=cloudpickle.dumps,
on_deserialize=cloudpickle.loads)
_row_memory_usage = Float64Field('row_memory_usage')
# for chunks
_offset = Int64Field('offset')
def __init__(self, table_name=None, con=None, schema=None, index_col=None,
coerce_float=None, parse_dates=None, columns=None, chunksize=None,
engine_kwargs=None, row_memory_usage=None, offset=None,
object_type=None, gpu=None, **kw):
super().__init__(_table_name=table_name, _con=con, _schema=schema,
_index_col=index_col, _coerce_float=coerce_float,
_parse_dates=parse_dates, _columns=columns, _chunksize=chunksize,
_engine_kwargs=engine_kwargs, _row_memory_usage=row_memory_usage,
_offset=offset, _object_type=object_type, _gpu=gpu, **kw)
if self._object_type is None:
self._object_type = ObjectType.dataframe
@property
def table_name(self):
return self._table_name
@property
def con(self):
return self._con
@property
def schema(self):
return self._schema
@property
def index_col(self):
return self._index_col
@property
def coerce_float(self):
return self._coerce_float
@property
def parse_dates(self):
return self._parse_dates
@property
def columns(self):
return self._columns
@property
def chunksize(self):
return self._chunksize
@property
def engine_kwargs(self):
return self._engine_kwargs
@property
def row_memory_usage(self):
return self._row_memory_usage
@property
def offset(self):
return self._offset
def _collect_info(self, engine_or_conn, table, columns, test_rows):
from sqlalchemy import sql
# fetch test DataFrame
query = sql.select(columns).limit(test_rows)
test_df = pd.read_sql(query, engine_or_conn, index_col=self._index_col,
coerce_float=self._coerce_float,
parse_dates=self._parse_dates)
self._row_memory_usage = \
test_df.memory_usage(deep=True, index=True).sum() / test_rows
# fetch size
size = list(engine_or_conn.execute(
sql.select([sql.func.count()]).select_from(table)))[0][0]
shape = (size, test_df.shape[1])
return test_df, shape
@contextmanager
def _create_con(self):
import sqlalchemy as sa
from sqlalchemy.engine import Connection, Engine
# process con
con = self._con
engine = None
if isinstance(con, Connection):
self._con = str(con.engine.url)
# connection create by user
close = False
dispose = False
elif isinstance(con, Engine):
self._con = str(con.url)
con = con.connect()
close = True
dispose = False
else:
engine = sa.create_engine(con, **(self._engine_kwargs or dict()))
con = engine.connect()
close = True
dispose = True
yield con
if close:
con.close()
if dispose:
engine.dispose()
def __call__(self, test_rows, chunk_size):
import sqlalchemy as sa
from sqlalchemy.sql import elements
with self._create_con() as con:
# process table_name
if isinstance(self._table_name, sa.Table):
table = self._table_name
self._table_name = table.name
else:
m = sa.MetaData()
table = sa.Table(self._table_name, m, autoload=True,
autoload_with=con, schema=self._schema)
# process index_col
index_col = self._index_col
if index_col is not None:
if not isinstance(index_col, (list, tuple)):
index_col = (index_col,)
new_index_col = []
sa_index_col = []
for col in index_col:
if isinstance(col, (sa.Column, elements.Label)):
new_index_col.append(col.name)
sa_index_col.append(col)
elif isinstance(col, str):
sa_index_col.append(table.columns[col])
new_index_col.append(col)
elif col is not None:
raise TypeError('unknown index_col type: {}'.format(type(col)))
self._index_col = new_index_col
index_col = sa_index_col
# process columns
columns = self._columns if self._columns is not None else table.columns
new_columns = []
sa_columns = []
for col in columns:
if isinstance(col, str):
new_columns.append(col)
sa_columns.append(table.columns[col])
else:
new_columns.append(col.name)
sa_columns.append(col)
self._columns = new_columns
if self._index_col is not None:
for icol in index_col:
sa_columns.append(icol)
test_df, shape = self._collect_info(con, table, sa_columns, test_rows)
if isinstance(test_df.index, pd.RangeIndex):
index_value = parse_index(pd.RangeIndex(shape[0]))
else:
index_value = parse_index(test_df.index)
columns_value = parse_index(test_df.columns, store_data=True)
return self.new_dataframe(None, shape=shape, dtypes=test_df.dtypes,
index_value=index_value,
columns_value=columns_value,
raw_chunk_size=chunk_size)
@classmethod
def tile(cls, op):
df = op.outputs[0]
chunk_size = df.extra_params.raw_chunk_size or options.chunk_size
if chunk_size is None:
chunk_size = (int(options.chunk_store_limit / op.row_memory_usage), df.shape[1])
row_chunk_sizes = normalize_chunk_sizes(df.shape, chunk_size)[0]
        offsets = np.cumsum((0,) + row_chunk_sizes)
import math
import json
import random
import numpy as np
import multiprocessing
NUM_CENTROIDS = 256
def segregate(attributearray, value):
outlist = []
for i in range(0, len(attributearray)):
if(attributearray[i] == value):
outlist.append(i)
return outlist
def select_labels(labels, ids):
result = []
for i in ids:
result.append(labels[i])
return result
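# Example:
# >>> segregate([0, 1, 1, 0], 1)
# [1, 2]
# >>> select_labels(["a", "b", "c"], [0, 2])
# ['a', 'c']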
def map_mean_function(x):
y = np.array(x)
#y = x
if(y.size > 0):
return np.mean(x, axis=0)
else:
return np.array([])
#Features
features_file = open("data/intrusion.features", "r")
features_str = features_file.read()
features = features_str.split("\n")
print("Reading features from file...")
for idx, item in enumerate(features):
item = item.split(": ")
item[-1] = item[-1][:-1]
features[idx] = item
features = features[:-1]
#print(features)
features_file.close()
# Get continuous and non-continuous
continuous = []
not_continuous = []
for idx, item in enumerate(features):
if "continuous" in item[1]:
continuous.append(idx)
else:
not_continuous.append(idx)
traindata_file = open("data/intrusion.traindata", "r")
traindata_str = traindata_file.read()
traindata = traindata_str.split("\n")
traindata_continuous = []
traindata_discrete = []
td_len = len(traindata)
print("Reading traindata from file...")
for idx, item in enumerate(traindata):
item = item.split(",")
item[-1] = item[-1][:-1]
traindata[idx] = item
if(idx < td_len - 1):
c = [float(item[index]) for index in continuous]
d = [item[index] for index in not_continuous]
d.append(item[-1])
traindata_continuous.append(c)
traindata_discrete.append(d)
if(idx % 10000 == 0):
print("Done reading " + str(idx) + " traindata")
traindata = traindata[:-1]
#traindata_continuous = traindata_continuous[:-1]
#print(traindata)
print("size of traindata: ", len(traindata))
print("size of traindata_continous: ", len(traindata_continuous))
print("size of traindata_discrete: ", len(traindata_discrete))
num_inst = len(traindata_continuous)
num_cattr = len(continuous)
num_dattr = len(not_continuous)
traindata_file.close()
print("Reading attacktypes from file...")
attacktypes_file = open("data/attacktypes.list", "r")
attacktypes_str = attacktypes_file.read()
attacktypes = attacktypes_str.split("\n")
attacktypes = attacktypes[:-1]
attacktypes_map = {}
for idx, item in enumerate(attacktypes):
item = item.split(" ")
attacktypes_map[item[0]] = item[1]
attacktypes[idx] = item
#print(attacktypes)
#print(attacktypes_map)
attacktypes_file.close()
print("converting to numpy matrix....")
cTraindata = np.array(traindata_continuous)
# print("removing not conituous columns....")
# cTraindata = np.delete(traindata_np, not_continuous, 1)
#cTraindata = cTraindata.astype('float32')
print("calculating the mean for each column....")
all_mean = np.mean(cTraindata, axis=0)
#print("all_mean: ", all_mean)
print("calculating the std for each column....")
all_std = np.std(cTraindata, axis=0)
import numpy as np
atts = np.load('./data/attypes.npy', allow_pickle=True)
uatoms = np.unique(np.concatenate(atts))
reps = np.load('./data/aslatms.npy', allow_pickle=True)
reps = [np.array(rep) for rep in reps]
# MIT License
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Functions that transform the raw data into trainable data."""
from itertools import product, starmap
from functools import partial
from typing import List, Tuple
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity
from tqdm import tqdm
def get_claim_map(
n:int,
source_locations: np.ndarray,
bhw: Tuple[int, int, int],
model_src_vals: List[np.ndarray],
) -> np.ndarray:
def get_n_closest_sources_encode(
n:int, # max number of sources to include
source_locations:np.ndarray, # locations of sources as an array of y,x idxs
ij:Tuple[int,int], # the index to retrieve the sources for
) -> Tuple[List[int], Tuple[int, int]]: # The n sources ordered by proximity idxs and the idx ij
distances = np.linalg.norm(source_locations-np.array(ij), axis=1)
closest_n_sources = np.argsort(distances)[:n]
if len(closest_n_sources) < n:
closest_n_sources = np.pad(
closest_n_sources,
(0, n-len(closest_n_sources)),
mode="constant",
constant_values = len(model_src_vals),
)
assert closest_n_sources.shape[0] == n
return (closest_n_sources, ij)
def get_src_flx(
scarlet_data:List[np.ndarray], # SCARLET data
src_idxs:List[int], # idx of source in the SCARLET output
ij:Tuple[List[np.ndarray], Tuple[int, int]], # idx in image space
) -> np.ndarray: # the source value at i,j in each of the bands
i, j = ij
# each element in this list is an array of the flux
# values that belong to each source
# [n, b, 1, 1]
src_flux_values = None
try:
src_flux_values = np.array([scarlet_data[src_idx][:, i, j] for src_idx in src_idxs if (src_idx != len(scarlet_data))])
except:
print(src_idxs)
print(len(scarlet_data))
raise ValueError("")
# this should be [n, b]
if src_flux_values.shape[0] < len(src_idxs):
src_flux_values = np.pad(
src_flux_values,
(
(0, len(src_idxs)-src_flux_values.shape[0]),
(0, 0)
),
mode="constant",
constant_values=0,
)
assert src_flux_values.shape[0]==len(src_idxs), f"{src_flux_values.shape}, {src_idxs}"
assert src_flux_values.shape[1]==scarlet_data[0].shape[0], f"{src_flux_values.shape}, {scarlet_data[0].shape}"
return (src_flux_values, ij)
def update_image(
output_array:np.ndarray, # [h, w, b, n]
normed_flux_vals:np.ndarray, # [n, b]
ij:Tuple[int, int], # pixel location
) -> None:
i, j = ij
output_array[i, j, ...] = normed_flux_vals.T[:]
def normed_combined_flux(
src_flux_values:np.ndarray, # [n, bands]
ij:Tuple[int, int]
) -> Tuple[List[np.ndarray], Tuple[int, int]]:
# restrict flux to positive values
src_flux_cmb = np.clip(np.array(src_flux_values), a_min=0, a_max=None) # [n, b]
flux_norm = src_flux_cmb.sum(axis=0) # [b,] total flux for each band
normed = src_flux_cmb / flux_norm
try:
normed[np.isnan(normed)] = 1 / src_flux_cmb.shape[0]
except:
print(src_flux_values)
print(src_flux_values.shape)
print(src_flux_cmb)
print(src_flux_cmb.shape)
raise ValueError()
return (normed, ij)
out_shape = list(model_src_vals[0].shape[1:]) + [model_src_vals[0].shape[0], n]
output_array = np.zeros(out_shape, dtype=np.float32)
get_n_src_f = partial(get_n_closest_sources_encode, n, source_locations)
get_src_flx_f = partial(get_src_flx, model_src_vals)
update_output_f = partial(update_image, output_array)
img_shape = model_src_vals[0].shape[1:]
idxs = product(range(img_shape[0]), range(img_shape[1]))
n_srcs_per_pixel = map(get_n_src_f, idxs)
src_flx_per_pixel = starmap(get_src_flx_f, n_srcs_per_pixel)
normed_src_flx_per_pixel = starmap(normed_combined_flux, src_flx_per_pixel)
for _ in starmap(update_output_f, normed_src_flx_per_pixel):pass
return output_array
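# Shape sketch: for a (b, h, w) model cube and n=2 closest sources the output
# is (h, w, b, 2), where the last axis holds per-source flux fractions that
# sum to 1 in every band. Worked example: sources with fluxes (3, 1) at a
# pixel give claims (0.75, 0.25); with no positive flux the claim falls back
# to the uniform 1/n split.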
# ==============================================================================
# Discretize claim vector directions
# ==============================================================================
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# ENCODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def get_claim_vector_magnitudes_single_pixel(
neighborhood_vectors: np.ndarray,
claim_vector_magnitude: np.ndarray,
claim_map: np.ndarray,
model_vals: List[np.ndarray],
src_centers: np.ndarray,
y: int,
x: int,
b: int
) -> None:
relative_vectors = src_centers - np.array([y, x])
src_fluxes = np.array([max(model_vals[i][b, y, x], 0) for i in range(len(model_vals))])
max_flux = src_fluxes.max()
normed_flux = src_fluxes / max_flux if max_flux > 0 else src_fluxes
flx_sum = src_fluxes.sum()
uniform_dist = np.ones_like(src_fluxes) / src_fluxes.shape[0]
normed_sum_to_one = src_fluxes / src_fluxes.sum() if flx_sum > 0 else uniform_dist
cosine_measure = cosine_similarity(neighborhood_vectors, relative_vectors)
euclidean_distance = euclidean_distances(neighborhood_vectors, relative_vectors)
euclidean_norm = np.maximum(euclidean_distance.max(axis=1, keepdims=True), 1e-5)
normed_euclidean_distance = euclidean_distance / euclidean_norm
metric = cosine_measure * (1 - normed_euclidean_distance) * (normed_flux[np.newaxis, :])
closest_srcs = np.argmax(metric, axis=1)
selected_srcs = relative_vectors[closest_srcs, :]
_claim_magnitudes = (selected_srcs * neighborhood_vectors).sum(axis=1)
idxs, counts = np.unique(closest_srcs, return_counts=True)
coefs = np.reciprocal(counts.astype(np.float32))
_claim_map = np.array(list(map(
lambda i: coefs[idxs==i][0] * normed_sum_to_one[i],
closest_srcs
)))
claim_vector_magnitude[y, x, b, :] = _claim_magnitudes
claim_map[y, x, b, :] = _claim_map
def get_claim_vector_image_and_map_discrete_directions(
source_locations: np.ndarray,
bkg: np.ndarray,
bhw: Tuple[int, int, int],
model_src_vals: List[np.ndarray],
):
b, h, w = bhw
idxs = product(range(h), range(w), range(b))
neighborhood_vectors = np.array(list(product([0, -1, 1], [0, -1, 1]))[1:], dtype=np.float32)
neighborhood_vectors /= np.linalg.norm(neighborhood_vectors, axis=-1)[:, np.newaxis]
claim_vector_magnitude = np.zeros([h, w, b, 8], dtype=np.float32)
claim_map = np.zeros([h, w, b, 8], dtype=np.float32)
src_ys, src_xs = np.nonzero(source_locations)
src_centers = np.array([src_ys, src_xs]).T # [n, 2]
encode_f = partial(
get_claim_vector_magnitudes_single_pixel,
neighborhood_vectors,
claim_vector_magnitude,
claim_map,
model_src_vals,
src_centers
)
for _ in starmap(encode_f, idxs):
pass
return claim_vector_magnitude, claim_map
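# The eight discrete directions above come from product([0, -1, 1], [0, -1, 1])
# with (0, 0) dropped, each normalized to unit length, i.e. approximately:
# [ 0, -1], [ 0, 1], [-1, 0], [-0.707, -0.707],
# [-0.707, 0.707], [ 1, 0], [ 0.707, -0.707], [ 0.707, 0.707]
# so each pixel stores, per band, one claim magnitude along each of these
# eight rays instead of a free 2-D vector.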
# ENCODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# DECODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def decode_discrete_vectors_single_pixel(
output:np.ndarray, # [n, h, w, b]
neighborhood_vectors: np.ndarray, # [8, 2]
flux:np.ndarray, # [h, w, b]
claim_vector_magnitude:np.ndarray, # [h, w, b, 8]
claim_map:np.ndarray, # [h, w, b, 8]
src_centers:np.ndarray, # [n, 2]
y:int,
x:int,
b:int
) -> None:
pixel_flux = flux[y, x, b]
pixel_magnitudes = claim_vector_magnitude[y, x, b, :].copy()
pixel_claim_map = claim_map[y, x, b, :].copy()
relative_vectors = neighborhood_vectors * pixel_magnitudes[:, np.newaxis]
relative_centers = src_centers - np.array([y, x])
distances = euclidean_distances(relative_vectors, relative_centers) # [n_neighborhood, n_centers]
closest_src = np.argmin(distances, axis=1)
distributed_flux = pixel_flux * pixel_claim_map
def update_output(src_idx:int, flx:float):
output[src_idx, y, x, b] += flx
for _ in starmap(update_output, zip(closest_src, distributed_flux)):
pass
def get_sources_discrete_directions(
flux_image: np.ndarray, # [h, w, b]
claim_vector_magnitude: np.ndarray, # [h, w, b, 8]
claim_map: np.ndarray, # [h, w, b, 8]
background_map: np.ndarray, # [h, w]
center_of_mass: np.ndarray, # [h, w]
bkg_thresh_coef: float = 0.7,
) -> np.ndarray: # [n, h, w, b]
y, x, b = flux_image.shape
src_locations = non_maximum_suppression(7, 0.1, center_of_mass) # [h, w]
src_centers = np.stack(np.nonzero(src_locations), axis=1) + 0.5 # [n, 2]
output = np.zeros([src_centers.shape[0], y, x, b], dtype=np.float32)
neighborhood_vectors = np.array(list(product([0, -1, 1], [0, -1, 1]))[1:], dtype=np.float32)
neighborhood_vectors /= np.linalg.norm(neighborhood_vectors, axis=-1)[:, np.newaxis]
idxs = product(range(y), range(x), range(b))
decode_f = partial(
decode_discrete_vectors_single_pixel,
output,
neighborhood_vectors,
flux_image,
claim_vector_magnitude,
claim_map,
src_centers
)
for _ in starmap(decode_f, idxs):
pass
#filter out background pixels
#bkg_filter = background_map[np.newaxis, :, :, np.newaxis] > bkg_thresh_coef
#return output * bkg_filter
return output
# DECODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ==============================================================================
# Discretize claim vector directions
# ==============================================================================
# ==============================================================================
# Closest n-sources claim vector
# ==============================================================================
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# ENCODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def get_n_closest_claim_vectors_single_pixel(
claim_vectors: np.ndarray, # [h, w, b, n, 2]
claim_map: np.ndarray, # [h, w, b, n]
model_vals: List[np.ndarray],
src_centers: np.ndarray, # [n, 2]
n: int,
y: int,
x: int,
b: int,
) -> None:
relative_vectors = src_centers - np.array([y, x]) # [n_srcs, 2]
relative_distances = np.linalg.norm(relative_vectors, axis=-1) # [n_srcs,]
raw_closest_sources = np.argsort(relative_distances)[:n] # [n, ]
num_pad = n - raw_closest_sources.shape[0]
if num_pad > 0:
n_closest_sources = np.pad(raw_closest_sources, (0, num_pad), mode="edge")
else:
n_closest_sources = raw_closest_sources
selected_srcs = relative_vectors[n_closest_sources]
src_fluxes = np.array([max(model_vals[i][b, y, x], 0) for i in raw_closest_sources])
sum_flux = src_fluxes.sum()
if sum_flux > 0:
normed_flux = src_fluxes / sum_flux
else:
raw_n = raw_closest_sources.shape[0]
normed_flux = np.ones([raw_n], dtype=np.float32) / raw_n
idxs, counts = np.unique(n_closest_sources, return_counts=True)
coefs = np.reciprocal(counts.astype(np.float32))
claim = np.array(list(map(
lambda i: coefs[idxs==i][0] * normed_flux[i==raw_closest_sources][0],
n_closest_sources
)))
claim_vectors[y, x, b, ...] = selected_srcs
claim_map[y, x, b, ...] = claim
def get_n_closest_claim_vectors(
source_locations: np.ndarray,
bkg: np.ndarray,
bhw: Tuple[int, int, int],
model_src_vals: List[np.ndarray],
n: int,
) -> Tuple[np.ndarray, np.ndarray]: # [h, w, b, n], [h, w, b, n, 2]
b, y, x = bhw
src_ys, src_xs = np.nonzero(source_locations)
src_centers = np.array([src_ys, src_xs]).T # [n, 2]
idxs = product(range(y), range(x), range(b))
claim_vector = np.zeros([y, x, b, n, 2], dtype=np.float32)
claim_map = np.zeros([y, x, b, n], dtype=np.float32)
encode_f = partial(
get_n_closest_claim_vectors_single_pixel,
claim_vector,
claim_map,
model_src_vals,
src_centers,
n,
)
for _ in starmap(encode_f, idxs):
pass
return claim_vector, claim_map
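# Padding sketch: with n=3 but only two sources s0 and s1 in the scene,
# n_closest_sources becomes [s0, s1, s1] via edge padding; np.unique then
# yields counts (1, 2), so s1's flux fraction is split as 1/2 across its two
# duplicate slots and the claim map still sums to one per band.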
# ENCODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# DECODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def decode_n_closest_sources_single_pixel(
output:np.ndarray,
flux:np.ndarray,
claim_vector:np.ndarray,
claim_map:np.ndarray,
src_centers:np.ndarray,
y:int,
x:int,
b:int,
) -> None:
pixel_flux = flux[y, x, b]
pixel_vectors = claim_vector[y, x, b, ...].copy() # [n, 2]
pixel_claim_map = claim_map[y, x, b, ...].copy() # [n,]
relative_centers = src_centers - np.array([y, x])
distances = euclidean_distances(pixel_vectors, relative_centers) #[n, n_src_centers]
closest_srcs = np.argmin(distances, axis=1)
distributed_flux = pixel_flux * pixel_claim_map
def update_output(src_idx:int, flx:float):
output[src_idx, y, x, b] += flx
for _ in starmap(update_output, zip(closest_srcs, distributed_flux)):
pass
def decode_n_closest_sources(
flux:np.ndarray,
claim_vector:np.ndarray,
claim_map:np.ndarray,
src_centers:np.ndarray,
background_map: np.ndarray, # [h, w]
center_of_mass: np.ndarray, # [h, w]
bkg_thresh_coef: float = 0.7,
) -> np.ndarray:
src_locations = non_maximum_suppression(7, 0.1, center_of_mass) # [h, w]
src_centers = np.stack(np.nonzero(src_locations), axis=1) + 0.5 # [n, 2]
y, x, b = flux.shape
output = np.zeros([src_centers.shape[0], y, x, b], dtype=np.float32)
idxs = product(range(y), range(x), range(b))
decode_f = partial(
decode_n_closest_sources_single_pixel,
output,
flux,
claim_vector,
claim_map,
src_centers,
)
for _ in starmap(decode_f, tqdm(idxs, total=y*x*b)):
pass
return output
# DECODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ==============================================================================
# Closest n-sources claim vector
# ==============================================================================
# ==============================================================================
# Closest flux-weighted n-sources claim vector
# ==============================================================================
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# ENCODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def get_n_closest_fw_claim_vectors_single_pixel(
claim_vectors: np.ndarray, # [h, w, b, n, 2]
claim_map: np.ndarray, # [h, w, b, n]
model_vals: List[np.ndarray], # list(n)
src_centers: np.ndarray, # [n, 2]
n: int,
y: int,
x: int,
b: int,
) -> None:
relative_vectors = src_centers - np.array([y, x]) # [n_srcs, 2]
relative_distances = np.linalg.norm(relative_vectors, axis=-1) # [n_srcs,]
normed_distances = relative_distances / relative_distances.max() # [n_srcs, ]
src_fluxes = np.array([max(model_vals[i][b, y, x], 0) for i in range(len(model_vals))]) # [n_srcs, ]
max_flux = src_fluxes.max()
if max_flux <= 0:
normed_flux = np.ones([src_fluxes.shape[0]]) / src_fluxes.shape[0] # [n_srcs, ]
normed_sum_to_one = np.ones([src_fluxes.shape[0]]) / src_fluxes.shape[0] # [n_srcs, ]
else:
normed_flux = src_fluxes / src_fluxes.max() # [n_srcs, ]
normed_sum_to_one = src_fluxes / src_fluxes.sum() # [n_srcs, ]
metric = (1 - normed_distances) * normed_flux # [n_srcs, ]
top_srcs = np.argsort(-metric)[:n] # [min(n, n_srcs), ]
num_pad = n - top_srcs.shape[0]
if num_pad > 0:
n_closest_sources = np.pad(top_srcs, (0, num_pad), mode="edge") # [n, ]
else:
n_closest_sources = top_srcs # [n, ]
selected_srcs = relative_vectors[n_closest_sources] # [n, 2]
src_fluxes = np.array([max(model_vals[i][b, y, x], 0) for i in top_srcs]) # [min(n, n_srcs), ]
sum_flux = src_fluxes.sum()
if sum_flux > 0:
normed_flux = src_fluxes / sum_flux # [min(n, n_srcs), ]
else:
        normed_flux = np.ones([src_fluxes.shape[0]], dtype=np.float32) / src_fluxes.shape[0]  # [min(n, n_srcs), ]
idxs, counts = np.unique(n_closest_sources, return_counts=True) # [min(n, n_srcs), ], [min(n, n_srcs), ]
coefs = np.reciprocal(counts.astype(np.float32)) # [min(n, n_srcs), ]
claim = np.array(list(map(
lambda i: coefs[idxs==i][0] * normed_flux[i==top_srcs][0],
n_closest_sources
)))
claim_vectors[y, x, b, ...] = selected_srcs
claim_map[y, x, b, ...] = claim
def get_n_closest_fw_claim_vectors_maps(
source_locations: np.ndarray,
bkg: np.ndarray,
bhw: Tuple[int, int, int],
model_src_vals: List[np.ndarray],
n: int,
) -> Tuple[np.ndarray, np.ndarray]: # [h, w, b, n], [h, w, b, n, 2]
b, y, x = bhw
src_ys, src_xs = np.nonzero(source_locations)
src_centers = np.array([src_ys, src_xs]).T # [n, 2]
idxs = product(range(y), range(x), range(b))
claim_vector = np.zeros([y, x, b, n, 2], dtype=np.float32)
claim_map = np.zeros([y, x, b, n], dtype=np.float32)
encode_f = partial(
get_n_closest_fw_claim_vectors_single_pixel,
claim_vector,
claim_map,
model_src_vals,
src_centers,
n,
)
for _ in starmap(encode_f, idxs):
pass
return claim_vector, claim_map
# ENCODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# DECODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def decode_n_closest_fw_sources_single_pixel(
output:np.ndarray,
flux:np.ndarray,
claim_vector:np.ndarray,
claim_map:np.ndarray,
src_centers:np.ndarray,
y:int,
x:int,
b:int,
) -> None:
pixel_flux = flux[y, x, b]
pixel_vectors = claim_vector[y, x, b, ...].copy() # [n, 2]
pixel_claim_map = claim_map[y, x, b, ...].copy() # [n,]
relative_centers = src_centers - np.array([y, x])
distances = euclidean_distances(pixel_vectors, relative_centers) #[n, n_src_centers]
closest_srcs = np.argmin(distances, axis=1)
distributed_flux = pixel_flux * pixel_claim_map
def update_output(src_idx:int, flx:float):
output[src_idx, y, x, b] += flx
for _ in starmap(update_output, zip(closest_srcs, distributed_flux)):
pass
def decode_n_closest_fw_sources(
flux:np.ndarray,
claim_vector:np.ndarray,
claim_map:np.ndarray,
src_centers:np.ndarray
) -> np.ndarray:
y, x, b = flux.shape
output = np.zeros([src_centers.shape[0], y, x, b], dtype=np.float32)
idxs = product(range(y), range(x), range(b))
decode_f = partial(
decode_n_closest_fw_sources_single_pixel,
output,
flux,
claim_vector,
claim_map,
src_centers,
)
for _ in starmap(decode_f, tqdm(idxs, total=y*x*b)):
pass
return output
# DECODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ==============================================================================
# Closest flux-weighted n-sources claim vector
# ==============================================================================
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ==============================================================================
# Closest n-sources claim vector avg map
# ==============================================================================
# ENCODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def get_n_closest_avg_claim_vector_single_pixel(
claim_vectors: np.ndarray, # [h, w, n, 2]
claim_map: np.ndarray, # [h, w, n]
model_vals: List[np.ndarray], # list(n)
src_centers: np.ndarray, # [n, 2]
n: int,
y: int,
x: int,
) -> None:
relative_vectors = src_centers - np.array([y, x]) # [n_srcs, 2]
relative_distances = np.linalg.norm(relative_vectors, axis=-1) # [n_srcs,]
raw_closest_sources = np.argsort(relative_distances)[:n] # [n, ]
num_pad = n - raw_closest_sources.shape[0]
    if num_pad > 0:
        n_closest_sources = np.pad(raw_closest_sources, (0, num_pad), mode="edge")
    else:
        n_closest_sources = raw_closest_sources
selected_srcs = relative_vectors[n_closest_sources]
claim_vectors[y, x, ...] = selected_srcs
def get_normed_src_fluxes(band:int):
src_fluxes = np.array([max(model_vals[i][band, y, x], 0) for i in raw_closest_sources])
sum_flux = src_fluxes.sum()
if sum_flux > 0:
normed_flux = src_fluxes / sum_flux
else:
            normed_flux = np.ones([src_fluxes.shape[0]], dtype=np.float32) / src_fluxes.shape[0]
return normed_flux
n_bands = model_vals[0].shape[0]
avg_flux_contrib = np.array(list(map(get_normed_src_fluxes, range(n_bands)))).mean(axis=0)
normed_avg_flux_contrib = avg_flux_contrib / avg_flux_contrib.sum()
idxs, counts = np.unique(n_closest_sources, return_counts=True)
coefs = np.reciprocal(counts.astype(np.float32))
claim = np.array(list(map(
lambda i: coefs[idxs==i][0] * normed_avg_flux_contrib[i==raw_closest_sources][0],
n_closest_sources
)))
claim_map[y, x, ...] = claim
def get_n_closest_avg_claim_vector(
source_locations: np.ndarray,
bkg: np.ndarray,
bhw: Tuple[int, int, int],
model_src_vals: List[np.ndarray],
n: int,
) -> Tuple[np.ndarray, np.ndarray]: # [h, w, n], [h, w, n, 2]
_, y, x = bhw
src_ys, src_xs = np.nonzero(source_locations)
src_centers = np.array([src_ys, src_xs]).T # [n, 2]
idxs = product(range(y), range(x))
claim_vector = np.zeros([y, x, n, 2], dtype=np.float32)
claim_map = np.zeros([y, x, n], dtype=np.float32)
encode_f = partial(
get_n_closest_avg_claim_vector_single_pixel,
claim_vector,
claim_map,
model_src_vals,
src_centers,
n,
)
for _ in starmap(encode_f, idxs):
pass
    return claim_vector, claim_map
# ENCODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# DECODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def decode_n_closest_avg_claim_vector_single_pixel(
output:np.ndarray,
flux:np.ndarray,
claim_vector:np.ndarray,
claim_map:np.ndarray,
src_centers:np.ndarray,
y:int,
x:int,
b:int,
) -> None:
pixel_flux = flux[y, x, b]
pixel_vectors = claim_vector[y, x, ...].copy() # [n, 2]
pixel_claim_map = claim_map[y, x, ...].copy() # [n,]
relative_centers = src_centers - np.array([y, x])
distances = euclidean_distances(pixel_vectors, relative_centers) #[n, n_src_centers]
closest_srcs = np.argmin(distances, axis=1)
distributed_flux = pixel_flux * pixel_claim_map
def update_output(src_idx:int, flx:float):
output[src_idx, y, x, b] += flx
for _ in starmap(update_output, zip(closest_srcs, distributed_flux)):
pass
def decode_n_closest_avg_sources(
flux:np.ndarray,
claim_vector:np.ndarray,
claim_map:np.ndarray,
src_centers:np.ndarray
) -> np.ndarray:
y, x, b = flux.shape
output = np.zeros([src_centers.shape[0], y, x, b], dtype=np.float32)
idxs = product(range(y), range(x), range(b))
decode_f = partial(
decode_n_closest_avg_claim_vector_single_pixel,
output,
flux,
claim_vector,
claim_map,
src_centers,
)
for _ in starmap(decode_f, tqdm(idxs, total=y*x*b)):
pass
return output
# DECODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ==============================================================================
# Closest n-sources claim vector avg map
# ==============================================================================
# ==============================================================================
# Closest n-sources claim vector limit bands
# ==============================================================================
# ENCODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def get_n_closest_claim_vector_map_limit_bands_single_pixel(
claim_vectors: np.ndarray, # [h, w, n, 2]
claim_map: np.ndarray, # [h, w, b, n]
model_vals: List[np.ndarray],
src_centers: np.ndarray, # [n_srcs, 2]
n: int,
n_bands: int,
y: int,
x: int,
) -> None:
# Claim vectors ============================================================
relative_vectors = src_centers - np.array([y, x]) # [n_srcs, 2]
relative_distances = np.linalg.norm(relative_vectors, axis=-1) # [n_srcs,]
raw_closest_sources = np.argsort(relative_distances)[:n] # [min(n_srcs, n), ]
num_pad = n - raw_closest_sources.shape[0]
if num_pad > 0:
n_closest_sources = np.pad(raw_closest_sources, (0, num_pad), mode="edge") # [n,]
else:
n_closest_sources = raw_closest_sources # [n,]
selected_srcs = relative_vectors[n_closest_sources] # [n, 2]
claim_vectors[y, x, ...] = selected_srcs
# Claim vectors ============================================================
# Claim maps ===============================================================
raw_n = raw_closest_sources.shape[0]
def get_normed_src_fluxes(band:int):
src_fluxes = np.array([max(model_vals[i][band, y, x], 0) for i in raw_closest_sources])
sum_flux = src_fluxes.sum()
if sum_flux > 0:
normed_flux = src_fluxes / sum_flux
else:
normed_flux = np.ones([raw_n], dtype=np.float32) / raw_n
return normed_flux
band_normed_flux = np.array(list(map(get_normed_src_fluxes, range(n_bands)))) # [n_bands, min(n_src, n)]
if num_pad > 0:
padded_band_normed_flux = np.pad(band_normed_flux, ((0, 0), (0, num_pad)), mode="edge")
else:
padded_band_normed_flux = band_normed_flux
idxs, counts = np.unique(n_closest_sources, return_counts=True) # [min(n_srcs, n), ], [min(n_srcs, n), ]
coefs = np.reciprocal(counts.astype(np.float32)) # [min(n_srcs, n), ]
coef_map = np.array([coefs[idxs==i][0] for i in n_closest_sources])[np.newaxis, :] #[1, n]
try:
claim = padded_band_normed_flux * coef_map
except:
print("raw_closest_sources: ", raw_closest_sources.shape)
print("selected_srcs_shape: ", selected_srcs.shape)
print("band_normed_flux: ", band_normed_flux.shape)
print("padded_band_normed_flux: ", padded_band_normed_flux.shape)
print("coefs: ", coefs.shape)
print("coef_map: ", coef_map.shape)
raise ValueError("Things Broke! Oh Man!")
claim_map[y, x, ...] = claim
# Claim maps ===============================================================
def get_n_closest_claim_vector_map_limit_bands(
source_locations: np.ndarray,
bkg: np.ndarray,
bhw: Tuple[int, int, int],
model_src_vals: List[np.ndarray],
n: int,
n_bands:int,
) -> Tuple[np.ndarray, np.ndarray]: # [h, w, bands, n], [h, w, bands, n, 2]
b, y, x = bhw
src_ys, src_xs = np.nonzero(source_locations)
src_centers = np.array([src_ys, src_xs]).T # [n, 2]
idxs = product(range(y), range(x))
claim_vector = np.zeros([y, x, n, 2], dtype=np.float32)
claim_map = np.zeros([y, x, n_bands, n], dtype=np.float32)
encode_f = partial(
get_n_closest_claim_vector_map_limit_bands_single_pixel,
claim_vector,
claim_map,
model_src_vals,
src_centers,
n,
n_bands,
)
for _ in starmap(encode_f, idxs):
pass
return claim_vector, claim_map
# ENCODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# DECODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def decode_n_closest_claim_vector_map_limit_bands_single_pixel(
output:np.ndarray,
flux:np.ndarray,
claim_vector:np.ndarray,
claim_map:np.ndarray,
src_centers:np.ndarray,
y:int,
x:int,
b:int,
) -> None:
pixel_flux = flux[y, x, b]
    pixel_vectors = claim_vector[y, x, ...].copy()  # [n, 2]; shared across bands
pixel_claim_map = claim_map[y, x, b, ...].copy() # [n,]
relative_centers = src_centers - np.array([y, x])
distances = euclidean_distances(pixel_vectors, relative_centers) #[n, n_src_centers]
closest_srcs = np.argmin(distances, axis=1)
distributed_flux = pixel_flux * pixel_claim_map
def update_output(src_idx:int, flx:float):
output[src_idx, y, x, b] += flx
for _ in starmap(update_output, zip(closest_srcs, distributed_flux)):
pass
def decode_n_closest_claim_vector_map_limit_bands(
flux:np.ndarray,
claim_vector:np.ndarray,
claim_map:np.ndarray,
src_centers:np.ndarray
) -> np.ndarray:
y, x, b = flux.shape
output = np.zeros([src_centers.shape[0], y, x, b], dtype=np.float32)
idxs = product(range(y), range(x), range(b))
decode_f = partial(
        decode_n_closest_claim_vector_map_limit_bands_single_pixel,
output,
flux,
claim_vector,
claim_map,
src_centers,
)
for _ in starmap(decode_f, tqdm(idxs, total=y*x*b)):
pass
return output
# DECODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ==============================================================================
# Closest n-sources claim vector limit bands
# ==============================================================================
def get_claim_vector_image_and_map(
source_locations: np.ndarray,
bkg: np.ndarray,
bhw: Tuple[int, int, int],
model_src_vals: List[np.ndarray],
):
# Updates claim_vector_image and claim_map_image in place
def single_pixel_vector(
claim_vector_image: np.ndarray,
claim_map_image: np.ndarray,
centers: np.ndarray,
bkg:np.ndarray,
i: int,
j: int,
b: int,
) -> None:
ijb_src_flux = np.array([m[b, i, j] for m in model_src_vals])
ijb_src_flux_mask = ijb_src_flux > 0
if bkg[i, j, 0] > 0.9 or ijb_src_flux_mask.sum()==0:
idxs = list(product([-1, 0, 1], [-1, 0, 1]))
idxs.remove((0, 0))
            claim_vector_image[i, j, b, ...] = np.array(idxs)
'Volume generation and augmentation'
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# MIT License
import keras
import os.path
import numpy as np
from tqdm import tqdm
from sklearn.neighbors import KDTree
from sklearn.decomposition import PCA
from enzynet.PDB import PDB_backbone
current_directory = os.path.dirname(os.path.abspath(__file__))
precomputed_path = os.path.join(current_directory, '../files/precomputed/')
PDB_path = os.path.join(current_directory, '../files/PDB/')
class VolumeDataGenerator(keras.utils.Sequence):
"""
Generates batches of volumes containing 3D structure of enzymes as well
as their associated class labels on the fly.
To be passed as argument in the fit_generator function of Keras.
Parameters
----------
v_size : int (optional, default is 32)
Size of each side of the output volumes.
flips : tuple of floats (optional, default is (0.2, 0.2, 0.2))
Probabilities that the volumes are flipped respectively with respect
to x, y, and z.
batch_size : int (optional, default is 32)
Number of samples in output array of each iteration of the 'generate'
method.
directory_precomputed : string (optional, default points to 'files/precomputed')
Path of the precomputed files.
directory_pdb : string (optional, default points to 'files/PDB')
Path of the PDB files.
labels : dict
Dictionary linking PDB IDs to their labels.
list_enzymes : list of strings
List of enzymes to generate.
shuffle : boolean (optional, default is True)
If True, shuffles order of exploration.
p : int (optional, default is 5)
Interpolation of enzymes with p added coordinates between each pair
of consecutive atoms.
max_radius : float (optional, default is 40)
Maximum radius of sphere that will completely fit into the volume.
noise_treatment : boolean (optional, default is False)
If True, voxels with no direct neighbor will be deleted.
weights : list of strings (optional, default is [])
List of weights (among the values ['hydropathy', 'charge']) to consider
as additional channels.
scaling_weights : boolean (optional, default is True)
If True, divides all weights by the weight that is maximum in absolute
value.
Example
-------
>>> from enzynet.volume import VolumeDataGenerator
>>> from enzynet.tools import read_dict
>>> labels = read_dict('../datasets/dataset_single.csv')
>>> partition_red = read_dict('../../datasets/partition_single_red.csv')
>>> exec("partition_red['train'] = " + partition_red['train'])
>>> generator = VolumeDataGenerator(partition_red['train'], labels,
v_size=64, flips=(0.2, 0.2, 0.2),
batch_size=32, shuffle=True, p=5,
max_radius=40, noise_treatment=False,
weights=[], scaling_weights=True)
"""
def __init__(self, list_enzymes, labels, v_size=32, flips=(0.2, 0.2, 0.2), batch_size=32,
directory_precomputed=precomputed_path, directory_pdb=PDB_path,
shuffle=True, p=5, max_radius=40, noise_treatment=False,
weights=[], scaling_weights=True):
'Initialization'
self.batch_size = batch_size
self.directory_precomputed = directory_precomputed
self.directory_pdb = directory_pdb
self.flips = flips
self.labels = labels
self.list_enzymes = list_enzymes
self.max_radius = max_radius
self.noise_treatment = noise_treatment
self.n_channels = max(1, len(weights))
self.p = p
self.scaling_weights = scaling_weights
self.shuffle = shuffle
self.v_size = v_size
self.weights = weights
self.on_epoch_end()
def check_precomputed(self):
'Checks if all coordinates and weights have been precomputed, and precomputes them otherwise'
# Initialization
list_enzymes = list(self.labels)
counter = 0
# Loop over all enzymes
for pdb_id in tqdm(list_enzymes):
# Find name of paths
names = [precomputed_name(pdb_id, self.directory_precomputed, 'coords', self.p)] + \
[precomputed_name(pdb_id, self.directory_precomputed, 'weights', self.p,
weight, self.scaling_weights)
for weight in self.weights]
# Precomputes all files
if all([os.path.isfile(name) for name in names]): # Check if all already exist
pass
else: # Precomputes files otherwise
save_coords_weights(pdb_id, self.weights, self.p, self.scaling_weights,
self.directory_pdb, self.directory_precomputed)
counter += 1
print("Had to compute files of {0} enzymes.".format(counter))
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_enzymes))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor(len(self.list_enzymes) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
list_enzymes_temp = [self.list_enzymes[k] for k in indexes]
# Generate data
X, y = self.__data_augmentation(list_enzymes_temp)
return X, y
def __data_augmentation(self, list_enzymes_temp):
'Returns augmented data with batch_size enzymes' # X : (n_samples, v_size, v_size, v_size, n_channels)
# Initialization
X = np.empty((self.batch_size, # n_enzymes
self.v_size, # dimension w.r.t. x
self.v_size, # dimension w.r.t. y
self.v_size, # dimension w.r.t. z
self.n_channels)) # n_channels
y = np.empty((self.batch_size), dtype=int)
# Computations
for i in range(self.batch_size):
# Store class
y[i] = self.labels[list_enzymes_temp[i]]
# Load precomputed coordinates
coords = load_coords(list_enzymes_temp[i], self.p, self.directory_precomputed)
coords = coords_center_to_zero(coords)
coords = adjust_size(coords, v_size=self.v_size, max_radius=self.max_radius)
# Get weights
local_weights = []
for weight in self.weights:
local_weight = load_weights(list_enzymes_temp[i], weight, self.p,
self.scaling_weights, self.directory_precomputed) # Compute extended weights
local_weights += [local_weight] # Store
# PCA
coords = PCA(n_components=3).fit_transform(coords)
# Do flip
coords_temp = flip_around_axis(coords, axis=self.flips)
if len(self.weights) == 0:
# Convert to volume and store
X[i, :, :, :, 0] = coords_to_volume(coords_temp, self.v_size,
noise_treatment=self.noise_treatment)
else:
# Compute to weights of volume and store
for k in range(self.n_channels):
X[i, :, :, :, k] = weights_to_volume(coords_temp, local_weights[k],
self.v_size, noise_treatment=self.noise_treatment)
return X, sparsify(y)
def coords_to_volume(coords, v_size, noise_treatment=False):
'Converts coordinates to binary voxels' # Input is centered on [0,0,0]
return weights_to_volume(coords=coords, weights=1, v_size=v_size, noise_treatment=noise_treatment)
def weights_to_volume(coords, weights, v_size, noise_treatment=False):
'Converts coordinates to voxels with weights' # Input is centered on [0,0,0]
# Initialization
volume = np.zeros((v_size, v_size, v_size))
# Translate center
coords = coords + np.full((coords.shape[0], 3), (v_size-1)/2)
# Round components
coords = coords.astype(int)
# Filter rows with values that are out of the grid
mask = ((coords >= 0) & (coords < v_size)).all(axis=1)
# Convert to volume
volume[tuple(coords[mask].T)] = weights[mask] if type(weights) != int else weights
# Remove noise
if noise_treatment == True:
volume = remove_noise(coords, volume)
return volume
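# Example sketch: a single centered atom lands in the voxel nearest the grid
# center after the translate-and-truncate step above.
# >>> v = weights_to_volume(np.array([[0., 0., 0.]]), 1, v_size=4)
# >>> np.nonzero(v)
# (array([1]), array([1]), array([1]))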
def coords_center_to_zero(coords):
'Centering coordinates on [0,0,0]'
barycenter = get_barycenter(coords)
return coords - np.full((coords.shape[0], 3), barycenter)
def adjust_size(coords, v_size=32, max_radius=40):
return np.multiply((v_size/2-1)/max_radius, coords)
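# Example: with v_size=32 and max_radius=40 the scale factor is
# (32/2 - 1)/40 = 0.375, so a point 40 units from the center lands
# 15 voxels from the center and just fits inside the grid.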
def sparsify(y):
'Returns labels in binary NumPy array'
n_classes = 6
return np.array([[1 if y[i] == j+1 else 0 for j in range(n_classes)]
for i in range(y.shape[0])])
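# Example: class labels are 1-based, so label 1 maps to the first column.
# >>> sparsify(np.array([1, 3]))
# array([[1, 0, 0, 0, 0, 0],
#        [0, 0, 1, 0, 0, 0]])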
def flip_around_axis(coords, axis=(0.2, 0.2, 0.2)):
'Flips coordinates randomly w.r.t. each axis with its associated probability'
for col in range(3):
if np.random.binomial(1, axis[col]):
coords[:,col] = np.negative(coords[:,col])
return coords
def get_barycenter(coords):
'Gets barycenter point of a Nx3 matrix'
    return np.array([np.mean(coords, axis=0)])
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal as mvn
cov = np.array([[1,0.8],[0.8,3]]) # a covariance matrix
mu = np.array([0,2])
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
import tsim
import sys
""" Vector Bit Slice and Pack Function
Parameters
----------
A : Vector to be sliced and packed
slice_width : slice width
Returns
---------
C: 2d matrix where each row (bit-packed) represents one bit slice of A
"""
def slice(A, slice_width):
assert np.log2(slice_width) % 1 == 0, "only power of 2 is supported"
dtype = type(A[0])
row = 0
# currently only supports uint
if dtype is np.uint8: row = 8 // slice_width
elif dtype is np.uint16: row = 16 // slice_width
elif dtype is np.uint32: row = 32 // slice_width
elif dtype is np.uint64: row = 64 // slice_width
else: raise ValueError("datatype " + str(dtype) + "currently not supported")
if (row >= 8):
dtype = 'uint' + str(row)
else:
dtype = 'uint8'
C = np.zeros((row, len(A))).astype(dtype) # sliced and transform
# create mask
slice_mask = 2**(slice_width)-1
# slice and pack
for x in range(len(A)):
for y in range(row):
C[y][x] = (np.uint64(A[x]) >> np.uint64(slice_width * y)) & np.uint64(slice_mask)
return C
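# Worked example (uint8 input, 2-bit slices -> 4 rows), from least to most
# significant slice of 181 = 0b10110101:
# >>> slice(np.array([181], dtype=np.uint8), 2)
# array([[1], [1], [3], [2]], dtype=uint8)
# Recombining 1 + 1*4 + 3*16 + 2*64 = 181 recovers the original value.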
""" Matrix Multiplication Function
Parameters
----------
A : Matrix A
B: Matrix B
w_width : weight slice width
a_width : activation slice width
Returns
---------
C: result of A * B
"""
# A is a n*m matrix, B is a m*p matrix(not transposed yet)
def matrix_multiply(A, B, w_width, a_width):
assert A.shape[1] == B.shape[0], "can't perform multiplication"
BT = B.transpose()
cycles = 0
C = np.zeros((A.shape[0], B.shape[1])).astype('uint64')
for i in range(A.shape[0]):
for j in range(B.shape[1]):
# C[i, j] = A[i].dot(BT[j])
A_sliced = slice(A[i], w_width)
B_sliced = slice(BT[j], a_width)
C[i, j] = compute(A_sliced, B_sliced, w_width, a_width)
test = test_accel(A_sliced, B_sliced, w_width, a_width)
cycles += test[1]
np.testing.assert_equal(C[i,j], A[i].astype('uint64').dot(BT[j]))
print("PASS SW serial & parallel")
np.testing.assert_equal(test[0], C[i, j])
print("PASS SW & HW bit serial")
np.testing.assert_equal(test[0], A[i].astype('uint64').dot(BT[j]))
print("PASS SW bit parallel & HW bit parallel")
print("result: ")
print(C)
print("ALL TESTS PASSED, cycles: " + str(cycles))
return C
""" Software Verification Function"""
# takes 2 matrix input (sliced and packed)
def compute(A, B, w_width, a_width):
assert A.shape[1] == B.shape[1], "sliced shape not match"
# reset hardware accumulator
accum = 0
for x in range(A.shape[0]):
for y in range(B.shape[0]):
# hardware implementation
accum += np.uint64(A[x]).dot(np.uint64(B[y])) << np.uint64(x*w_width + y*a_width)
# get value from accumulator
return accum
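# --- Worked self-check (added for illustration; not in the original file). The
# `slice` and `compute` functions above are pure numpy, so the bit-serial identity
# a.b == sum_{x,y} (A_x . B_y) << (x*w_width + y*a_width) can be verified without
# the tsim hardware module:
def _selfcheck_bit_serial(w_width=2, a_width=2):
    a = np.array([3, 7, 255, 0], dtype=np.uint8)
    b = np.array([1, 2, 3, 4], dtype=np.uint8)
    ref = a.astype('uint64').dot(b.astype('uint64'))  # 3*1 + 7*2 + 255*3 + 0*4 = 782
    got = compute(slice(a, w_width), slice(b, a_width), w_width, a_width)
    assert got == ref, (got, ref)
    return got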
"""Testing Function for Dot Product"""
def test_accel(A, B, w_width, a_width):
assert A.shape[1] == B.shape[1], "sliced shape not match"
dtype = A.dtype
ctx = tvm.cpu(0)
f = tsim.load_module()
a_arr = []
b_arr = []
for i in range(A.shape[0]):
list_a = np.zeros(A.shape[1]).astype(dtype)
for j in range(A.shape[1]):
list_a[j] = A[i][j]
a_arr.append(tvm.nd.array(list_a.astype(dtype), ctx))
for i in range(B.shape[0]):
list_b = np.zeros(B.shape[1]).astype(dtype)
for j in range(B.shape[1]):
list_b[j] = B[i][j]
b_arr.append(tvm.nd.array(list_b.astype(dtype), ctx))
cycles = 0
accum = tvm.nd.array(np.array([0]).astype("uint64"), ctx)
for i in range(len(a_arr)):
for j in range(len(b_arr)):
            shift = np.uint8(i*w_width + j*a_width)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Generate map plots.
Example::
$ python plot_maps.py -c : plot from files from combined
output file
$ python plot_maps.py -m max_id : plot from files with maximal subset
id of max_id
$ python plot_maps.py -h : display this help
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.colors as colors
from matplotlib.ticker import LogFormatter
import cartopy
import cartopy.crs as ccrs
# General plot settings.
cline_label_format_default = '%.1f'
n_fill_levels_default = 14
n_line_levels_default = 6
color_map = plt.get_cmap('YlOrRd')  # alternatives: 'coolwarm', 'RdBu', 'bwr'
if __name__ == "__main__":
# TODO this has to be changed to work via class!
# from ..utils.convenience_utils import hour_to_date_str
from plotting_utils import read_dataset_user_input
# Load the processed data from the NetCDF files specified in the input.
nc = read_dataset_user_input()
# TODO remove - use config for this!
lons = nc['longitude'].values
lats = nc['latitude'].values
height_range_floor = 50.
height_range_ceilings = list(nc['height_range_ceiling'].values)
fixed_heights = list(nc['fixed_height'].values)
integration_range_ids = list(nc['integration_range_id'].values)
p_integral_mean = nc['p_integral_mean'].values
# Hours since 1900-01-01 00:00:00, see: print(nc['time'].values).
hours = nc['time'].values
# print("Analyzing " + hour_to_date_str(hours[0]) + " till "
# + hour_to_date_str(hours[-1]))
# TODO fix from config
else:
lons = list(np.arange(-20, 20.25, .25))
lats = list(np.arange(65, 29.75, -.25))
# #lons = np.arange(-12, -5.0, .25) # config.Data.all_lons
# #lats = np.arange(51, 56.25, .25) # config.Data.all_lats
# else:
# # TODO make more understandable
# # TODO make into utils -> use for map plots in production
# # TODO fix from config
# # Ireland
# # lons = list(np.arange(-12, -5.0, .25)) # -5.75, .25))
# # lats = list(np.arange(51, 56.25, .25))
# # Europe map
# lons = list(np.arange(-20, 20.25, .25))
# lats = list(np.arange(65, 29.75, -.25))
# Plotting map - region selection # TODO rework -> config
plot_northern_germany = False
label_cities = False
map_plot_aspect_ratio = 9 / 12.5 # len(lons)/len(lats) # TODO this makes sense - adapt fixed number later on -> adaptable
mrc = ccrs.Mercator()
def calc_fig_height(fig_width, subplot_shape, plot_frame_top,
plot_frame_bottom, plot_frame_left, plot_frame_right):
""""Calculate figure height, such that all maps have the same resolution.
Args:
fig_width (float): Figure width in inches.
subplot_shape (tuple of int): Containing number of rows and columns of
subplot.
plot_frame_top (float): Top of plot as a fraction of the figure window
height w.r.t. bottom.
plot_frame_bottom (float): Bottom of plot as a fraction of the figure
window height w.r.t. bottom.
plot_frame_left (float): Left side of plot as a fraction of the figure
window width w.r.t. left.
plot_frame_right (float): Right side of plot as a fraction of the
figure window width w.r.t. left.
Returns:
float: Figure height in inches.
"""
plot_frame_width = fig_width*(plot_frame_right - plot_frame_left)
plot_frame_height = plot_frame_width/(map_plot_aspect_ratio *
subplot_shape[1] / subplot_shape[0])
fig_height = plot_frame_height/(plot_frame_top - plot_frame_bottom)
return fig_height
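# Example (added for illustration, not from the original): the 9-inch-wide 1x3
# panel used by plot_panel_1x3 below, with its frame fractions, yields the
# matching height via the aspect ratio defined above:
# calc_fig_height(9., (1, 3), .95, 0., .035, 0.88)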
def eval_contour_fill_levels(plot_items):
""""Evaluate the plot data, e.g. if values are within contour fill
levels limits.
Args:
plot_items (list of dict): List containing the plot property dicts.
"""
for i, item in enumerate(plot_items):
max_value = np.amax(item['data'])
min_value = np.amin(item['data'])
print("Max and min value of plot"
" {}: {:.3f} and {:.3f}".format(i, max_value, min_value))
if item['contour_fill_levels'][-1] < max_value:
print("Contour fills "
"(max={:.3f}) do not cover max value of plot {}"
.format(item['contour_fill_levels'][-1], i))
if item['contour_fill_levels'][0] > min_value:
print("Contour fills "
"(min={:.3f}) do not cover min value of plot {}"
.format(item['contour_fill_levels'][0], i))
def individual_plot(z, cf_lvls, cl_lvls,
cline_label_format=cline_label_format_default,
log_scale=False,
extend="neither",
overflow=None):
""""Individual plot of coastlines and contours.
Args:
z (ndarray): 2D array containing contour plot data.
cf_lvls (list): Contour fill levels.
cl_lvls (list): Contour line levels.
cline_label_format (str, optional): Contour line label format string.
Defaults to `cline_label_format_default`.
log_scale (bool): Logarithmic scaled contour levels are used if True,
linearly scaled if False.
extend (str): Setting for extension of contour fill levels.
Returns:
QuadContourSet: Contour fills object.
"""
    # Take care if colorbar ticks are set beforehand, see plot_single_panel
# colors_undersea = plt.cm.terrain(np.linspace(0, 0.17, 56))
# colors_land = plt.cm.terrain(np.linspace(0.25, 1, 200))
# # combine them and build a new colormap
# colors_stack = np.vstack((colors_undersea, colors_land))
# color_map = colors.LinearSegmentedColormap.from_list('color_map',
# colors_stack)
color_map = plt.get_cmap('YlOrRd')
if overflow is not None:
n_normal = 224
n_over = 32
top_overflow = overflow
colors_underflow = []
underflow_bounds = []
min_val = np.min(z)
if isinstance(overflow, list):
top_overflow = overflow[1]
min_val = overflow[0]
n_over = int(n_over/2)
colors_underflow = list(plt.get_cmap('coolwarm')(
np.linspace(0, 0.21, n_over)))
underflow_bounds = list(np.linspace(np.min(z), min_val,
n_over+1))[:-1]
colors_normal = list(plt.get_cmap('YlOrRd')(
np.linspace(0, .9, n_normal)))
colors_overflow = list(
plt.get_cmap('Greens')(np.linspace(0.5, 1, n_over)))
all_colors = colors_underflow + colors_normal + colors_overflow
color_map = mpl.colors.LinearSegmentedColormap.from_list(
'color_map', all_colors)
normal_bounds = list(np.linspace(min_val,
top_overflow, n_normal+1))[:-1]
overflow_bounds = list(np.linspace(top_overflow,
np.max(z), n_over))
bounds = underflow_bounds + normal_bounds + overflow_bounds
norm = mpl.colors.BoundaryNorm(boundaries=bounds, ncolors=256)
elif log_scale:
norm = colors.LogNorm(vmin=cf_lvls[0], vmax=cf_lvls[-1])
else:
norm = None
if extend == 'neither':
# plot with appropriate parameters
# zorder: put the filled-contour below coastlines
contour_fills = plt.contourf(lons, lats, z, cf_lvls,
transform=cartopy.crs.PlateCarree(),
zorder=0.5,
cmap=color_map,
norm=norm)
else:
contour_fills = plt.contourf(lons, lats, z, cf_lvls,
transform=cartopy.crs.PlateCarree(),
zorder=0.5,
cmap=color_map,
norm=norm,
extend=extend)
contour_lines = plt.contour(lons, lats, z, cl_lvls, colors='0.1',
transform=cartopy.crs.PlateCarree(),
linewidths=1)
# Label levels with specially formatted floats
plt.rcParams['font.weight'] = 'bold'
plt.clabel(contour_lines, fmt=cline_label_format, inline=1, fontsize=9,
colors='k')
plt.rcParams['font.weight'] = 'normal'
if label_cities: # TODO remove/ better: test locations
HH = (53.551086, 9.993682)
Hannover = (52.373954, 9.741647)
Bremen = (53.075176, 8.801850)
city_labels = ['Hamburg', 'Hannover', 'Bremen']
        x_cities = [HH[1], Hannover[1], Bremen[1]]
        y_cities = [HH[0], Hannover[0], Bremen[0]]
        plt.plot(x_cities, y_cities, 'o', color='darkslategrey',
                 markersize=4, transform=cartopy.crs.PlateCarree())
        for label, xpt, ypt in zip(city_labels, x_cities, y_cities):
            plt.text(xpt+0.5, ypt+0.01, label, color='darkslategrey',
                     fontsize=6, transform=cartopy.crs.PlateCarree())
return contour_fills
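# --- Minimal sketch (added for illustration; not in the original file) of the
# normal/overflow colormap construction used inside individual_plot: stack two
# color lists into one segmented colormap and let BoundaryNorm assign value
# ranges to them. The value bounds here are made-up assumptions.
def _overflow_cmap_sketch(vmin=0., vsplit=10., vmax=15.):
    colors_normal = list(plt.get_cmap('YlOrRd')(np.linspace(0, .9, 224)))
    colors_over = list(plt.get_cmap('Greens')(np.linspace(.5, 1, 32)))
    cmap = mpl.colors.LinearSegmentedColormap.from_list(
        'sketch', colors_normal + colors_over)
    bounds = (list(np.linspace(vmin, vsplit, 225))[:-1]
              + list(np.linspace(vsplit, vmax, 32)))
    norm = mpl.colors.BoundaryNorm(boundaries=bounds, ncolors=256)
    return cmap, norm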
def plot_single_panel(plot_item, plot_title='',
overflow=None):
""""Plot panel with one individual plot.
Args:
plot_item (dict): Individual properties of the plots.
plot_title (string, optional): Title to be written above the plot.
"""
# Set up figure, calculate figure height corresponding to desired width.
plot_frame_top, plot_frame_bottom, plot_frame_left, \
plot_frame_right = .95, 0.15, 0., 1.
bottom_pos_colorbar = .09
fig_width = 3
if plot_title == '':
plot_frame_top = 1.
plot_frame_width = plot_frame_right - plot_frame_left
width_colorbar = plot_frame_width*0.9
fig_height = calc_fig_height(fig_width, (1, 1), plot_frame_top,
plot_frame_bottom, plot_frame_left,
plot_frame_right)
fig, ax = plt.subplots(1, 1, figsize=(fig_width, fig_height), dpi=150,
subplot_kw={'projection': mrc})
fig.subplots_adjust(top=plot_frame_top, bottom=plot_frame_bottom,
left=plot_frame_left, right=plot_frame_right,
hspace=0.0, wspace=0.0)
ax.coastlines(color='darkslategrey') # TODO resolution='50m', color='black', linewidth=1)
# Plot the data.
# Mapping individual properties of the plots.
z = plot_item['data']
cf_lvls = plot_item['contour_fill_levels']
cl_lvls = plot_item['contour_line_levels']
cb_ticks = plot_item['colorbar_ticks']
cb_tick_fmt = plot_item['colorbar_tick_fmt']
apply_log_scale = plot_item.get('log_scale', False)
extend = plot_item.get('extend', "neither")
cl_label_fmt = plot_item.get('contour_line_label_fmt', None)
if cl_label_fmt is None:
cl_label_fmt = cb_tick_fmt.replace("{:", "%").replace("}", "")
plt.title(plot_title)
contour_fills = individual_plot(z, cf_lvls, cl_lvls,
cline_label_format=cl_label_fmt,
log_scale=apply_log_scale,
extend=extend,
overflow=overflow)
# Add axis for colorbar.
i = 0
left_pos_colorbar = plot_frame_width*i + \
(plot_frame_width-width_colorbar)/2 + plot_frame_left
cbar_ax = fig.add_axes([left_pos_colorbar, bottom_pos_colorbar,
width_colorbar, 0.035])
if apply_log_scale:
formatter = LogFormatter(10, labelOnlyBase=False)
else:
formatter = None
cbar = plt.colorbar(contour_fills, orientation="horizontal",
cax=cbar_ax, ticks=cb_ticks, format=formatter)
cbar.ax.set_xticklabels([cb_tick_fmt.format(t) for t in cb_ticks])
cbar.set_label(plot_item['colorbar_label'])
def plot_panel_1x3(plot_items, column_titles, row_item):
""""Plot panel with 3 columns of individual plots.
Args:
plot_items (list of dict): Individual properties of the plots.
column_titles (list): Plot titles per column.
row_item (dict): General properties of the plots.
"""
# Set up figure, calculate figure height corresponding to desired width.
bottom_pos_colorbar = .09
fig_width = 9.
plot_frame_top, plot_frame_bottom, plot_frame_left, \
plot_frame_right = .95, 0, .035, 0.88
fig_height = calc_fig_height(fig_width, (1, 3), plot_frame_top,
plot_frame_bottom, plot_frame_left,
plot_frame_right)
fig, axs = plt.subplots(1, 3, figsize=(fig_width, fig_height), dpi=150,
subplot_kw={'projection': mrc})
fig.subplots_adjust(top=plot_frame_top, bottom=plot_frame_bottom,
left=plot_frame_left, right=plot_frame_right,
hspace=0.0, wspace=0.0)
# Mapping general properties of the plots.
cf_lvls = row_item['contour_fill_levels']
cb_tick_fmt = row_item.get('colorbar_tick_fmt', "{:.1f}")
cl_label_fmt = row_item.get('contour_line_label_fmt', None)
if cl_label_fmt is None:
cl_label_fmt = cb_tick_fmt.replace("{:", "%").replace("}", "")
# Plot the data.
for ax, title, plot_item in zip(axs, column_titles, plot_items):
# Mapping individual properties of the plots.
z = plot_item['data']
cl_lvls = plot_item['contour_line_levels']
plt.axes(ax)
ax.coastlines(color='darkslategrey') # TODO resolution='50m', color='black', linewidth=1)
plt.title(title)
contour_fills = individual_plot(z, cf_lvls, cl_lvls,
cline_label_format=cl_label_fmt)
# Add axis for colorbar.
height_colorbar = .85
bottom_pos_colorbar = (plot_frame_top - height_colorbar)/2
cbar_ax = fig.add_axes([0.91, bottom_pos_colorbar, 0.02, height_colorbar])
cbar = fig.colorbar(contour_fills, cax=cbar_ax,
ticks=row_item['colorbar_ticks'])
cbar.ax.set_yticklabels([cb_tick_fmt.format(t)
for t in row_item['colorbar_ticks']])
cbar.set_label(row_item['colorbar_label'])
def plot_panel_1x3_seperate_colorbar(plot_items, column_titles):
""""Plot panel with 3 columns of individual plots using solely seperate
plot properties.
Args:
plot_items (list of dict): Individual properties of the plots.
column_titles (list): Plot titles per column.
"""
# Set up figure, calculate figure height corresponding to desired width.
plot_frame_top, plot_frame_bottom, plot_frame_left, \
plot_frame_right = .95, 0.17, 0., 1.
width_colorbar = .27
bottom_pos_colorbar = .1
fig_width = 9.*(0.88-.035)
if column_titles is None:
plot_frame_top = 1.
column_titles = [None]*3
plot_frame_width = plot_frame_right - plot_frame_left
fig_height = calc_fig_height(fig_width, (1, 3), plot_frame_top,
plot_frame_bottom, plot_frame_left,
plot_frame_right)
fig, axs = plt.subplots(1, 3, figsize=(fig_width, fig_height), dpi=150,
subplot_kw={'projection': mrc})
fig.subplots_adjust(top=plot_frame_top, bottom=plot_frame_bottom,
left=plot_frame_left, right=plot_frame_right,
hspace=0.0, wspace=0.0)
# Plot the data.
for i, (ax, title, plot_item) in enumerate(zip(axs, column_titles,
plot_items)):
# Mapping individual properties of the plots.
z = plot_item['data']
cf_lvls = plot_item['contour_fill_levels']
cl_lvls = plot_item['contour_line_levels']
cb_ticks = plot_item['colorbar_ticks']
cb_tick_fmt = plot_item['colorbar_tick_fmt']
apply_log_scale = plot_item.get('log_scale', False)
extend = plot_item.get('extend', "neither")
cl_label_fmt = plot_item.get('contour_line_label_fmt', None)
if cl_label_fmt is None:
cl_label_fmt = cb_tick_fmt.replace("{:", "%").replace("}", "")
plt.axes(ax)
ax.coastlines(color='darkslategrey') # TODO resolution='50m', color='black', linewidth=1)
plt.title(title)
contour_fills = individual_plot(z, cf_lvls, cl_lvls,
cline_label_format=cl_label_fmt,
log_scale=apply_log_scale,
extend=extend)
# Add axis for colorbar.
left_pos_colorbar = plot_frame_width/3*i + \
(plot_frame_width/3-width_colorbar)/2 + plot_frame_left
cbar_ax = fig.add_axes([left_pos_colorbar, bottom_pos_colorbar,
width_colorbar, 0.035])
if apply_log_scale:
formatter = LogFormatter(10, labelOnlyBase=False)
else:
formatter = None
cbar = plt.colorbar(contour_fills, orientation="horizontal",
cax=cbar_ax, ticks=cb_ticks, format=formatter)
cbar.ax.set_xticklabels([cb_tick_fmt.format(t) for t in cb_ticks])
cbar.set_label(plot_item['colorbar_label'])
def plot_panel_2x3(plot_items, column_titles, row_items):
""""Plot panel with 2 rows and 3 columns of individual plots.
Args:
plot_items (list of dict): Individual properties of the plots.
column_titles (list): Plot titles per column.
row_items (list of dict): Properties of the plots shared per row.
"""
    # Set up figure, calculate figure height corresponding to
# desired width.
plot_frame_top, plot_frame_bottom, plot_frame_left, \
plot_frame_right = .96, 0.0, .035, 0.88
fig_width = 9.
fig_height = calc_fig_height(fig_width, (2, 3), plot_frame_top,
plot_frame_bottom, plot_frame_left,
plot_frame_right)
fig, axs = plt.subplots(2, 3, figsize=(fig_width, fig_height), dpi=150,
subplot_kw={'projection': mrc})
fig.subplots_adjust(top=plot_frame_top, bottom=plot_frame_bottom,
left=plot_frame_left, right=plot_frame_right,
hspace=0.0, wspace=0.0)
# Positioning of colorbars.
height_colorbar = .4
right_pos_colorbar = .9
for i_row, row_item in enumerate(row_items):
# Mapping properties of the plots shared per row.
cb_tick_fmt = row_item.get('colorbar_tick_fmt', "{:.1f}")
extend = row_item.get('extend', "neither")
cl_label_fmt = row_item.get('contour_line_label_fmt', None)
if cl_label_fmt is None:
cl_label_fmt = cb_tick_fmt.replace("{:", "%").replace("}", "")
cf_lvls = row_items[i_row]['contour_fill_levels']
        # Plot the maps of this row.
for ax, plot_item in zip(axs[i_row, :], plot_items[i_row]):
# Mapping individual properties of the plots.
z = plot_item['data']
cl_lvls = plot_item['contour_line_levels']
plt.axes(ax)
ax.coastlines(color='darkslategrey') # TODO resolution='50m', color='black', linewidth=1)
contour_fills = individual_plot(z, cf_lvls, cl_lvls,
cline_label_format=cl_label_fmt,
extend=extend)
# Add axis for colorbar.
bottom_pos_colorbar = (1-i_row)*plot_frame_top/2 + \
(plot_frame_top/2-height_colorbar)/2
cbar_ax = fig.add_axes([right_pos_colorbar, bottom_pos_colorbar,
0.02, height_colorbar])
cbar = fig.colorbar(contour_fills, cax=cbar_ax,
ticks=row_item['colorbar_ticks'])
cbar.ax.set_yticklabels([cb_tick_fmt.format(t)
for t in row_item['colorbar_ticks']])
cbar.set_label(row_item['colorbar_label'])
# Add subplot row and column labels.
row_titles = [r['title'] for r in row_items]
for ax, col in zip(axs[0], column_titles):
ax.annotate(col, xy=(0.5, 1), xytext=(0, 5.),
xycoords='axes fraction', textcoords='offset points',
size='large', ha='center', va='baseline')
for ax, row in zip(axs[:, 0], row_titles):
ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad + 2., 0),
xycoords=ax.yaxis.label, textcoords='offset points',
size='large', ha='right', va='center', rotation=90)
def percentile_plots(plot_var, i_case, plot_settings):
"""" Reading processed data and plotting the 5th, 32nd, 50th percentile
maps. Used for figure 3.
Args:
plot_var (str): Name of plotting variable in netCDF source file.
i_case (int): Id of plotted case.
plot_settings (dict): Individual and shared properties of the plots.
"""
column_titles = ["5th percentile", "32nd percentile", "50th percentile"]
plot_var_suffix = ["_perc5", "_perc32", "_perc50"]
# Read data from NetCDF source file.
plot_items = []
plot_data_max = 0
for s in plot_var_suffix:
d = nc[plot_var+s].values[i_case, :, :]
if plot_var[0] == "p":
d *= 1e-3
plot_items.append({'data': d})
if np.amax(d) > plot_data_max:
plot_data_max = np.amax(d)
# Mapping plot properties and splitting up into individual and
# shared properties.
plot_handling = plot_settings["plot_handling"]
contour_fill_levels = plot_handling["contour_fill_levels"]
contour_line_levels = plot_handling.get("contour_line_levels", 3 *
[contour_fill_levels])
colorbar_ticks = plot_handling.get("colorbar_ticks", contour_fill_levels)
colorbar_label = plot_settings["color_label"]
# Write the contour handling to plot_items.
for i, plot_item in enumerate(plot_items):
plot_item['contour_line_levels'] = contour_line_levels[i]
# Write the row dependent settings to row_items.
row_item = {
'colorbar_ticks': colorbar_ticks,
'colorbar_label': colorbar_label,
'contour_fill_levels': contour_fill_levels,
}
if 'colorbar_tick_fmt' in plot_handling:
row_item['colorbar_tick_fmt'] = plot_handling["colorbar_tick_fmt"]
if 'contour_line_label_fmt' in plot_handling:
row_item['contour_line_label_fmt'] = \
plot_handling["contour_line_label_fmt"]
plot_panel_1x3(plot_items, column_titles, row_item)
def percentile_plots_ref(plot_var, i_case, plot_var_ref, i_case_ref,
plot_settings_abs, plot_settings_rel):
"""" Reading processed data and plotting the 5th, 32nd, 50th percentile
maps on the first row and the relative
increase w.r.t the reference case on the second row. Used for figure 7.
Args:
plot_var (str): Name of plotting variable in netCDF source file.
i_case (int): Id of plotted case.
plot_var_ref (str): Name of reference variable in netCDF source file.
i_case_ref (int): Id of reference case
plot_settings_abs (dict): Individual and shared properties of the top
row plots.
plot_settings_rel (dict): Individual and shared properties of the
bottom row plots.
"""
column_titles = ["5th percentile", "32nd percentile", "50th percentile"]
row_titles = ['Absolute value', 'Relative to reference case']
plot_var_suffix = ["_perc5", "_perc32", "_perc50"]
# Read data from NetCDF source file.
plot_items = [[], []]
plot_data_max, plot_data_relative_max = 0, 0
for s in plot_var_suffix:
d = nc[plot_var+s].values[i_case, :, :]
if plot_var[0] == "p":
d *= 1e-3
plot_items[0].append({'data': d})
if np.amax(d) > plot_data_max:
plot_data_max = np.amax(d)
d_ref = nc[plot_var_ref+s].values[i_case_ref, :, :]
if plot_var[0] == "p":
d_ref *= 1e-3
d_relative = d/d_ref
plot_items[1].append({'data': d_relative})
if np.amax(d_relative) > plot_data_relative_max:
plot_data_relative_max = np.amax(d_relative)
print("Max absolute and relative value are respectively {:.2f} and {:.2f}"
.format(plot_data_max, plot_data_relative_max))
# Mapping plot properties and splitting up into individual and shared properties.
plot_handling = plot_settings_abs["plot_handling"]
contour_fill_levels = plot_handling["contour_fill_levels"]
contour_line_levels = plot_handling.get("contour_line_levels", 3*[contour_fill_levels])
colorbar_ticks = plot_handling.get("colorbar_ticks", contour_fill_levels)
contour_fill_levels_rel = plot_settings_rel["contour_fill_levels"]
contour_line_levels_rel = plot_settings_rel.get("contour_line_levels", 3*[contour_fill_levels_rel])
colorbar_ticks_rel = plot_settings_rel.get("colorbar_ticks", contour_fill_levels_rel)
# Write the contour handling to plot_items.
for i, plot_item in enumerate(plot_items[0]):
plot_item['contour_line_levels'] = contour_line_levels[i]
for i, plot_item in enumerate(plot_items[1]):
plot_item['contour_line_levels'] = contour_line_levels_rel[i]
# Write the row dependent settings to row_items.
row_items = []
for i in range(2):
row_items.append({
'title': row_titles[i],
})
row_items[0]['colorbar_ticks'] = colorbar_ticks
row_items[0]['colorbar_label'] = plot_settings_abs["color_label"]
row_items[0]['contour_fill_levels'] = contour_fill_levels
if 'colorbar_tick_fmt' in plot_handling:
row_items[0]['colorbar_tick_fmt'] = plot_handling["colorbar_tick_fmt"]
row_items[0]['contour_line_label_fmt'] = '%.1f'
row_items[1]['colorbar_ticks'] = colorbar_ticks_rel
row_items[1]['colorbar_label'] = "Increase factor [-]"
row_items[1]['contour_fill_levels'] = contour_fill_levels_rel
if 'colorbar_tick_fmt' in plot_settings_rel:
row_items[1]['colorbar_tick_fmt'] = plot_settings_rel["colorbar_tick_fmt"]
row_items[1]['extend'] = plot_settings_rel.get('extend', "neither")
plot_panel_2x3(plot_items, column_titles, row_items)
def plot_figure5():
"""" Generate integrated mean power plot. """
column_titles = ["50 - 150m", "10 - 500m", "Ratio"]
linspace0 = np.linspace(0, .31, 21)
plot_item0 = {
'data': p_integral_mean[0, :, :]*1e-6,
'contour_line_levels': linspace0[::4],
'contour_fill_levels': linspace0,
'colorbar_ticks': linspace0[::4],
'colorbar_tick_fmt': '{:.2f}',
'colorbar_label': '[$MWm/m^2$]',
}
linspace1 = np.linspace(0, 1.5, 21)
plot_item1 = {
'data': p_integral_mean[1, :, :]*1e-6,
'contour_line_levels': linspace1[::4],
'contour_fill_levels': linspace1,
'colorbar_ticks': linspace1[::4],
'colorbar_tick_fmt': '{:.2f}',
'colorbar_label': '[$MWm/m^2$]',
}
logspace2 = np.logspace(np.log10(4), np.log10(28.0), num=17)
plot_item2 = {
'data': plot_item1['data']/plot_item0['data'],
'contour_line_levels': [10, 15],
'contour_fill_levels': logspace2,
'colorbar_ticks': logspace2[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Increase factor [-]',
}
plot_items = [plot_item0, plot_item1, plot_item2]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
def plot_figure3():
"""" Generate fixed height wind speed plot. """
plot_settings = {
"color_label": 'Wind speed [m/s]',
"plot_handling": {
"contour_fill_levels": np.arange(0, 15.1, 0.5), # 13.1, 1),
"contour_line_levels": [
[1., 2., 3., 4.],
[3., 5., 7., 9.],
[5., 7., 9., 11.],
],
"colorbar_ticks": np.arange(0, 15, 2), # 13, 2),
"colorbar_tick_fmt": "{:.0f}",
'contour_line_label_fmt': '%.1f',
},
}
percentile_plots("v_fixed", 0, plot_settings)
def plot_figure4():
"""" Generate fixed height power density plot. """
column_titles = ["5th percentile", "32nd percentile", "50th percentile"]
fixed_height_ref = 100.
fixed_height_id = list(fixed_heights).index(fixed_height_ref)
linspace0 = np.linspace(0, 0.027, 21) # np.linspace(0, .033, 21)
plot_item0 = {
'data': nc["p_fixed_perc5"].values[fixed_height_id, :, :]*1e-3,
'contour_fill_levels': linspace0,
'contour_line_levels': sorted([.003]+list(linspace0[::5])),
'contour_line_label_fmt': '%.3f',
'colorbar_ticks': linspace0[::5],
'colorbar_tick_fmt': '{:.3f}',
'colorbar_label': 'Power density [$kW/m^2$]',
}
linspace1 = np.linspace(0, 0.45, 21) # np.linspace(0, .45, 21)
plot_item1 = {
'data': nc["p_fixed_perc32"].values[fixed_height_id, :, :]*1e-3,
'contour_fill_levels': linspace1,
'contour_line_levels': sorted([.04]+list(linspace1[::4])),
'contour_line_label_fmt': '%.2f',
'colorbar_ticks': linspace1[::4],
'colorbar_tick_fmt': '{:.2f}',
'colorbar_label': 'Power density [$kW/m^2$]',
}
linspace2 = np.linspace(0, 0.95, 21) # np.linspace(0, 1, 21)
plot_item2 = {
'data': nc["p_fixed_perc50"].values[fixed_height_id, :, :]*1e-3,
'contour_fill_levels': linspace2,
'contour_line_levels': sorted([.1]+list(linspace2[::4])),
'contour_line_label_fmt': '%.2f',
'colorbar_ticks': linspace2[::4],
'colorbar_tick_fmt': '{:.2f}',
'colorbar_label': 'Power density [$kW/m^2$]',
}
plot_items = [plot_item0, plot_item1, plot_item2]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
def plot_figure8():
"""" Generate baseline comparison wind speed plot. """
linspace_absolute = np.linspace(0, 15, 21) # np.arange(0, 15.1, 1)
plot_settings_absolute_row = {
"color_label": 'Wind speed [m/s]',
"plot_handling": {
"contour_fill_levels": linspace_absolute,
"colorbar_ticks": linspace_absolute[::2],
"contour_line_levels": [
linspace_absolute,
[5., 7., 9., 10.],
[7., 9., 11., 13.],
],
"colorbar_tick_fmt": "{:.0f}",
},
}
linspace_relative = np.linspace(0, 2, 21) # np.linspace(1., 2.2, 21)
plot_settings_relative_row = {
"contour_fill_levels": linspace_relative,
"colorbar_ticks": linspace_relative[::4],
"contour_line_levels": [
[1.1, 1.4, 1.7],
[1.1, 1.4, 1.7],
[1.1, 1.4, 1.7],
],
'extend': 'max',
}
percentile_plots_ref("v_ceiling", height_range_ceilings.index(500),
"v_fixed", fixed_heights.index(100),
plot_settings_absolute_row, plot_settings_relative_row)
def plot_figure9_upper():
"""" Generate baseline comparison wind power plot - upper part. """
column_titles = ["5th percentile", "32nd percentile", "50th percentile"]
height_ceiling = 500.
height_ceiling_id = list(height_range_ceilings).index(height_ceiling)
linspace0 = np.linspace(0, .04, 21)
plot_item0 = {
'data': nc["p_ceiling_perc5"].values[height_ceiling_id, :, :]*1e-3,
'contour_fill_levels': linspace0,
'contour_line_levels': linspace0[::5],
'contour_line_label_fmt': '%.2f',
'colorbar_ticks': linspace0[::5],
'colorbar_tick_fmt': '{:.2f}',
'colorbar_label': 'Power density [$kW/m^2$]',
}
linspace1 = np.linspace(0, .6, 21)
plot_item1 = {
'data': nc["p_ceiling_perc32"].values[height_ceiling_id, :, :]*1e-3,
'contour_fill_levels': linspace1,
'contour_line_levels': linspace1[::4],
'contour_line_label_fmt': '%.2f',
'colorbar_ticks': linspace1[::4],
'colorbar_tick_fmt': '{:.2f}',
'colorbar_label': 'Power density [$kW/m^2$]',
}
linspace2 = np.linspace(0, 1.3, 21)
plot_item2 = {
'data': nc["p_ceiling_perc50"].values[height_ceiling_id, :, :]*1e-3,
'contour_fill_levels': linspace2,
'contour_line_levels': linspace2[::4],
'contour_line_label_fmt': '%.2f',
'colorbar_ticks': linspace2[::4],
'colorbar_tick_fmt': '{:.2f}',
'colorbar_label': 'Power density [$kW/m^2$]',
}
plot_items = [plot_item0, plot_item1, plot_item2]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
def plot_figure9_lower():
"""" Generate baseline comparison wind power plot - lower part. """
column_titles = None
height_ceiling = 500.
height_ceiling_id = list(height_range_ceilings).index(height_ceiling)
fixed_height_ref = 100.
fixed_height_id = list(fixed_heights).index(fixed_height_ref)
linspace0 = np.linspace(0, 20, 21)
plot_item0 = {
'data': nc["p_ceiling_perc5"].values[height_ceiling_id, :, :]
/ nc["p_fixed_perc5"].values[fixed_height_id, :, :],
'contour_fill_levels': linspace0,
'contour_line_levels': np.arange(2., 5., 1.),
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace0[::4],
'colorbar_tick_fmt': '{:.1f}',
'colorbar_label': 'Increase factor [-]',
'extend': 'max',
}
linspace1 = np.linspace(0, 10, 21)
plot_item1 = {
'data': nc["p_ceiling_perc32"].values[height_ceiling_id, :, :]
/ nc["p_fixed_perc32"].values[fixed_height_id, :, :],
'contour_fill_levels': linspace1,
'contour_line_levels': linspace1[::4],
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace1[::4],
'colorbar_tick_fmt': '{:.1f}',
'colorbar_label': 'Increase factor [-]',
'extend': 'max',
}
linspace2 = np.linspace(0, 10, 21)
plot_item2 = {
'data': nc["p_ceiling_perc50"].values[height_ceiling_id, :, :]
/ nc["p_fixed_perc50"].values[fixed_height_id, :, :],
'contour_fill_levels': linspace2,
'contour_line_levels': linspace2[::4],
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace2[::4],
'colorbar_tick_fmt': '{:.1f}',
'colorbar_label': 'Increase factor [-]',
'extend': 'max',
}
plot_items = [plot_item0, plot_item1, plot_item2]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
def plot_figure10():
"""" Generate power availability plot. """
height_ceiling = 500.
height_ceiling_id = list(height_range_ceilings).index(height_ceiling)
linspace00 = np.linspace(0, 100, 21)
plot_item00 = {
'data': 100.-nc["p_ceiling_rank40"].values[height_ceiling_id, :, :],
'contour_fill_levels': linspace00,
'contour_line_levels': [70., 80., 90., 95.],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace00[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability [%]',
'extend': 'min',
}
linspace01 = np.linspace(0, 100, 21)
plot_item01 = {
'data': 100.-nc["p_ceiling_rank300"].values[height_ceiling_id, :, :],
'contour_fill_levels': linspace01,
'contour_line_levels': linspace01[::4][2:],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace01[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability [%]',
}
linspace02 = np.linspace(0, 70, 21)
plot_item02 = {
'data': 100.-nc["p_ceiling_rank1600"].values[height_ceiling_id, :, :],
'contour_fill_levels': linspace02,
'contour_line_levels': linspace02[::4][2:],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace02[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability [%]',
}
column_titles = ["40 $W/m^2$", "300 $W/m^2$", "1600 $W/m^2$"]
plot_items = [plot_item00, plot_item01, plot_item02]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
linspace10 = np.linspace(0., 50., 21)
plot_item10 = {
'data': (100.-nc["p_ceiling_rank40"].values[height_ceiling_id, :, :]) -
(100.-nc["p_fixed_rank40"].values[0, :, :]),
'contour_fill_levels': linspace10,
'contour_line_levels': sorted([1.1, 2.2]+list(linspace10[::4][:-2])),
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace10[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability increase [%]',
}
linspace11 = np.linspace(0., 55., 21)
plot_item11 = {
'data': (100.-nc["p_ceiling_rank300"].values[height_ceiling_id, :, :]) -
(100.-nc["p_fixed_rank300"].values[0, :, :]),
'contour_fill_levels': linspace11,
'contour_line_levels': linspace11[::4][:-2],
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace11[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability increase [%]',
}
linspace12 = np.linspace(0., 45., 21)
plot_item12 = {
'data': (100.-nc["p_ceiling_rank1600"].values[height_ceiling_id, :, :]) -
(100.-nc["p_fixed_rank1600"].values[0, :, :]),
'contour_fill_levels': linspace12,
'contour_line_levels': linspace12[::4][:-2],
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace12[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability increase [%]',
}
column_titles = None
plot_items = [plot_item10, plot_item11, plot_item12]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
def plot_figure11():
"""" Generate 40 W/m^2 power availability plot for alternative height ceilings. """
height_ceilings = [300., 1000., 1250.]
height_ceiling_ids = [list(height_range_ceilings).index(height_ceiling) for height_ceiling in height_ceilings]
baseline_height_ceiling = 500.
baseline_height_ceiling_id = list(height_range_ceilings).index(baseline_height_ceiling)
linspace00 = np.linspace(0, 100, 21)
plot_item00 = {
'data': 100.-nc["p_ceiling_rank40"].values[height_ceiling_ids[0], :, :],
'contour_fill_levels': linspace00,
'contour_line_levels': [70., 80., 90., 95.],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace00[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability [%]',
'extend': 'min',
}
linspace01 = np.linspace(10, 100, 21)
plot_item01 = {
'data': 100.-nc["p_ceiling_rank40"].values[height_ceiling_ids[1], :, :],
'contour_fill_levels': linspace01,
'contour_line_levels': [70., 80., 90., 95.],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace01[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability [%]',
'extend': 'min',
}
linspace02 = np.linspace(10, 100, 21)
plot_item02 = {
'data': 100.-nc["p_ceiling_rank40"].values[height_ceiling_ids[2], :, :],
'contour_fill_levels': linspace02,
'contour_line_levels': [70., 80., 90., 95.],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace02[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability [%]',
'extend': 'min',
}
column_titles = ["300 m", "1000 m", "1250 m"]
plot_items = [plot_item00, plot_item01, plot_item02]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
linspace10 = np.linspace(0., 22., 21)
plot_item10 = {
'data': -(100.-nc["p_ceiling_rank40"].values[height_ceiling_ids[0], :, :]) +
(100.-nc["p_ceiling_rank40"].values[baseline_height_ceiling_id, :, :]),
'contour_fill_levels': linspace10,
'contour_line_levels': sorted([1.1]+list(linspace10[::4])),
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace10[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability decrease [%]',
}
linspace11 = np.linspace(0., 38., 21)
plot_item11 = {
'data': (100.-nc["p_ceiling_rank40"].values[height_ceiling_ids[1], :, :]) -
(100.-nc["p_ceiling_rank40"].values[baseline_height_ceiling_id, :, :]),
'contour_fill_levels': linspace11,
'contour_line_levels': sorted([2.3]+list(linspace11[::4])),
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace11[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability increase [%]',
}
linspace12 = np.linspace(0., 50., 21)
plot_item12 = {
'data': (100.-nc["p_ceiling_rank40"].values[height_ceiling_ids[2], :, :]) -
(100.-nc["p_ceiling_rank40"].values[baseline_height_ceiling_id, :, :]),
'contour_fill_levels': linspace12,
'contour_line_levels': sorted([3.8]+list(linspace12[::4])),
'contour_line_label_fmt': '%.1f',
'colorbar_ticks': linspace12[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'Availability increase [%]',
}
column_titles = None
plot_items = [plot_item10, plot_item11, plot_item12]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
def plot_mean_and_ratio(data_type='v',
fill_range=[0, 20],
ratio_range=[0, 2],
line_levels=[2, 5, 15, 20],
n_decimals=0):
if data_type == 'v':
label = r'v [m/s]'
scale = 1
ratio_levels = [1.1, 1.3, 1.6]
elif data_type == 'p':
label = r'Power density [$kW/m^2$]'
scale = 10**(-3)
ratio_levels = [1, 3, 4.5]
height_ceiling = 500.
height_ceiling_id = list(height_range_ceilings).index(height_ceiling)
fixed_height_ref = 100.
fixed_height_id = list(fixed_heights).index(fixed_height_ref)
# TODO automatize with data?
plot_title = '500m ceiling'
linspace00 = np.linspace(fill_range[0], fill_range[1], 21)
plot_item = {
'data': nc['{}_ceiling_mean'.format(data_type)].values[height_ceiling_id, :, :]*scale,
'contour_fill_levels': linspace00,
'contour_line_levels': line_levels,
'contour_line_label_fmt': '%.{}f'.format(n_decimals),
'colorbar_ticks': linspace00[::4],
'colorbar_tick_fmt': '{:.1f}',
'colorbar_label': label,
'extend': 'max',
}
eval_contour_fill_levels([plot_item])
plot_single_panel(plot_item, plot_title=plot_title)
plot_title = '100m fixed'
linspace00 = np.linspace(fill_range[0], fill_range[1], 21)
plot_item = {
'data': nc['{}_fixed_mean'.format(data_type)].values[fixed_height_id, :, :]*scale,
'contour_fill_levels': linspace00,
'contour_line_levels': line_levels,
'contour_line_label_fmt': '%.{}f'.format(n_decimals),
'colorbar_ticks': linspace00[::4],
'colorbar_tick_fmt': '{:.1f}',
'colorbar_label': label,
'extend': 'max',
}
eval_contour_fill_levels([plot_item])
plot_single_panel(plot_item, plot_title=plot_title)
plot_title = 'Ratio using 100m'
linspace00 = np.linspace(ratio_range[0], ratio_range[1], 25)
plot_item = {
'data': nc['{}_ceiling_mean'.format(data_type)].values[height_ceiling_id, :, :]/nc[
'{}_fixed_mean'.format(data_type)].values[fixed_height_id, :, :],
'contour_fill_levels': linspace00,
'contour_line_levels': ratio_levels,
'contour_line_label_fmt': '%.{}f'.format(1),
'colorbar_ticks': linspace00[::4],
'colorbar_tick_fmt': '{:.1f}',
'colorbar_label': '{}/{}_ref [-]'.format(data_type, data_type),
'extend': 'max',
}
eval_contour_fill_levels([plot_item])
plot_single_panel(plot_item, plot_title=plot_title)
def plot_surface_elevation_from_geopotential():
from process_data_paper import get_surface_elevation
data = get_surface_elevation(lats, lons, remove_neg=False,
revert_lat=True)
data[np.logical_and(data < 20, data > 0)] = 0
plot_title = 'Topography'
# color_map = plt.get_cmap('terrain')
# Set range such that 0 is at blue part
min_range_data, max_range = np.min(data), np.max(data)
blue = 56/256.
min_range = blue/(1 - blue) * max_range
if -min_range > min_range_data:
        print('Min range does not cover the full data minimum.')
    linspace00 = np.linspace(-min_range, max_range, 42)
"""FILE CREATED BY: <NAME>, <EMAIL>
Copyright by RoMeLa (Robotics and Mechanisms Laboratory, University of California, Los Angeles)"""
# This file provides a stochastic and robust model predictive controller for a simple unmanned ground vehicle. It
# moves the vehicle to any desired goal location while considering obstacles (represented as polygons and circles)
# and cross-communication with another robot (using cooperative localization algorithms).
from casadi import *
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import mpl_toolkits.mplot3d.art3d as art3d
from matplotlib.patches import Circle
from scipy.spatial import distance
import matplotlib.pyplot as plt
import math as m
import control
from scipy.stats import linregress
#from ROS_interface import *
class SMPC_UGV_Planner():
def __init__(self, dT, mpc_horizon, curr_pos, robot_size, lb_state,
ub_state, lb_control, ub_control, Q, R, angle_noise_r1, angle_noise_r2,
relative_measurement_noise_cov, maxComm_distance, obs, animate):
# initialize Optistack class
self.opti = casadi.Opti()
# dt = discretized time difference
self.dT = dT
# mpc_horizon = number of time steps for the mpc to look ahead
self.N = mpc_horizon
# robot_size = input a radius value, where the corresponding circle represents the size of the robot
self.robot_size = robot_size
# lower_bound_state = numpy array corresponding to the lower limit of the robot states, e.g.
        # lb_state = np.array([[-20], [-20], [-pi]], dtype=float); the same for the upper limit (ub). Similar symbolic
        # representation for the controls (lb_control and ub_control) as well
self.lb_state = lb_state
self.ub_state = ub_state
self.lb_control = lb_control
self.ub_control = ub_control
# Q and R diagonal matrices, used for the MPC objective function, Q is 3x3, R is 4x4 (first 2 diagonals
# represent the cost on linear and angular velocity, the next 2 diagonals represent cost on state slack,
# and terminal slack respectively. The P diagonal matrix represents the cost on the terminal constraint.
self.Q = Q
self.R_dare = R
self.R = np.array([[R[0,0], 0], [0, R[2,2]]])
# initialize discretized state matrices A and B (note, A is constant, but B will change as it is a function of
# state theta)
        self.A = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
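        # --- Hedged continuation (the original file is truncated here). B depends
        # on the heading theta; for the standard discretized unicycle model (an
        # assumption, not confirmed by this file) it would take the form:
        #   B(theta) = dT * [[cos(theta), 0],
        #                    [sin(theta), 0],
        #                    [0,          1]]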
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import math
import onnx
from onnx import helper, TensorProto, mapping
import torch
import torchvision
import topi
import topi.testing
import tvm
from tvm import te
from tvm import relay
from tvm.contrib import graph_runtime
from tvm.relay.testing.config import ctx_list
import scipy
def get_input_data_shape_dict(graph_def, input_data):
if isinstance(input_data, list):
input_names = {}
shape_dict = {}
for i, _ in enumerate(input_data):
input_names[i] = graph_def.graph.input[i].name
shape_dict[input_names[i]] = input_data[i].shape
else:
input_names = graph_def.graph.input[0].name
shape_dict = {input_names: input_data.shape}
return input_names, shape_dict
def get_tvm_output_with_vm(graph_def, input_data, target, ctx, opset=None):
""" Generic function to execute and get tvm output with vm executor"""
_, shape_dict = get_input_data_shape_dict(graph_def, input_data)
mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset)
ex = relay.create_executor('vm', mod=mod, ctx=ctx, target=target)
indata = tvm.nd.array(input_data)
result = ex.evaluate()(indata)
return result.asnumpy()
def get_tvm_output(graph_def, input_data, target, ctx, output_shape=None, output_dtype='float32', opset=None):
""" Generic function to execute and get tvm output"""
    target = 'llvm'  # note: this path always builds for llvm, overriding the target argument
input_names, shape_dict = get_input_data_shape_dict(graph_def, input_data)
mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset)
with tvm.transform.PassContext(opt_level=1):
graph, lib, params = relay.build(mod,
target,
params=params)
ctx = tvm.cpu(0)
m = graph_runtime.create(graph, lib, ctx)
# set inputs
if isinstance(input_data, list):
for i, e in enumerate(input_names):
            # It's possible for some onnx inputs to not be needed in the tvm
            # module; confirm it's present before setting.
try:
m.set_input(input_names[i], tvm.nd.array(
input_data[i].astype(input_data[i].dtype)))
            except Exception:
continue
else:
m.set_input(input_names, tvm.nd.array(
input_data.astype(input_data.dtype)))
m.set_input(**params)
# execute
m.run()
# get outputs
if isinstance(output_shape, list) and isinstance(output_dtype, list):
tvm_output_list = []
for i, _ in enumerate(output_shape):
tvm_output = m.get_output(i)
tvm_output_list.append(tvm_output.asnumpy())
return tvm_output_list
else:
tvm_output = m.get_output(0)
return tvm_output.asnumpy()
def get_onnxruntime_output(model, inputs, dtype='float32'):
import onnxruntime.backend
rep = onnxruntime.backend.prepare(model, 'CPU')
if isinstance(inputs, list) and len(inputs) > 1:
ort_out = rep.run(inputs)
else:
x = inputs.astype(dtype)
ort_out = rep.run(x)[0]
return ort_out
def verify_onnx_forward_impl(graph_file, data_shape, out_shape):
dtype = 'float32'
x = np.random.uniform(size=data_shape)
model = onnx.load_model(graph_file)
c2_out = get_onnxruntime_output(model, x, dtype)
for target, ctx in ctx_list():
tvm_out = get_tvm_output(model, x, target, ctx, out_shape, dtype)
tvm.testing.assert_allclose(c2_out, tvm_out, rtol=1e-5, atol=1e-5)
def test_reshape():
in_shape = (4, 3, 3, 4)
ref_shape = (6, 2, 4, 3)
ref_array = np.array(ref_shape)
ref_node = onnx.helper.make_node('Constant',
inputs=[],
outputs=['ref_in'],
value=onnx.helper.make_tensor(name='const_tensor',
data_type=onnx.TensorProto.INT32,
dims=ref_array.shape,
vals=ref_array.flatten().astype(int)))
reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])
graph = helper.make_graph([ref_node, reshape_node],
"reshape_test",
inputs=[helper.make_tensor_value_info("in",
TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(ref_shape))])
model = helper.make_model(graph, producer_name='reshape_test')
for target, ctx in ctx_list():
x = np.random.uniform(size=in_shape).astype('int32')
tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'float32')
tvm.testing.assert_allclose(ref_shape, tvm_out.shape)
def test_expand():
def _test_expand(name, data, shape, ref_data):
shape_array = np.array(shape)
shape_node = onnx.helper.make_node('Constant',
inputs=[],
outputs=['shape'],
value=onnx.helper.make_tensor(name = 'const_tensor',
data_type = onnx.TensorProto.INT32,
dims = shape_array.shape,
vals = shape_array.flatten().astype('int32')))
expand_node = helper.make_node("Expand", ["in", "shape"], ["out"])
graph = helper.make_graph([shape_node, expand_node],
"expand_test",
inputs = [helper.make_tensor_value_info("in",
TensorProto.FLOAT, list(data.shape))],
outputs = [helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(ref_data.shape))])
model = helper.make_model(graph, producer_name=name)
for target, ctx in ctx_list():
tvm_out = get_tvm_output(model, data, target, ctx, ref_data.shape, 'float32')
tvm.testing.assert_allclose(ref_data, tvm_out)
in_shape = (3, 1)
shape = (3, 4)
data = np.random.uniform(size=in_shape).astype(np.float32)
ref_data = np.tile(data, 4)
_test_expand('expand_with_dim_unchanged_test', data, shape, ref_data)
in_shape = (3, 1)
shape = (2, 1, 6)
data = np.random.uniform(size=in_shape).astype(np.float32)
ref_data = data * np.ones(shape, dtype=np.float32)
_test_expand('expand_with_dim_changed_test', data, shape, ref_data)
def verify_depth_to_space(inshape, outshape, mode, blockSize):
node = onnx.helper.make_node('DepthToSpace',
inputs=['x'],
outputs=['y'],
blocksize=blockSize)
graph = helper.make_graph([node],
"depth_to_space_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))])
model = helper.make_model(graph, producer_name='depth_to_space_test')
for target, ctx in ctx_list():
x = np.random.uniform(size=inshape).astype('float32')
tvm_out = get_tvm_output(model, x, target, ctx, outshape, 'float32')
onnx_out = get_onnxruntime_output(model, x, 'float32')
tvm.testing.assert_allclose(onnx_out, tvm_out)
def test_depth_to_space():
    # The current onnx.checker uses the OpSet-1 version of DepthToSpace, which
    # doesn't have a mode argument.
    # TODO: add a mode argument to test CRD and DCR modes once we update to a
    # newer onnx version.
verify_depth_to_space((1, 8, 2, 3), (1, 2, 4, 6), mode="CRD", blockSize=2)
def verify_space_to_depth(inshape, outshape, blockSize):
node = onnx.helper.make_node('SpaceToDepth',
inputs=['x'],
outputs=['y'],
blocksize=blockSize)
graph = helper.make_graph([node],
"space_to_depth_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))])
model = helper.make_model(graph, producer_name='space_to_depth_test')
for target, ctx in ctx_list():
x = np.random.uniform(size=inshape).astype('float32')
tvm_out = get_tvm_output(model, x, target, ctx, outshape, 'float32')
onnx_out = get_onnxruntime_output(model, x, 'float32')
tvm.testing.assert_allclose(onnx_out, tvm_out)
def test_space_to_depth():
verify_space_to_depth((1, 1, 4, 6), (1, 4, 2, 3), 2)
def test_shape():
in_shape = (4, 3, 3, 4)
ref_shape = (6, 2, 4, 3)
ref_array = np.array(ref_shape)
ref_node = onnx.helper.make_node('Constant',
inputs=[],
outputs=['ref_in'],
value=onnx.helper.make_tensor(name='const_tensor',
data_type=onnx.TensorProto.INT32,
dims=ref_array.shape,
vals=ref_array.flatten().astype(int)))
reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])
shape_node = helper.make_node("Shape", ['out'], ['final_out'])
graph = helper.make_graph([ref_node, reshape_node, shape_node],
"shape_test",
inputs=[helper.make_tensor_value_info("in",
TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("final_out",
TensorProto.FLOAT, list(ref_shape))])
model = helper.make_model(graph, producer_name='shape_test')
for target, ctx in ctx_list():
x = np.random.uniform(size=in_shape).astype('int32')
tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'int32')
tvm.testing.assert_allclose(ref_shape, tvm_out)
def _test_power_iteration(x_shape, y_shape):
if isinstance(y_shape, int):
y_shape = [y_shape]
x = np.random.uniform(size=x_shape).astype(np.float32)
y = np.random.uniform(size=y_shape).astype(np.float32)
np_res = np.power(x, y).astype(np.float32)
res = helper.make_node("Pow", ['x', 'y'], ['out'])
graph = helper.make_graph([res],
'power_test',
inputs=[helper.make_tensor_value_info("x",
TensorProto.FLOAT, list(x_shape)),
helper.make_tensor_value_info("y",
TensorProto.FLOAT, list(y_shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(np_res.shape))])
model = helper.make_model(graph, producer_name='power_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(model, [x, y], target, ctx, np_res.shape)
tvm.testing.assert_allclose(np_res, tvm_out, rtol=1e-5, atol=1e-5)
def test_power():
_test_power_iteration((1, 3), (1))
_test_power_iteration((2, 3), (2, 3))
_test_power_iteration((2, 3), (1, 3))
def test_squeeze():
in_shape = (1, 3, 1, 3, 1, 1)
out_shape = (3, 3)
y = helper.make_node("Squeeze", ['in'], ['out'], axes=[0, 2, 4, 5])
graph = helper.make_graph([y],
'squeeze_test',
inputs=[helper.make_tensor_value_info("in",
TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(out_shape))])
model = helper.make_model(graph, producer_name='squeeze_test')
for target, ctx in ctx_list():
x = np.random.uniform(size=in_shape).astype('float32')
tvm_out = get_tvm_output(model, x, target, ctx, out_shape, 'float32')
tvm.testing.assert_allclose(out_shape, tvm_out.shape)
def test_flatten():
in_shape = (1, 3, 4, 4)
axis = 1
ref_shape = (1, 48)
flatten_node = helper.make_node("Flatten", ["in"], ["out"], axis=axis)
graph = helper.make_graph([flatten_node],
"flatten_test",
inputs=[helper.make_tensor_value_info("in",
TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(ref_shape))])
model = helper.make_model(graph, producer_name='flatten_test')
for target, ctx in ctx_list():
x = np.random.uniform(size=in_shape).astype('int32')
tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'float32')
tvm.testing.assert_allclose(ref_shape, tvm_out.shape)
def test_unsqueeze():
in_shape = (3, 3)
axis = (0, 3, 4)
out_shape = (1, 3, 3, 1, 1)
y = helper.make_node("Unsqueeze", ['in'], ['out'], axes=list(axis))
graph = helper.make_graph([y],
'squeeze_test',
inputs=[helper.make_tensor_value_info("in",
TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(out_shape))])
model = helper.make_model(graph, producer_name='squeeze_test')
for target, ctx in ctx_list():
x = np.random.uniform(size=in_shape).astype('float32')
tvm_out = get_tvm_output(model, x, target, ctx, out_shape, 'float32')
tvm.testing.assert_allclose(out_shape, tvm_out.shape)
def verify_gather(in_shape, indices, axis, dtype):
x = np.random.uniform(size=in_shape).astype(dtype)
indices = np.array(indices, dtype="int32")
out_np = np.take(x, indices, axis=axis)
y = helper.make_node("Gather", ['in', 'indices'], ['out'], axis=axis)
graph = helper.make_graph([y],
'gather_test',
inputs=[helper.make_tensor_value_info("in",
TensorProto.FLOAT, list(in_shape)),
helper.make_tensor_value_info("indices",
TensorProto.INT32, list(indices.shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(out_np.shape))])
model = helper.make_model(graph, producer_name='gather_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, [x, indices], target, ctx, out_np.shape)
tvm.testing.assert_allclose(out_np, tvm_out)
def test_gather():
verify_gather((4,), [1], 0, 'int32')
verify_gather((1, 4), [0], 0, 'int32')
verify_gather((4,), [[[1, 0], [0, 1]]], 0, 'float32')
verify_gather((2, 2), [[[1, 0], [0, 1]]], 1, 'int32')
verify_gather((3, 3, 3), [[[1, 0]]], -1, 'int32')
verify_gather((4, 3, 5, 6), [[2, 1, 0, 0]], 0, 'float32')
def verify_scatter(in_shape, indices, axis):
x = np.random.uniform(size=in_shape).astype("float32")
indices = np.array(indices, dtype="int32")
updates = np.random.uniform(size=indices.shape).astype("float32")
y = helper.make_node("ScatterElements", ['data', 'indices', 'updates'], ['output'], axis=axis)
graph = helper.make_graph([y],
'scatter_test',
inputs=[helper.make_tensor_value_info("data",
TensorProto.FLOAT, list(in_shape)),
helper.make_tensor_value_info("indices",
TensorProto.INT32, list(indices.shape)),
helper.make_tensor_value_info("updates",
TensorProto.FLOAT, list(indices.shape))],
outputs=[helper.make_tensor_value_info("output",
TensorProto.FLOAT, list(in_shape))])
model = helper.make_model(graph, producer_name='scatter_test')
onnx_out = get_onnxruntime_output(model, [x, indices, updates])
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, [x, indices, updates], target, ctx, onnx_out[0].shape)
tvm.testing.assert_allclose(onnx_out[0], tvm_out)
def test_scatter():
verify_scatter((4,), [1], 0)
verify_scatter((1, 4), [[0]], 0)
verify_scatter((4,), [2, 3], 0)
verify_scatter((2, 2), [[1, 0], [0, 1]], 1)
verify_scatter((3, 3, 3), [[[-1, -3]]], -1)
verify_scatter((4, 3, 5, 6), [[[[2, 1, 0, 0]]]], 0)
def _test_slice_iteration_v1(indata, outdata, starts, ends, axes=None):
if axes:
y = helper.make_node(
"Slice", ['in'], ['out'], axes=axes, starts=starts, ends=ends)
else:
y = helper.make_node(
"Slice", ['in'], ['out'], starts=starts, ends=ends)
graph = helper.make_graph([y],
'slice_test',
inputs=[helper.make_tensor_value_info("in",
TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(outdata.shape))])
model = helper.make_model(graph, producer_name='slice_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, indata, target, ctx, outdata.shape, 'float32', opset=1)
tvm.testing.assert_allclose(outdata, tvm_out)
def _test_slice_iteration_v10(indata, outdata, starts, ends, axes=None):
if isinstance(starts, int):
starts = (starts, )
if isinstance(ends, int):
ends = (ends, )
if isinstance(axes, int):
axes = (axes, )
starts = np.asarray(starts)
ends = np.asarray(ends)
inputs = [
helper.make_tensor_value_info("data", TensorProto.FLOAT,
list(indata.shape)),
helper.make_tensor_value_info("starts", TensorProto.INT32,
list(starts.shape)),
helper.make_tensor_value_info("ends", TensorProto.INT32,
list(ends.shape))
]
initializer = [
helper.make_tensor("starts", TensorProto.INT32, list(starts.shape),
starts),
helper.make_tensor("ends", TensorProto.INT32, list(ends.shape), ends)
]
if axes:
axes = np.asarray(axes)
y = helper.make_node("Slice", ["data", "starts", "ends", "axes"],
["out"])
inputs.append(
helper.make_tensor_value_info("axes", TensorProto.INT32,
list(axes.shape)))
initializer.append(
helper.make_tensor("axes", TensorProto.INT32, list(axes.shape),
axes))
else:
y = helper.make_node("Slice", ["data", "starts", "ends"], ["out"])
graph = helper.make_graph([y],
'slice_test',
inputs=inputs,
outputs=[
helper.make_tensor_value_info(
"out", TensorProto.FLOAT,
list(outdata.shape))
],
initializer=initializer)
model = helper.make_model(graph, producer_name='slice_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(model,
indata,
target,
ctx,
outdata.shape,
'float32',
opset=10)
tvm.testing.assert_allclose(outdata, tvm_out)
def test_slice():
x = np.random.randn(20, 10, 5).astype(np.float32)
_test_slice_iteration_v1(x, x[0:3, 0:10], (0, 0), (3, 10), (0, 1))
_test_slice_iteration_v1(x, x[:, :, 3:4], (0, 0, 3), (20, 10, 4))
_test_slice_iteration_v1(x, x[:, 1:1000], (1), (1000), (1))
_test_slice_iteration_v1(x, x[:, 0:-1], (0), (-1), (1))
_test_slice_iteration_v10(x, x[0:3, 0:10], (0, 0), (3, 10), (0, 1))
_test_slice_iteration_v10(x, x[:, :, 3:4], (0, 0, 3), (20, 10, 4))
_test_slice_iteration_v10(x, x[:, 1:1000], (1), (1000), (1))
_test_slice_iteration_v10(x, x[:, 0:-1], (0), (-1), (1))
def _test_onnx_op_elementwise(inshape, outfunc, npargs, dtype, opname, kwargs):
indata = np.random.uniform(-1, 1, size=inshape).astype(dtype)
outdata = outfunc(indata, **npargs)
y = helper.make_node(opname, ['in'], ['out'], **kwargs)
graph = helper.make_graph([y],
opname+'_test',
inputs=[helper.make_tensor_value_info("in",
TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(outdata.shape))])
model = helper.make_model(graph, producer_name=opname+'_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, indata, target, ctx, outdata.shape, dtype)
tvm.testing.assert_allclose(outdata, tvm_out)
def test_floor():
_test_onnx_op_elementwise((2, 4, 5, 6), np.floor,
{}, 'float32', 'Floor', {})
def test_ceil():
_test_onnx_op_elementwise((2, 4, 5, 6), np.ceil, {}, 'float32', 'Ceil', {})
def test_clip():
_test_onnx_op_elementwise((2, 4, 5, 6),
np.clip,
{'a_min': -1.0, 'a_max': 1.0},
'float32',
'Clip',
{'min': -1.0, 'max': 1.0})
def test_round():
_test_onnx_op_elementwise((2, 4, 5, 6), np.round, {}, 'float32', 'Round', {})
def _test_finite_ops(inshape, outfunc, npargs, dtype, opname, kwargs):
indata = np.random.choice(a=[np.nan, np.inf, -np.inf, 0.5, 1.0, 0], size=inshape).astype(dtype)
outdata = outfunc(indata, **npargs)
y = helper.make_node(opname, ['in'], ['out'], **kwargs)
graph = helper.make_graph([y],
opname+'_test',
inputs=[helper.make_tensor_value_info("in",
TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.BOOL, list(outdata.shape))])
model = helper.make_model(graph, producer_name=opname+'_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, indata, target, ctx, outdata.shape, dtype)
tvm.testing.assert_allclose(outdata, tvm_out)
def test_isinf():
_test_finite_ops((2, 4, 5, 6), np.isinf, {}, 'float32', 'IsInf', {})
def test_isnan():
_test_finite_ops((2, 4, 5, 6), np.isnan, {}, 'float32', 'IsNaN', {})
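# Compares TVM's GatherND against topi's NumPy reference implementation.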
def verify_gather_nd(in_shape, indices, dtype):
x = np.random.uniform(size=in_shape).astype(dtype)
indices = np.array(indices, dtype="int32")
out_np = topi.testing.gather_nd_python(x, indices)
y = helper.make_node("GatherND", ['in', 'indices'], ['out'])
graph = helper.make_graph([y],
'gather_test',
inputs=[helper.make_tensor_value_info("in",
TensorProto.FLOAT, list(in_shape)),
helper.make_tensor_value_info("indices",
TensorProto.INT32, list(indices.shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(out_np.shape))])
model = helper.make_model(graph, producer_name='gather_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, [x, indices], target, ctx, out_np.shape)
tvm.testing.assert_allclose(out_np, tvm_out)
def test_gather_nd():
verify_gather_nd((2, 2), [[0,0],[1,1]], 'int32')
verify_gather_nd((3, 3, 3), [[0,1],[1,0]] , 'float32')
verify_gather_nd((4, 3, 5, 6), [[2, 1, 0, 0]], 'float32')
def test_onehot():
indices_shape = [10]
indices_array = np.random.randint(
low=0, high=9, size=indices_shape, dtype='int32')
depth = 10
values = np.asarray([0, 1])
out_np = np.eye(depth)[indices_array.reshape(-1)]
onehot_node = helper.make_node(
"OneHot", ["indices", "depth", "values"], ["out"])
graph = helper.make_graph([onehot_node],
"onehot_test",
inputs=[helper.make_tensor_value_info("indices",
TensorProto.INT32, indices_shape),
helper.make_tensor_value_info("depth",
TensorProto.INT32, [1]),
helper.make_tensor_value_info("values",
TensorProto.INT32, values.shape)],
initializer=[helper.make_tensor("depth", TensorProto.INT32, [1], [depth]),
helper.make_tensor("values", TensorProto.INT32, values.shape, values)],
outputs=[helper.make_tensor_value_info("out", TensorProto.INT32, out_np.shape)])
model = helper.make_model(graph, producer_name="onehot_test")
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, [indices_array], target, ctx, out_np.shape)
tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)
def test_matmul():
a_shape = (4, 3)
b_shape = (3, 4)
a_array = np.random.uniform(size=a_shape).astype('float32')
b_array = np.random.uniform(size=b_shape).astype('float32')
out_np = np.matmul(a_array, b_array)
mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
graph = helper.make_graph([mul_node],
"matmul_test",
inputs=[helper.make_tensor_value_info("a",
TensorProto.FLOAT, list(a_shape)),
helper.make_tensor_value_info("b",
TensorProto.FLOAT, list(b_shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(out_np.shape))])
model = helper.make_model(graph, producer_name='matmul_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, [a_array, b_array], target, ctx, out_np.shape)
tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)
def verify_batch_matmul(a_shape, b_shape):
a_array = np.random.uniform(size=a_shape).astype('float32')
b_array = np.random.uniform(size=b_shape).astype('float32')
out_np = np.matmul(a_array, b_array)
mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
graph = helper.make_graph([mul_node],
"matmul_test",
inputs=[helper.make_tensor_value_info("a",
TensorProto.FLOAT, list(a_shape)),
helper.make_tensor_value_info("b",
TensorProto.FLOAT, list(b_shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(out_np.shape))])
model = helper.make_model(graph, producer_name='matmul_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, [a_array, b_array], target, ctx, out_np.shape)
tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)
def test_batch_matmul():
verify_batch_matmul((2, 3, 4, 3), (2, 3, 3, 4))
verify_batch_matmul((2, 4, 3), (3, 4))
verify_batch_matmul((2, 3, 4, 3), (3, 4))
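# Validates LRN against a direct NumPy implementation of the windowed square-sum formula.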
def verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None):
in_array = np.random.uniform(size=shape).astype(dtype)
if alpha is None and beta is None and bias is None:
alpha = 0.0001
beta = 0.75
bias = 1.0
node = onnx.helper.make_node(
'LRN', inputs=['in'], outputs=['out'], size=nsize)
else:
node = onnx.helper.make_node('LRN', inputs=['in'], outputs=['out'], alpha=alpha,
beta=beta, bias=bias, size=nsize)
graph = helper.make_graph([node],
"lrn_test",
inputs=[helper.make_tensor_value_info(
"in", TensorProto.FLOAT, list(shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(shape))])
model = helper.make_model(graph, producer_name='lrn_test')
def _get_python_lrn():
square_sum = np.zeros(shape).astype(dtype)
for n, c, h, w in np.ndindex(in_array.shape):
# window over the channel axis, clamped to the actual channel count
square_sum[n, c, h, w] = sum(in_array[n,
max(0, c - int(math.floor((nsize - 1) / 2))):
min(in_array.shape[1], c + int(math.ceil((nsize - 1) / 2)) + 1),
h,
w] ** 2)
py_out = in_array / ((bias + (alpha / nsize) * square_sum) ** beta)
return py_out
for target, ctx in ctx_list():
input_name = model.graph.input[0].name
py_out = _get_python_lrn()
tvm_out = get_tvm_output(
model, in_array, target, ctx, py_out.shape, 'float32')
tvm.testing.assert_allclose(py_out, tvm_out, rtol=1e-5, atol=1e-5)
def test_lrn():
verify_lrn((5, 5, 5, 5), 3, 'float32')
verify_lrn((5, 5, 5, 5), 3, 'float32', alpha=0.0002, beta=0.5, bias=2.0)
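# Reference instance norm: per-instance, per-channel mean/variance taken over the spatial axes only.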
def verify_instance_norm(shape, axis=1):
def _get_python_instance_norm(x, gamma, beta, epsilon=1e-5):
dims_x = len(x.shape)
axis = tuple(range(2, dims_x))
mean = np.mean(x, axis=axis, keepdims=True)
var = np.var(x, axis=axis, keepdims=True)
dim_ones = (1,) * (dims_x - 2)
gamma = gamma.reshape(-1, *dim_ones)
beta = beta.reshape(-1, *dim_ones)
return gamma * (x - mean) / np.sqrt(var + epsilon) + beta
x = np.random.randn(*shape).astype(np.float32)
gamma = np.random.randn(shape[1]).astype(np.float32)
beta = np.random.randn(shape[1]).astype(np.float32)
epsilon = 1e-5
y = _get_python_instance_norm(x, gamma, beta, epsilon).astype(np.float32)
node = onnx.helper.make_node(
'InstanceNormalization',
inputs=['x', 'gamma', 'beta'],
outputs=['y'],
epsilon=epsilon,
)
graph = helper.make_graph([node],
"instance_norm_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(shape)),
helper.make_tensor_value_info(
"gamma", TensorProto.FLOAT, (shape[1],)),
helper.make_tensor_value_info("beta", TensorProto.FLOAT, (shape[1],))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(shape))])
model = helper.make_model(graph, producer_name='instance_norm_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, [x, gamma, beta], target, ctx, shape, 'float32')
tvm.testing.assert_allclose(y, tvm_out, rtol=1e-5, atol=1e-5)
def test_instance_norm():
verify_instance_norm((2, 3, 4, 5))
verify_instance_norm((32, 64, 80, 64))
verify_instance_norm((8, 6, 5))
verify_instance_norm((8, 7, 6, 5, 4))
def _test_upsample_nearest():
scale = 2
in_shape = (1, 1, 3, 3)
out_shape = (1, 1, 3*scale, 3*scale)
y = helper.make_node("Upsample", ['in'], [
'out'], mode='nearest', scales=[1.0, 1.0, 2.0, 2.0])
in_array = np.random.uniform(size=in_shape).astype(np.float32)
out_array = topi.testing.upsampling_python(
in_array, (scale, scale), "NCHW")
graph = helper.make_graph([y],
'upsample_nearest_test',
inputs=[helper.make_tensor_value_info(
"in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))])
model = helper.make_model(graph, producer_name='upsample_nearest_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, in_array, target, ctx, out_shape, 'float32')
tvm.testing.assert_allclose(out_array, tvm_out)
def _test_upsample3d_nearest():
scale = 2
in_shape = (1, 1, 3, 3, 3)
out_shape = (1, 1, 3*scale, 3*scale, 3*scale)
y = helper.make_node("Upsample", ['in'], [
'out'], mode='nearest', scales=[1.0, 1.0, 2.0, 2.0, 2.0])
in_array = np.random.uniform(size=in_shape).astype(np.float32)
out_array = topi.testing.upsampling3d_python(
in_array, (scale, scale, scale), "NCDHW")
graph = helper.make_graph([y],
'upsample_nearest_test',
inputs=[helper.make_tensor_value_info(
"in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))])
model = helper.make_model(graph, producer_name='upsample_nearest_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, in_array, target, ctx, out_shape, 'float32')
tvm.testing.assert_allclose(out_array, tvm_out)
def _test_upsample_bilinear():
scale = 2
in_shape = (1, 1, 3, 3)
out_shape = (1, 1, 3*scale, 3*scale)
y = helper.make_node("Upsample", ['in'], [
'out'], mode='linear', scales=[1.0, 1.0, 2.0, 2.0])
in_array = np.random.uniform(size=in_shape).astype(np.float32)
out_array = topi.testing.bilinear_resize_python(
in_array, (3*scale, 3*scale), "NCHW")
graph = helper.make_graph([y],
'upsample_bilinear_test',
inputs=[helper.make_tensor_value_info(
"in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))])
model = helper.make_model(graph, producer_name='upsample_bilinear_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, in_array, target, ctx, out_shape, 'float32')
tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
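# Opset 9 Upsample takes the scales as a second input tensor; here they are derived from the
# shape of a constant tensor via a Shape node rather than being passed as an attribute.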
def _test_upsample_bilinear_opset9():
scale = 2
in_shape = (1, 1, 3, 3)
out_shape = (1, 1, 3*scale, 3*scale)
y = helper.make_node("Upsample", ['in', 'scales'], ['out'], mode='linear')
scales = [1, 1, 2, 2]
in_array = np.random.uniform(size=in_shape).astype(np.float32)
out_array = topi.testing.bilinear_resize_python(
in_array, (3*scale, 3*scale), "NCHW")
ref_node = helper.make_node('Constant',
inputs=[],
outputs=['const'],
value=onnx.helper.make_tensor(name='const_tensor',
data_type=TensorProto.FLOAT,
dims=scales,
vals=np.random.random(scales).flatten().astype(float)))
shape_node = helper.make_node("Shape", ['const'], ['scales'])
graph = helper.make_graph([ref_node, shape_node, y],
'upsample_bilinear_opset9_test',
inputs=[helper.make_tensor_value_info(
"in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))])
model = helper.make_model(
graph, producer_name='upsample_bilinear_opset9_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, in_array, target, ctx, out_shape, 'float32')
tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
def _test_upsample3d_trilinear():
scale = 2
in_shape = (1, 1, 3, 3, 3)
out_shape = (1, 1, 3*scale, 3*scale, 3*scale)
y = helper.make_node("Upsample", ['in', 'scales'], ['out'], mode='linear')
scales = [1.0, 1.0, 2.0, 2.0, 2.0]
in_array = np.random.uniform(size=in_shape).astype(np.float32)
out_array = topi.testing.trilinear_resize3d_python(
in_array, (3*scale, 3*scale, 3*scale), "NCDHW", coordinate_transformation_mode="half_pixel")
ref_array = np.array(scales)
ref_node = helper.make_node('Constant',
inputs=[],
outputs=['scales'],
value=onnx.helper.make_tensor(name='const_tensor',
data_type=TensorProto.FLOAT,
dims=ref_array.shape,
vals=ref_array.flatten().astype(float)))
graph = helper.make_graph([ref_node, y],
'upsample_trilinear_test',
inputs=[helper.make_tensor_value_info(
"in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))])
model = helper.make_model(
graph, producer_name='upsample_trilinear_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, in_array, target, ctx, out_shape, 'float32')
tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
def test_upsample():
_test_upsample_nearest()
_test_upsample_bilinear()
_test_upsample_bilinear_opset9()
_test_upsample3d_nearest()
_test_upsample3d_trilinear()
def _test_softmax(inshape, axis):
opname = 'Softmax'
indata = np.random.uniform(size=inshape).astype(np.float32)
outshape = inshape
outdata = topi.testing.softmax_python(indata)
if isinstance(axis, int):
y = helper.make_node(opname, ['in'], ['out'], axis=axis)
elif axis is None:
y = helper.make_node(opname, ['in'], ['out'])
graph = helper.make_graph([y],
opname+'_test',
inputs=[helper.make_tensor_value_info("in",
TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(outdata.shape))])
model = helper.make_model(graph, producer_name=opname+'_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, indata, target, ctx, outshape, 'float32')
tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
def test_softmax():
_test_softmax((1, 10), None)
_test_softmax((1, 10), 1)
def verify_min(input_dim):
dtype = 'float32'
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
a_np3 = np.random.uniform(size=input_dim).astype(dtype)
b_np = np.min((a_np1, a_np2, a_np3), axis=0)
min_node = helper.make_node("Min", ["a_np1", "a_np2", "a_np3"], ["out"])
graph = helper.make_graph([min_node],
"Min_test",
inputs=[helper.make_tensor_value_info("a_np1",
TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np2",
TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np3",
TensorProto.FLOAT, list(input_dim))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(b_np.shape))])
model = helper.make_model(graph, producer_name='Min_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape)
tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
def test_forward_min():
verify_min((1, 3, 20, 20))
verify_min((20, 20))
def verify_max(input_dim):
dtype = 'float32'
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
a_np3 = np.random.uniform(size=input_dim).astype(dtype)
b_np = np.max((a_np1, a_np2, a_np3), axis=0)
max_node = helper.make_node("Max", ["a_np1", "a_np2", "a_np3"], ["out"])
graph = helper.make_graph([max_node],
"Max_test",
inputs=[helper.make_tensor_value_info("a_np1",
TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np2",
TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np3",
TensorProto.FLOAT, list(input_dim))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(b_np.shape))])
model = helper.make_model(graph, producer_name='Max_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape)
tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
def test_forward_max():
verify_max((1, 3, 20, 20))
verify_max((20, 20))
def verify_mean(input_dim):
dtype = 'float32'
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
a_np3 = np.random.uniform(size=input_dim).astype(dtype)
b_np = np.mean((a_np1, a_np2, a_np3), axis=0)
mean_node = helper.make_node("Mean", ["a_np1", "a_np2", "a_np3"], ["out"])
graph = helper.make_graph([mean_node],
"Mean_test",
inputs=[helper.make_tensor_value_info("a_np1",
TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np2",
TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np3",
TensorProto.FLOAT, list(input_dim))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(b_np.shape))])
model = helper.make_model(graph, producer_name='Mean_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape)
tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
def test_forward_mean():
verify_mean((1, 3, 20, 20))
verify_mean((20, 20))
def verify_hardsigmoid(input_dim, alpha, beta):
dtype = 'float32'
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
b_np = np.clip(a_np1 * alpha + beta, 0, 1)
hardsigmoid_node = helper.make_node("HardSigmoid", ["a_np1"], [
"out"], alpha=alpha, beta=beta)
graph = helper.make_graph([hardsigmoid_node],
"HardSigmoid_test",
inputs=[helper.make_tensor_value_info("a_np1",
TensorProto.FLOAT, list(input_dim))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(b_np.shape))])
model = helper.make_model(graph, producer_name='HardSigmoid_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape)
tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
def test_forward_hardsigmoid():
verify_hardsigmoid((1, 3, 20, 20), 0.5, 0.6)
verify_hardsigmoid((20, 20), 0.3, 0.4)
def verify_argmin(input_dim, axis=None, keepdims=None):
def _argmin_numpy(data, axis=0, keepdims=True):
result = np.argmin(data, axis=axis)
if (keepdims == 1):
result = np.expand_dims(result, axis)
return result.astype(data.dtype)
a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)
if keepdims is None and axis is None:
b_np = _argmin_numpy(a_np1)
node = onnx.helper.make_node('ArgMin',
inputs=['a_np1'],
outputs=['out'])
elif axis is None:
b_np = _argmin_numpy(a_np1, keepdims=keepdims)
node = onnx.helper.make_node('ArgMin',
inputs=['a_np1'],
outputs=['out'],
keepdims=keepdims)
elif keepdims is None:
b_np = _argmin_numpy(a_np1, axis=axis)
node = onnx.helper.make_node('ArgMin',
inputs=['a_np1'],
outputs=['out'],
axis=axis)
else:
b_np = _argmin_numpy(a_np1, axis=axis, keepdims=keepdims)
node = onnx.helper.make_node('ArgMin',
inputs=['a_np1'],
outputs=['out'],
axis=axis,
keepdims=keepdims)
graph = helper.make_graph([node],
"argmin_test",
inputs=[helper.make_tensor_value_info("a_np1",
TensorProto.INT32, list(a_np1.shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.INT32, list(b_np.shape))])
model = helper.make_model(graph, producer_name='argmin_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, [a_np1], target, ctx, b_np.shape, b_np.dtype)
tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
def verify_argmax(input_dim, axis=None, keepdims=None):
def _argmax_numpy(data, axis=0, keepdims=True):
result = np.argmax(data, axis=axis)
if (keepdims == 1):
result = np.expand_dims(result, axis)
return result.astype(data.dtype)
a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)
if keepdims is None and axis is None:
b_np = _argmax_numpy(a_np1)
node = onnx.helper.make_node('ArgMax',
inputs=['a_np1'],
outputs=['out'])
elif axis is None:
b_np = _argmax_numpy(a_np1, keepdims=keepdims)
node = onnx.helper.make_node('ArgMax',
inputs=['a_np1'],
outputs=['out'],
keepdims=keepdims)
elif keepdims is None:
b_np = _argmax_numpy(a_np1, axis=axis)
node = onnx.helper.make_node('ArgMax',
inputs=['a_np1'],
outputs=['out'],
axis=axis)
else:
b_np = _argmax_numpy(a_np1, axis=axis, keepdims=keepdims)
node = onnx.helper.make_node('ArgMax',
inputs=['a_np1'],
outputs=['out'],
axis=axis,
keepdims=keepdims)
graph = helper.make_graph([node],
"argmax_test",
inputs=[helper.make_tensor_value_info("a_np1",
TensorProto.INT32, list(a_np1.shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.INT32, list(b_np.shape))])
model = helper.make_model(graph, producer_name='argmax_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, [a_np1], target, ctx, b_np.shape, b_np.dtype)
tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
def test_forward_arg_min_max():
'''Verify argmin and argmax'''
verify_argmin([3, 4, 4])
verify_argmax([3, 4, 4])
verify_argmin([3, 4, 4], axis=1)
verify_argmax([3, 4, 4], axis=0)
verify_argmin([3, 4, 4], keepdims=0)
verify_argmax([3, 4, 4], keepdims=1)
for axis in [None, 0, 1, 2]:
for keepdims in [None, True, False]:
verify_argmin([3, 4, 4], axis, keepdims)
verify_argmax([3, 4, 4], axis, keepdims)
def verify_constantofshape(input_dim, value, dtype):
out = np.empty(shape=input_dim, dtype=dtype)
out.fill(value)
fill_node = helper.make_node("ConstantOfShape", ["input"], ["output"],
value=helper.make_tensor(
'value',
mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)],
(1, ), (value, )))
inputs = [
helper.make_tensor_value_info("input", TensorProto.FLOAT, input_dim)
]
graph = helper.make_graph(
[fill_node],
"fill_test",
inputs,
outputs=[
helper.make_tensor_value_info("output", TensorProto.FLOAT,
list(out.shape))
],
initializer=[
helper.make_tensor("input", TensorProto.INT32, (len(input_dim), ),
input_dim)
])
model = helper.make_model(graph, producer_name='fill_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(model, [], target, ctx, out.shape)
tvm.testing.assert_allclose(out, tvm_out, rtol=1e-5, atol=1e-5)
def test_constantofshape():
verify_constantofshape((2, 3, 4, 5), 10, 'float32')
verify_constantofshape((3, 3), 0, 'int32')
verify_constantofshape((1, 2, 3), -1, 'float32')
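# Pad up to opset 10: the pads and the constant fill value are node attributes.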
def verify_pad(indata, pads, mode='constant', value=0.0):
indata = np.array(indata).astype(np.float32)
# numpy expected result
len_dim = len(pads) // 2
np_pads = [(pads[i], pads[i+len_dim]) for i in range(len_dim)]
# onnx graph
if mode in ['edge', 'reflect']:
outdata = np.pad(indata, pad_width=np_pads, mode=mode)
node = helper.make_node(
'Pad',
inputs=['input'],
outputs=['output'],
mode=mode,
pads=pads,
)
else:
outdata = np.pad(indata, pad_width=np_pads,
mode='constant', constant_values=value)
node = helper.make_node(
'Pad',
inputs=['input'],
outputs=['output'],
mode='constant',
pads=pads,
value=value
)
graph = helper.make_graph([node],
'pad_test',
inputs=[helper.make_tensor_value_info("input",
TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("output",
TensorProto.FLOAT, list(outdata.shape))])
model = helper.make_model(graph, producer_name='pad_test')
# tvm result
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, indata, target, ctx, outdata.shape, 'float32', opset=2)
tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
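# Pad from opset 11: pads (and the constant fill value) become input tensors, passed here as initializers.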
def verify_pad_v11(indata, pads, mode='constant', value=0.0):
indata = np.array(indata).astype(np.float32)
# numpy expected result
len_dim = len(pads) // 2
np_pads = [(pads[i], pads[i+len_dim]) for i in range(len_dim)]
pads = np.array(pads)
# onnx graph
if mode in ['edge', 'reflect']:
inputs = [indata, pads]
outdata = np.pad(indata, pad_width=np_pads, mode=mode)
node = helper.make_node(
'Pad',
inputs=['input', 'pads'],
outputs=['output'],
mode=mode
)
graph = helper.make_graph([node],
'pad_test',
inputs=[helper.make_tensor_value_info("input",
TensorProto.FLOAT, list(indata.shape)),
helper.make_tensor_value_info("pads",
TensorProto.INT64,(len(pads),))],
initializer=[helper.make_tensor("pads", TensorProto.INT64, (len(pads),), pads)],
outputs=[helper.make_tensor_value_info("output",
TensorProto.FLOAT, list(outdata.shape))])
else:
inputs = [indata, pads, np.array([value])]
outdata = np.pad(indata, pad_width=np_pads,
mode='constant', constant_values=value)
node = helper.make_node(
'Pad',
inputs=['input', 'pads', 'constant_value'],
outputs=['output'],
mode='constant'
)
graph = helper.make_graph([node],
'pad_test',
inputs=[helper.make_tensor_value_info("input",
TensorProto.FLOAT, list(indata.shape)),
helper.make_tensor_value_info("pads",
TensorProto.INT64,(len(pads),)),
helper.make_tensor_value_info("constant_value",
TensorProto.INT64,(1,)),
],
initializer=[helper.make_tensor("pads", TensorProto.INT64, (len(pads),), pads),
helper.make_tensor("constant_value", TensorProto.FLOAT, (1,), [value])],
outputs=[helper.make_tensor_value_info("output",
TensorProto.FLOAT, list(outdata.shape))])
model = helper.make_model(graph, producer_name='pad_test')
# tvm result
for target, ctx in ctx_list():
tvm_out = get_tvm_output(
model, inputs, target, ctx, outdata.shape, 'float32', opset=11)
tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
def test_pad():
verify_pad(np.random.randn(2, 2).astype(
np.float32), [0, 1, 0, 0], 'constant', 0.0)
verify_pad(np.random.randn(2, 3).astype(
np.float32), [1, 0, 0, 1], 'constant', 0.0)
verify_pad(np.random.randn(3, 2).astype(
np.float32), [0, 0, 1, 0], 'constant', 5.0)
verify_pad(np.random.randn(1, 3, 4, 5).astype(
np.float32), [0, 0, 1, 1, 0, 0, 1, 1], 'edge')
verify_pad(np.random.randn(1, 3, 4, 5).astype(
np.float32), [0, 0, 1, 1, 0, 0, 1, 1], 'reflect')
verify_pad_v11(np.random.randn(2, 2).astype(
np.float32), [0, 1, 0, 0], 'constant', 0.0)
verify_pad_v11(np.random.randn(2, 3).astype(
np.float32), [1, 0, 0, 1], 'constant', 0.0)
verify_pad_v11(np.random.randn(3, 2).astype(
np.float32), [0, 0, 1, 0], 'constant', 5.0)
verify_pad_v11(np.random.randn(1, 3, 4, 5).astype(
np.float32), [0, 0, 1, 1, 0, 0, 1, 1], 'edge')
verify_pad_v11(np.random.randn(1, 3, 4, 5).astype(
np.float32), [0, 0, 1, 1, 0, 0, 1, 1], 'reflect')
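# Generic reduce-op checker: onnxruntime provides the reference output that TVM must match.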
def verify_reduce_func(func, data, axis, keepdims):
inshape = data.shape
outshape = np.sum(data, axis=axis, keepdims=keepdims == 1).shape
if axis:
node = onnx.helper.make_node(func,
inputs=['x'],
outputs=['y'],
axes=axis,
keepdims=keepdims)
else:
node = onnx.helper.make_node(func,
inputs=['x'],
outputs=['y'],
keepdims=keepdims)
graph = helper.make_graph([node],
"reduce_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))])
model = helper.make_model(graph, producer_name='reduce_test')
onnx_out = get_onnxruntime_output(model, data, 'float32')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(model, data, target, ctx, outshape, 'float32')
tvm.testing.assert_allclose(onnx_out, tvm_out, rtol=1e-5, atol=1e-5)
def test_all_reduce_funcs():
funcs = ["ReduceMax",
"ReduceMean",
"ReduceMin",
"ReduceProd",
"ReduceSum",
'ReduceSumSquare',
"ReduceLogSum",
"ReduceLogSumExp",
"ReduceL1",
"ReduceL2"]
for func in funcs:
for keepdims in [True, False]:
verify_reduce_func(func,
np.random.randn(3, 2, 2).astype(np.float32),
axis=None, keepdims=keepdims)
verify_reduce_func(func,
np.random.randn(3, 2, 3).astype(np.float32),
axis=None, keepdims=keepdims)
verify_reduce_func(func,
np.random.randn(3, 3, 3).astype(np.float32),
axis=(1,), keepdims=keepdims)
verify_reduce_func(func,
np.random.randn(3, 3, 3, 1).astype(np.float32),
axis=(1, 2), keepdims=keepdims)
verify_reduce_func(func,
np.random.randn(3, 3, 3, 1).astype(np.float32),
axis=(1,), keepdims=keepdims)
verify_reduce_func(func,
np.random.randn(1, 3, 4, 1).astype(np.float32),
axis=(1,), keepdims=keepdims)
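# Split test helper; a falsy split argument exercises the even-split (unstack) path.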
def verify_split(indata, outdatas, split, axis=0):
indata = np.array(indata).astype(np.float32)
outdatas = [np.array(o).astype(np.float32) for o in outdatas]
if split:
split_index = range(len(split))
else:
split_index = range(len(outdatas))
node = helper.make_node(
'Split',
inputs=['input'],
outputs=['output_{}'.format(i) for i in range(len(split_index))],
axis=axis,
split=split
)
graph = helper.make_graph([node],
'split_test',
inputs=[helper.make_tensor_value_info("input",
TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("output_{}".format(i),
TensorProto.FLOAT, list(outdatas[i].shape))
for i in range(len(split_index))
])
model = helper.make_model(graph, producer_name='split_test')
for target, ctx in ctx_list():
output_shape = [o.shape for o in outdatas]
output_type = ['float32', 'float32', 'float32']
tvm_out = get_tvm_output(
model, indata, target, ctx, output_shape, output_type)
for o, t in zip(outdatas, tvm_out):
tvm.testing.assert_allclose(o, t)
def test_split():
# 1D
verify_split([1., 2., 3., 4., 5., 6.], [
[1., 2.], [3., 4.], [5., 6.]], [2, 2, 2], 0)
verify_split([1., 2., 3., 4., 5., 6.], [
[1., 2.], [3.], [4., 5., 6.]], [2, 1, 3], 0)
# 2D
verify_split([[1., 2., 3., 4.], [7., 8., 9., 10.]],
[[[1., 2.], [7., 8.]], [[3., 4.], [9., 10.]]], [2, 2], 1)
# Split evenly (unstack)
verify_split([1, 2, 3], [[1], [2], [3]], False)
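# Elementwise binary ops, including the legacy explicit broadcast attribute.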
def test_binary_ops():
in_shape = (1, 2, 3, 3)
dtype = "float32"
out_shape = in_shape
def verify_binary_ops(op, x, y, out_np, x_name='in1', y_name='in2', broadcast=None):
if broadcast is None:
z = helper.make_node(op, [x_name, y_name], ['out'])
else:
z = helper.make_node(op, [x_name, y_name], ['out'], broadcast=1)
graph = helper.make_graph([z],
'_test',
inputs=[helper.make_tensor_value_info(x_name,
TensorProto.FLOAT, list(in_shape)),
helper.make_tensor_value_info(y_name,
TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(out_shape))])
model = helper.make_model(graph, producer_name='_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(model, [x, y], target, ctx)
tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)
x = np.random.uniform(size=in_shape).astype(dtype)
y = np.random.uniform(size=in_shape).astype(dtype)
z = np.random.uniform(size=(3,)).astype(dtype)
verify_binary_ops("Add", x, y, x + y, broadcast=None)
verify_binary_ops("Add", x, z, x + z, broadcast=True)
verify_binary_ops("Sub", x, y, x - y, broadcast=None)
verify_binary_ops("Sub", x, z, x - z, broadcast=True)
verify_binary_ops("Mul", x, y, x * y, broadcast=None)
verify_binary_ops("Mul", x, z, x * z, broadcast=True)
verify_binary_ops("Mul", x, x, x * x, x_name='in1', y_name='in1', broadcast=None)
verify_binary_ops("Div", x, y, x / y, broadcast=None)
verify_binary_ops("Div", x, z, x / z, broadcast=True)
verify_binary_ops("Sum", x, y, x + y, broadcast=None)
verify_binary_ops("Greater", x, y, x > y, broadcast=True)
verify_binary_ops("Less", x, y, x < y, broadcast=True)
verify_binary_ops("Equal", x, y, x == y, broadcast=True)
def test_single_ops():
in_shape = (1, 2, 3, 3)
dtype = "float32"
out_shape = in_shape
def verify_single_ops(op, x, out_np, rtol=1e-5, atol=1e-5):
z = helper.make_node(op, ['in1'], ['out'])
graph = helper.make_graph([z],
'_test',
inputs=[helper.make_tensor_value_info("in1",
TensorProto.FLOAT, list(in_shape)), ],
outputs=[helper.make_tensor_value_info("out",
TensorProto.FLOAT, list(out_shape))])
model = helper.make_model(graph, producer_name='_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(model, [x], target, ctx)
tvm.testing.assert_allclose(out_np, tvm_out, rtol=rtol, atol=atol)
x = np.random.uniform(size=in_shape).astype(dtype)
verify_single_ops("Neg", x, -x)
verify_single_ops("Abs", x, np.abs(x))
verify_single_ops("Reciprocal", x, 1/x)
verify_single_ops("Sqrt", x, np.sqrt(x))
verify_single_ops("Relu", x, np.maximum(x, 0))
verify_single_ops("Exp", x, np.exp(x))
verify_single_ops("Log", x, np.log(x))
verify_single_ops("Log", x, np.log(x))
verify_single_ops("ACos", x, np.arccos(x))
verify_single_ops("ACosh", x, np.arccosh(x))
verify_single_ops("ASin", x, np.arcsin(x))
verify_single_ops("ASinh", x, np.arcsinh(x))
verify_single_ops("ATan", x, np.arctan(x))
verify_single_ops("ATanh", x, np.arctanh(x))
verify_single_ops("Cos", x, np.cos(x))
verify_single_ops("Cosh", x, np.cosh(x))
verify_single_ops("Sin", x, np.sin(x))
verify_single_ops("Sinh", x, np.sinh(x))
verify_single_ops("Tan", x, np.tan(x))
verify_single_ops("Tanh", x, np.tanh(x))
verify_single_ops("Sigmoid", x, 1 / (1 + np.exp(-x)))
verify_single_ops("Softsign", x, x / (1 + np.abs(x)))
verify_single_ops("SoftPlus", x, np.log(1 + np.exp(x)))
def test_leaky_relu():
def leaky_relu_x(x, alpha):
return np.where(x >= 0, x, x * alpha)
_test_onnx_op_elementwise((2, 4, 5, 6),
leaky_relu_x,
{'alpha': 0.25},
'float32',
'LeakyRelu',
{'alpha': 0.25})
def test_elu():
def elu_x(x, alpha):
return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))
_test_onnx_op_elementwise((2, 4, 5, 6),
elu_x,
{'alpha': 0.25},
'float32',
'Elu',
{'alpha': 0.25})
def test_selu():
def selu_x(x, alpha, gamma):
return gamma * np.where(x > 0, x, alpha * (np.exp(x) - 1.0))
_test_onnx_op_elementwise((2, 4, 5, 6),
selu_x,
{'alpha': 0.25, 'gamma': 0.3},
'float32',
'Selu',
{'alpha': 0.25, 'gamma': 0.3})
def test_prelu():
def verify_prelu(x_shape, a_shape):
node = helper.make_node('PRelu',
inputs=['X', 'slope'],
outputs=['Y'])
graph = helper.make_graph([node],
"prelu_test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_shape)),
helper.make_tensor_value_info("slope", TensorProto.FLOAT, list(a_shape))],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(x_shape))])
model = helper.make_model(graph, producer_name='prelu_test')
indata = np.random.uniform(-10, 10, x_shape).astype(np.float32)
slopedata = np.random.uniform(-10, 10, a_shape).astype(np.float32)
onnx_out = get_onnxruntime_output(model, [indata, slopedata])
for target, ctx in [('llvm', tvm.cpu())]:
tvm_out = get_tvm_output(model, [indata, slopedata], target, ctx, list(x_shape),
output_dtype='float32')
tvm.testing.assert_allclose(onnx_out[0], tvm_out, rtol=1e-05, atol=1e-05)
verify_prelu([3,4,5,6], [1, 4, 1, 1])
verify_prelu([1,8,5,6], [1, 8, 1, 1])
verify_prelu([2,12,16,16], [1, 12, 1, 1])
def test_ThresholdedRelu():
def ThresholdedRelu_x(x, alpha):
out_np = np.clip(x, alpha, np.inf)
out_np[out_np == alpha] = 0
return out_np
_test_onnx_op_elementwise((2, 4, 5, 6),
ThresholdedRelu_x,
{'alpha': 0.25},
'float32',
'ThresholdedRelu',
{'alpha': 0.25})
def test_ScaledTanh():
def ScaledTanh_x(x, alpha, beta):
return alpha * np.tanh(beta * x)
_test_onnx_op_elementwise((2, 4, 5, 6),
ScaledTanh_x,
{'alpha': 0.25, 'beta': 0.3},
'float32',
'ScaledTanh',
{'alpha': 0.25, 'beta': 0.3})
def test_ParametricSoftplus():
def ParametricSoftplus_x(x, alpha, beta):
return alpha * np.log(np.exp(beta * x) + 1)
_test_onnx_op_elementwise((2, 4, 5, 6),
ParametricSoftplus_x,
{'alpha': 0.25, 'beta': 0.3},
'float32',
'ParametricSoftplus',
{'alpha': 0.25, 'beta': 0.3})
def test_Scale():
def Scale_x(x, scale):
return scale * x
_test_onnx_op_elementwise((2, 4, 5, 6),
Scale_x,
{'scale': 0.25},
'float32',
'Scale',
{'scale': 0.25})
def test_LogSoftmax():
_test_onnx_op_elementwise((1, 4),
topi.testing.log_softmax_python,
{},
'float32',
'LogSoftmax',
{'axis': 1})
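# Round-trips a torchvision model through torch.onnx.export and compares onnxruntime vs. TVM outputs.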
def check_torch_conversion(model, input_size):
dummy_input = torch.randn(*input_size)
file_name = '{}.onnx'.format(model.__name__)
# Set verbose=True for more output
torch.onnx.export(model(), dummy_input, file_name,
export_params=True, verbose=False)
onnx_model = onnx.load(file_name)
for target, ctx in ctx_list():
input_data = np.random.uniform(size=input_size).astype('int32')
c2_out = get_onnxruntime_output(onnx_model, input_data)
tvm_out = get_tvm_output(onnx_model, input_data, target, ctx)
tvm.testing.assert_allclose(c2_out, tvm_out)
def test_resnet():
check_torch_conversion(torchvision.models.resnet18, (1, 3, 224, 224))
# check_torch_conversion(torchvision.models.resnet101, (1,3,224,224))
# def test_alexnet():
# Torch's ONNX export does not support the adaptive pooling used by AlexNet?
# check_torch_conversion(torchvision.models.alexnet, (1,3,224,224))
# Torch's ONNX export does not support the adaptive pooling used by vgg16?
# def test_vgg16():
# check_torch_conversion(torchvision.models.vgg16, (1,3,224,224))
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_squeezenet():
# # Torch's ONNX export does not support the max pooling used by Squeezenet
# check_torch_conversion(torchvision.models.squeezenet1_0, (1,3,224,224))
def test_densenet():
check_torch_conversion(torchvision.models.densenet161, (1, 3, 224, 224))
def test_inception():
check_torch_conversion(torchvision.models.inception_v3, (1, 3, 224, 224))
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_googlenet():
# check_torch_conversion(torchvision.models.googlenet, (1,3,224,224))
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_shufflenetv2():
# check_torch_conversion(torchvision.models.shufflenetv2, (1,3,224,224))
def test_sign():
def Sign_x(x):
return np.sign(x)
_test_onnx_op_elementwise((3, 4, 5, 6),
Sign_x,
{},
'float32',
'Sign',
{})
def verify_not(indata, dtype):
x = indata.astype(dtype)
outdata = np.logical_not(x)
node = helper.make_node('Not', inputs=['in'], outputs=['out'],)
graph = helper.make_graph([node],
'not_test',
inputs=[helper.make_tensor_value_info(
"in", TensorProto.BOOL, list(x.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))])
model = helper.make_model(graph, producer_name='not_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(model, [x], target, ctx, outdata.shape)
tvm.testing.assert_allclose(outdata, tvm_out)
def test_not():
# 2d
verify_not(indata=(np.random.randn(3, 4) > 0), dtype=bool)
# 3d
verify_not(indata=(np.random.randn(3, 4, 5) > 0), dtype=bool)
# 4d
verify_not(indata=(np.random.randn(3, 4, 5, 6) > 0), dtype=bool)
def verify_and(indata, dtype):
x = indata[0].astype(dtype)
y = indata[1].astype(dtype)
outdata = np.logical_and(x, y)
node = helper.make_node('And', inputs=['in1', 'in2'], outputs=['out'], )
graph = helper.make_graph([node],
'and_test',
inputs=[helper.make_tensor_value_info("in1", TensorProto.BOOL, list(x.shape)),
helper.make_tensor_value_info("in2", TensorProto.BOOL, list(y.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))])
model = helper.make_model(graph, producer_name='and_test')
for target, ctx in ctx_list():
tvm_out = get_tvm_output(model, [x, y], target, ctx, outdata.shape)
tvm.testing.assert_allclose(outdata, tvm_out)
def test_and():
# 2d
x = (np.random.randn(3, 4) > 0)
y = (np.random.randn(3, 4) > 0)
verify_and(indata=[x, y], dtype=bool)
# 3d
x = (np.random.randn(3, 4, 5) > 0)
y = (np.random.randn(3, 4, 5) > 0)
verify_and(indata=[x, y], dtype=bool)
from PyQt5 import QtWidgets, uic, QtCore, Qt
from PyQt5.QtWidgets import QAction, QMessageBox, QFileDialog, QDesktopWidget, QColorDialog, QFontDialog, QDialog, QTableWidgetItem, QVBoxLayout, QSplashScreen, QProgressBar
from PyQt5.QtGui import QIcon, QPixmap
import sys, os, time
from webbrowser import open_new_tab
import xlwt
import subprocess as sp
from plotting import *
from mode import *
from recorder import *
from read_outfiles import *
from utilities import *
import matplotlib as mpl
from RS import*
import numpy as np
import pandas as pd
from pyvistaqt import QtInteractor
main=None
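# Main application window: embeds a pyvista interactor and wires up the toolbar, menus and analysis panels.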
class FeViewMain(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(FeViewMain, self).__init__(parent)
# load MainWindows.ui from Qt Designer
uic.loadUi('UI\MainWindows.ui', self)
# add the pyvista interactor object
vlayout = QVBoxLayout()
self.p=self.plot_widget = QtInteractor(self.frame)
self.p.show_axes()
vlayout.addWidget(self.plot_widget.interactor)
self.frame.setLayout(vlayout)
self.setCentralWidget(self.frame)
vlayout.setContentsMargins(0, 0, 0, 0)
# add some tool bar
self.btn_tool_openTCL = QAction(QIcon('UI/icon/Open.png'),'Open TCL File', self)
self.btn_tool_editTCL = QAction(QIcon('UI/icon/edit.png'),'Edit TCL File with CypressEditor', self)
self.btn_tool_run_OS = QAction(QIcon('UI/icon/run.png'),'run TCL file with OpenSees', self)
self.btn_iso = QAction(QIcon('UI/icon/iso.png'),'View isometric', self)
self.btn_iso.setCheckable(True) # toolbar button checkable
self.btn_xy_zpluss = QAction(QIcon('UI/icon/xy_zpluss.png'), 'View xy_zpluss', self)
self.btn_xy_zpluss.setCheckable(True)
self.btn_xy_zminus = QAction(QIcon('UI/icon/xy_zminus.png'), 'View xy_zminus', self)
self.btn_xy_zminus.setCheckable(True)
self.btn_xz_ypluss = QAction(QIcon('UI/icon/xz_ypluss.png'), 'View xz_ypluss', self)
self.btn_xz_ypluss.setCheckable(True)
self.btn_xz_yminus = QAction(QIcon('UI/icon/xz_yminus.png'), 'View xz_yminus', self)
self.btn_xz_yminus.setCheckable(True)
self.btn_yz_xpluss = QAction(QIcon('UI/icon/yz_xpluss.png'), 'View yz_xpluss', self)
self.btn_yz_xpluss.setCheckable(True)
self.btn_yz_xminus = QAction(QIcon('UI/icon/yz_xminus.png'), 'View yz_xminus', self)
self.btn_yz_xminus.setCheckable(True)
self.btn_node_label = QAction(QIcon('UI/icon/nl.png'), 'View Node Label', self)
self.btn_node_label.setCheckable(True)
self.btn_node_cord = QAction(QIcon('UI/icon/nc.png'), 'View Node Co-ordinate', self)
self.btn_node_cord.setCheckable(True)
self.btn_load = QAction(QIcon('UI/icon/load.png'), 'View Point Load', self)
self.btn_load.setCheckable(True)
self.btn_color_plot_background= QAction(QIcon('UI/icon/color_plot_background.png'), 'Change Plot Background Color', self)
self.btn_color_gui = QAction(QIcon('UI/icon/color_gui.png'), 'Change Theme Color', self)
self.btn_font = QAction(QIcon('UI/icon/font.png'), 'Change Font Style', self)
self.btn_plot_image = QAction(QIcon('UI/icon/plot_image.png'), 'Save Plot as Image', self)
self.btn_plot_image_wb = QAction(QIcon('UI/icon/plot_image_wb.png'), 'Save Plot as Image with White Background', self)
self.btn_calc = QAction(QIcon('UI/icon/calculator.png'), 'Calculator', self)
self.btn_minumize = QAction(QIcon('UI/icon/minimize.png'), 'Minimize the Window', self)
self.btn_maximize = QAction(QIcon('UI/icon/maximize.png'), 'Maximize the Window', self)
self.btn_full_s = QAction(QIcon('UI/icon/full_s.png'), 'Fullscreen', self)
self.btn_center = QAction(QIcon('UI/icon/center.png'), 'Center', self)
self.btn_min_s = QAction(QIcon('UI/icon/min.png'), 'Minimum Window Size', self)
self.btn_max_s = QAction(QIcon('UI/icon/max.png'), 'Maximum Window Size', self)
self.btn_restore = QAction(QIcon('UI/icon/rest_w.png'), 'Restore Window', self)
self.btn_help = QAction(QIcon('UI/icon/help.png'), 'Help', self)
self.btn_about = QAction(QIcon('UI/icon/info.png'), 'Info', self)
self.btn_close = QAction(QIcon('UI/icon/close.png'), 'Exit', self)
toolbar = self.addToolBar('Exit')
toolbar.addAction(self.btn_tool_openTCL)
toolbar.addAction(self.btn_tool_editTCL)
toolbar.addAction(self.btn_tool_run_OS)
toolbar.addSeparator()
toolbar.addAction(self.btn_iso)
toolbar.addAction(self.btn_xy_zpluss)
toolbar.addAction(self.btn_xy_zminus)
toolbar.addAction(self.btn_xz_ypluss)
toolbar.addAction(self.btn_xz_yminus)
toolbar.addAction(self.btn_yz_xpluss)
toolbar.addAction(self.btn_yz_xminus)
toolbar.addSeparator()# add separator
toolbar.addAction(self.btn_node_label)
toolbar.addAction(self.btn_node_cord)
toolbar.addAction(self.btn_load)
toolbar.addSeparator()
toolbar.addAction(self.btn_color_plot_background)
toolbar.addAction(self.btn_color_gui)
toolbar.addAction(self.btn_font)
toolbar.addSeparator()
toolbar.addAction(self.btn_plot_image)
toolbar.addAction(self.btn_plot_image_wb)
toolbar.addAction(self.btn_calc)
toolbar.addSeparator()
toolbar.addAction(self.btn_minumize)
toolbar.addAction(self.btn_maximize)
toolbar.addAction(self.btn_full_s)
toolbar.addAction(self.btn_center)
toolbar.addAction(self.btn_min_s)
toolbar.addAction(self.btn_max_s)
toolbar.addAction(self.btn_restore)
toolbar.addSeparator()
toolbar.addAction(self.btn_help)
toolbar.addAction(self.btn_about)
toolbar.addAction(self.btn_close)
toolbar.addSeparator()
# margin & layout setting for toolbar
toolbar.setContentsMargins(0, 0, 0, 0)
toolbar.layout().setSpacing(0)
toolbar.layout().setContentsMargins(0, 0, 0, 0)
self.btn_tool_openTCL.triggered.connect(self.openTCL) # call function for 'Open TCL file' toolbar button
self.actionOpen.triggered.connect(self.openTCL) # call function for 'Open TCL file' main manu button
self.btn_apply_static.clicked.connect(self.DispStatic)
self.actionApply_Static.triggered.connect(self.DispStatic)
self.btn_apply_modal.clicked.connect(self.DispModal)
self.actionApply_Modal.triggered.connect(self.DispModal)
self.btn_apply_dynamic.clicked.connect(self.DispDynamic)
self.Apply_Dyanamic.triggered.connect(self.DispDynamic)
self.btn_response_static.clicked.connect(self.res_static)
self.actionShow_Response.triggered.connect(self.res_static)
self.btn_response_dynamic.clicked.connect(self.res_dynamic)
self.actionShow_Response_dynamic.triggered.connect(self.res_dynamic)
self.btn_tool_editTCL.triggered.connect(self.edit_TCL)
self.actionEdit.triggered.connect(self.edit_TCL)
self.btn_tool_run_OS.triggered.connect(self.runOS)
self.actionRun_OpenSees.triggered.connect(self.runOS)
self.btn_iso.triggered.connect(self.iso)
self.btn_xy_zpluss.triggered.connect(self.xy_zpluss)
self.btn_xy_zminus.triggered.connect(self.xy_zminus)
self.btn_xz_ypluss.triggered.connect(self.xz_ypluss)
self.btn_xz_yminus.triggered.connect(self.xz_yminus)
self.btn_yz_xpluss.triggered.connect(self.yz_xpluss)
self.btn_yz_xminus.triggered.connect(self.yz_xminus)
self.actionFeView.triggered.connect(self.about_feview)
self.btn_about.triggered.connect(self.about_feview)
self.actionPlot_Background_Color.triggered.connect(self.Plot_Background_Color)
self.btn_color_plot_background.triggered.connect(self.Plot_Background_Color)
self.actionGUI_Font.triggered.connect(self.GUI_Font)
self.btn_font.triggered.connect(self.GUI_Font)
self.actionTheme_Color.triggered.connect(self.gui_color)
self.btn_color_gui.triggered.connect(self.gui_color)
self.btn_plot_image.triggered.connect(self.savePlot)
self.actionWith_background.triggered.connect(self.savePlot)
self.btn_plot_image_wb.triggered.connect(self.savePlot_wb)
self.actionWhite_Background.triggered.connect(self.savePlot_wb)
self.btn_calc.triggered.connect(self.calculator)
self.actionMinimize.triggered.connect(lambda: self.showMinimized())
self.btn_minumize.triggered.connect(lambda: self.showMinimized())
self.actionMaximize.triggered.connect(lambda: self.showMaximized())
self.btn_maximize.triggered.connect(lambda: self.showMaximized())
self.actionFull_Screen.triggered.connect(lambda: self.showFullScreen())
self.btn_full_s.triggered.connect(lambda: self.showFullScreen())
self.actionCenter.triggered.connect(lambda: self.center())
self.btn_center.triggered.connect(lambda: self.center())
self.actionMinimum_Size.triggered.connect(lambda: self.resize(self.minimumSize()))
self.btn_min_s.triggered.connect(lambda: self.resize(self.minimumSize()))
self.actionMaximum_Size.triggered.connect(lambda: self.resize(self.maximumSize()))
self.btn_max_s.triggered.connect(lambda: self.resize(self.maximumSize()))
self.actionRestore.triggered.connect(lambda: self.showNormal())
self.btn_restore.triggered.connect(lambda: self.showNormal())
self.actionSSL.triggered.connect(lambda: open_new_tab('Help\FeView_Help.chm'))
self.btn_help.triggered.connect(lambda: open_new_tab('Help\FeView_Help.chm'))
self.actionOpenSees.triggered.connect(lambda: open_new_tab('https://opensees.berkeley.edu'))
self.actionSSL_Website.triggered.connect(lambda: open_new_tab('http://www.kim2kie.com/3_ach/SSL_Software.php'))
self.actionFeView_Website.triggered.connect(lambda: open_new_tab('http://www.kim2kie.com/3_ach/FeView/FeView.php'))
self.btn_node_label.triggered.connect(self.nodelebels)
self.actionNode_Label.triggered.connect(self.nodelebels)
self.btn_node_cord.triggered.connect(self.nodecoordinates)
self.actionNode_Coordinate.triggered.connect(self.nodecoordinates)
self.btn_load.triggered.connect(self.pointload_show)
self.actionLoad.triggered.connect(self.pointload_show)
self.actionExit.triggered.connect(self.close)
self.btn_close.triggered.connect(self.close)
self.actionMesh_Fiew.triggered.connect(self.mesh_view_model)
self.actionSmooth_View.triggered.connect(self.smoth_view_model)
self.actionWireframe.triggered.connect(self.wiremesh_model)
self.actionMesh_View_2.triggered.connect(self.mesh_view_model_deform)
self.actionSmooth_View_2.triggered.connect(self.smoth_view_model_deform)
self.actionMesh_View_Wiremesh_undeform.triggered.connect(self.mesh_wiremesh_model_deform)
self.actionSmooth_View_Wiremesh_undeform.triggered.connect(self.smooth_wiremesh_model_deform)
self.btn_datatable_static.clicked.connect(self.data_table_static)
self.actionData_Table.triggered.connect(self.data_table_static)
self.btn_datatable_modal.clicked.connect(self.data_table_modal)
self.actionData_Table_modal.triggered.connect(self.data_table_modal)
self.btn_datatable_dynamic.clicked.connect(self.data_table_dynamic)
self.actionData_Table_dynamic.triggered.connect(self.data_table_dynamic)
self.actionView_load.triggered.connect(self.load_setting_arrow)
self.reportEdit.keyReleaseEvent = self.handleKeyRelease
self.addInfoText("Opend tcl file")
self.prgb = QProgressBar(self)
self.statusBar().addPermanentWidget(self.prgb)
self.dialogs = list()
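# Updates the status-bar progress bar with the current value.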
def progress(self, value, newLines):
#self.te.append('\n'.join(newLines))
self.prgb.setValue(value)
def addInfoText(self, text):
"""Adds info text"""
return self.reportEdit.insertPlainText("\n >>"+str(text))
def handleKeyRelease(self, event):
"""Handles key inputs to report box"""
if(event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter):
self.interpretUserInput(self.reportEdit.toPlainText())
# function to uncheck the other model display style options except 'mesh view'
def mesh_view_model(self):
self.actionSmooth_View.setChecked(False)
self.actionWireframe.setChecked(False)
# function to uncheck the other model display style options except 'smooth view'
def smoth_view_model(self):
self.actionMesh_Fiew.setChecked(False)
self.actionWireframe.setChecked(False)
# function to uncheck the other model display style options except 'wireframe view'
def wiremesh_model(self):
self.actionMesh_Fiew.setChecked(False)
self.actionSmooth_View.setChecked(False)
# function to uncheck the other deformed-model display style options except 'mesh view'
def mesh_view_model_deform(self):
self.actionSmooth_View_2.setChecked(False)
self.actionMesh_View_Wiremesh_undeform.setChecked(False)
self.actionSmooth_View_Wiremesh_undeform.setChecked(False)
# function to uncheck the other deformed-model display style options except 'smooth view'
def smoth_view_model_deform(self):
self.actionMesh_View_2.setChecked(False)
self.actionMesh_View_Wiremesh_undeform.setChecked(False)
self.actionSmooth_View_Wiremesh_undeform.setChecked(False)
# function to uncheck the other deformed-model display style options except 'mesh view + wireframe'
def mesh_wiremesh_model_deform(self):
self.actionMesh_View_2.setChecked(False)
self.actionSmooth_View_2.setChecked(False)
self.actionSmooth_View_Wiremesh_undeform.setChecked(False)
# function to uncheck the other deformed-model display style options except 'smooth view + wireframe'
def smooth_wiremesh_model_deform(self):
self.actionMesh_View_2.setChecked(False)
self.actionSmooth_View_2.setChecked(False)
self.actionMesh_View_Wiremesh_undeform.setChecked(False)
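# Opens a TCL model file, prepares the output directory, plots the model and populates the result comboboxes.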
def openTCL(self):
try:
global numModes #set numModes as global variable
# create file dialog function to browse file path
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
self.fileName, _ = QFileDialog.getOpenFileName(self, "OpenSees File", "","OpenSees File (*.tcl)", options=options)
self.file_path, self.file_name = os.path.split(self.fileName)
[filename0, sep, ext] = self.file_name.partition('.')
# make path for output files
self.result_directory = os.path.join(self.file_path, r'out_files_%s' % filename0)
if not os.path.exists(self.result_directory):
# create directory for output files
os.mkdir(self.result_directory)
# clear all actors from plot interface
self.prgb.setMaximum(len(node(self.fileName)))
self.p.clear()
if self.actionSmooth_View.isChecked() == True:
# call plotter considering smooth view
plotter(self.p, self.fileName, 'smooth_view',NodeCoords(self.fileName), None, None)
elif self.actionWireframe.isChecked() == True:
# call plotter considering wiremesh view
plotter(self.p, self.fileName, 'wireframe', NodeCoords(self.fileName),None, None)
elif self.actionMesh_Fiew.isChecked() == True:
# call plotter considering mesh view
plotter(self.p, self.fileName, 'mesh_view',NodeCoords(self.fileName), None, None)
#plotter_rigiddiaphram(self.p, self.fileName, NodeCoords(self.fileName))
if (ndm_v(self.fileName))==2:
self.p.view_xy() # initial setting for 2d interface considering x-y axis view
else:
self.p.view_isometric() # initial setting for 3d interface considering isometric view
# read number of modes as "numModes"
numModes=modeNumber(self.fileName)
# clear previous item from "Mode Num." Combobox
self.cb_numNodes.clear()
if numModes.size>0:
for i in range(int(numModes)):
# add item to "Mode Num." combobox as Mode_1...
self.cb_numNodes.addItem('Mode_'+str(i+1))
self.recorder_disp, self.recorder_rot, self.recorder_force, self.recorder_moment, self.recorder_accel, self.recorder_vel = recorder_types(
self.fileName)
if self.recorder_disp==1:
# add item to "Component" combobox for displacement in static analysis result
self.cb_node_contour_static.addItem('Displacement, Ux',)
self.cb_node_contour_static.addItem('Displacement, Uy')
self.cb_node_contour_static.addItem('Displacement, Uz')
self.cb_node_contour_static.addItem('Displacement, Uxyz')
if self.recorder_rot==1:
# add item to "Component" combobox for rotation in static analysis result
self.cb_node_contour_static.addItem('Rotation, Rx')
self.cb_node_contour_static.addItem('Rotation, Ry')
self.cb_node_contour_static.addItem('Rotation, Rz')
self.cb_node_contour_static.addItem('Rotation, Rxyz')
if self.recorder_force==1:
# add item to "Component" combobox for force reaction in static analysis result
self.cb_node_contour_static.addItem('Force Reaction, RFx')
self.cb_node_contour_static.addItem('Force Reaction, RFy')
self.cb_node_contour_static.addItem('Force Reaction, RFz')
self.cb_node_contour_static.addItem('Force Reaction, RFxyz')
if self.recorder_moment==1:
# add item to "Component" combobox for moment reaction in static analysis result
self.cb_node_contour_static.addItem('Moment Reaction, RMx')
self.cb_node_contour_static.addItem('Moment Reaction, RMy')
self.cb_node_contour_static.addItem('Moment Reaction, RMz')
self.cb_node_contour_static.addItem('Moment Reaction, RMxyz')
if self.recorder_disp == 1:
# add item to "Component" combobox for displacement in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Displacement, Ux')
self.cb_node_contour_dynamic.addItem('Displacement, Uy')
self.cb_node_contour_dynamic.addItem('Displacement, Uz')
self.cb_node_contour_dynamic.addItem('Displacement, Uxyz')
if self.recorder_rot == 1:
# add item to "Component" combobox for rotation in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Rotation, Rx')
self.cb_node_contour_dynamic.addItem('Rotation, Ry')
self.cb_node_contour_dynamic.addItem('Rotation, Rz')
self.cb_node_contour_dynamic.addItem('Rotation, Rxyz')
if self.recorder_force == 1:
# add item to "Component" combobox for force reaction in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Force Reaction, RFx')
self.cb_node_contour_dynamic.addItem('Force Reaction, RFy')
self.cb_node_contour_dynamic.addItem('Force Reaction, RFz')
self.cb_node_contour_dynamic.addItem('Force Reaction, RFxyz')
if self.recorder_moment == 1:
# add item to "Component" combobox for moment reaction in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Moment Reaction, RMx')
self.cb_node_contour_dynamic.addItem('Moment Reaction, RMy')
self.cb_node_contour_dynamic.addItem('Moment Reaction, RMz')
self.cb_node_contour_dynamic.addItem('Moment Reaction, RMxyz')
if self.recorder_accel == 1:
# add item to "Component" combobox for acceleration in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Acceleration, Ax')
self.cb_node_contour_dynamic.addItem('Acceleration, Ay')
self.cb_node_contour_dynamic.addItem('Acceleration, Az')
self.cb_node_contour_dynamic.addItem('Acceleration, Axyz')
if self.recorder_vel == 1:
# add item to "Component" combobox for velocity in dynamic analysis result
self.cb_node_contour_dynamic.addItem('Velocity, Vx')
self.cb_node_contour_dynamic.addItem('Velocity, Vy')
self.cb_node_contour_dynamic.addItem('Velocity, Vz')
self.cb_node_contour_dynamic.addItem('Velocity, Vxyz')
self.setWindowTitle(
                # window title showing the file path and file name
"{}[*] - {}".format((self.fileName + ' ['+filename0)+']', 'FeView'))
try:
# show total node and element in status bar
                self.statusBar().showMessage('Total Node: '+str(len(node(self.fileName)))+'; Total Element: '+total_element(self.fileName))
except:
QMessageBox.critical(self, "Error", "No node or element found")
if self.actionView_load.isChecked()==True:
# show point load
point_load(self.fileName,self.p,load_arrow_size, load_font_size,load_arrow_color,load_font_color)
if self.actionView_Support.isChecked() == True:
# show support
support(self.fileName,self.p)
self.addInfoText("Successfully loaded file \n" + self.fileName)
except:
QMessageBox.critical(self, "Error", "Please check TCL file")
def DispStatic(self):
try:
self.btn_apply_modal.setChecked(False)
self.btn_apply_dynamic.setChecked(False)
            scalefactor = float(self.tb_sef_scale_factor.text()) # scale factor for deformation (static, modal and dynamic analysis)
if self.recorder_disp==1:
# read output files for displacement
self.outdispFile = OpenSeesOutputRead(os.path.join(self.result_directory,'Node_displacements.out'))
if step_static(self.fileName).size>0:
# number of steps for static (if dynamic/transient analysis also included)
self.step_statics=int(step_static(self.fileName))
else:
# number of steps for only static analysis
self.step_statics = len(self.outdispFile[:, 1])
self.step_dynamic = len(self.outdispFile[:, 1]) - self.step_statics # steps for transient analysis
if self.recorder_disp == 1:
# read output files for displacement
self.deformation=(out_response((os.path.join(self.result_directory,'Node_displacements.out')), self.step_statics, ndm_v(self.fileName),'all'))
self.dispNodeCoords = NodeCoords(self.fileName) + (scalefactor * self.deformation)
if self.recorder_rot == 1:
# read output files for rotation
self.rotation=(out_response((os.path.join(self.result_directory,'Node_rotations.out')), self.step_statics, ndm_v(self.fileName),'rotation_moment'))
self.outrotFile = OpenSeesOutputRead(os.path.join(self.result_directory, 'Node_rotations.out'))
if self.recorder_force == 1:
# read output files for force reaction
self.forcereaction=(out_response((os.path.join(self.result_directory,'Node_forceReactions.out')), self.step_statics, ndm_v(self.fileName),'all'))
self.outfreactFile = OpenSeesOutputRead(os.path.join(self.result_directory, 'Node_forceReactions.out'))
if self.recorder_moment == 1:
# read output files for moment reaction
self.momentreaction = (out_response((os.path.join(self.result_directory, 'Node_momentReactions.out')), self.step_statics,ndm_v(self.fileName),'rotation_moment'))
self.outmreactFile = OpenSeesOutputRead(os.path.join(self.result_directory, 'Node_momentReactions.out'))
self.p.clear()
node_contour_type = (self.cb_node_contour_static.currentText()) # get current text from "Component" combobox (Static result)
if self.actionMesh_View_2.isChecked() == True:
if node_contour_type=='Displacement, Ux':
scalars = self.deformation[:, 0]
d_max_x= np.max(np.abs(self.deformation[:, 0]))
stitle = 'Displacement, Ux (Max. = '+str(d_max_x)+')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Displacement, Uy':
scalars = self.deformation[:, 1]
d_max_y = np.max(np.abs(self.deformation[:, 1]))
stitle = 'Displacement, Uy (Max. = ' + str(d_max_y) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Displacement, Uz':
scalars = self.deformation[:, 2]
d_max_z = np.max(np.abs(self.deformation[:, 2]))
stitle = 'Displacement, Uz (Max. = ' + str(d_max_z) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Displacement, Uxyz':
scalars = self.deformation[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
d_max_xyz = np.max(np.abs(scalars))
stitle = 'Displacement, Uxyz (Max. = ' + str(d_max_xyz) + ')\n'
plotter(self.p, self.fileName, 'mesh_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Rotation, Rx':
scalars = self.rotation[:, 0]
stitle = 'Rotation, Rx (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Rotation, Ry':
scalars = self.rotation[:, 1]
stitle = 'Rotation, Ry (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Rotation, Rz':
scalars = self.rotation[:, 2]
stitle = 'Rotation, Rz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Rotation, Rxyz':
scalars = self.rotation[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Rotation, Rxyz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Force Reaction, RFx':
scalars = self.forcereaction[:, 0]
stitle = 'Force Reaction, RFx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Force Reaction, RFy':
scalars = self.forcereaction[:, 1]
stitle = 'Force Reaction, RFy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Force Reaction, RFz':
scalars = self.forcereaction[:, 2]
stitle = 'Force Reaction, RFz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Force Reaction, RFxyz':
scalars = self.forcereaction[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Force Reaction, RFxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Moment Reaction, RMx':
scalars = self.momentreaction[:, 0]
stitle = 'Moment Reaction, RMx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Moment Reaction, RMy':
scalars = self.momentreaction[:, 1]
stitle = 'Moment Reaction, RMy (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
#stitle = 'Moment Reaction, RMx\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Moment Reaction, RMz':
scalars = self.momentreaction[:, 2]
stitle = 'Moment Reaction, RMz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
#stitle = 'Moment Reaction, RMz\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif node_contour_type=='Moment Reaction, RMxyz':
scalars = self.momentreaction[:, :3]
stitle = 'Moment Reaction, RMxyz (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'mesh_view',self.dispNodeCoords, scalars, stitle)
elif self.actionSmooth_View_2.isChecked() == True:
if node_contour_type == 'Displacement, Ux':
scalars = self.deformation[:, 0]
d_max_x = np.max(np.abs(self.deformation[:, 0]))
stitle = 'Displacement, Ux (Max. = ' + str(d_max_x) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Displacement, Uy':
scalars = self.deformation[:, 1]
d_max_y = np.max(np.abs(self.deformation[:, 1]))
stitle = 'Displacement, Uy (Max. = ' + str(d_max_y) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Displacement, Uz':
scalars = self.deformation[:, 2]
d_max_z = np.max(np.abs(self.deformation[:, 2]))
stitle = 'Displacement, Uz (Max. = ' + str(d_max_z) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Displacement, Uxyz':
scalars = self.deformation[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
d_max_xyz = np.max(np.abs(scalars))
stitle = 'Displacement, Uxyz (Max. = ' + str(d_max_xyz) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Rotation, Rx':
                    scalars = self.rotation[:, 0]  # Rx is column 0, matching the mesh-view branch
stitle = 'Rotation, Rx (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Rotation, Ry':
scalars = self.rotation[:, 1]
stitle = 'Rotation, Ry (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Rotation, Rz':
                    scalars = self.rotation[:, 2]  # Rz is column 2, matching the mesh-view branch
stitle = 'Rotation, Rz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Rotation, Rxyz':
scalars = self.rotation[:, :3]
scalars = (scalars * scalars).sum(1) ** 0.5
stitle = 'Rotation, Rxyz (rad) (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
elif node_contour_type == 'Force Reaction, RFx':
scalars = self.forcereaction[:, 0]
                    stitle = 'Force Reaction, RFx (Max. = ' + str(np.max(np.abs(scalars))) + ')\n'
                    plotter(self.p, self.fileName, 'smooth_view', self.dispNodeCoords, scalars, stitle)
import os
import traceback
import tensorflow as tf
import numpy as np
import ray
from deephyper.nas.metrics import selectMetric
from deephyper.ensemble import BaseEnsemble
from deephyper.nas.run._util import set_memory_growth_for_visible_gpus
def mse(y_true, y_pred):
return tf.square(y_true - y_pred)
@ray.remote(num_cpus=1)
def model_predict(model_path, X, batch_size=32, verbose=0):
"""Perform an inference of the model located at ``model_path``.
:meta private:
Args:
        model_path (str): Path to the ``h5`` file to load to perform the inference.
        X (array): array of input data for which we perform the inference.
        batch_size (int, optional): Batch size used to perform the inference. Defaults to 32.
verbose (int, optional): Verbose option. Defaults to 0.
Returns:
array: The prediction based on the provided input data.
"""
# GPU Configuration if available
set_memory_growth_for_visible_gpus(True)
tf.keras.backend.clear_session()
model_file = model_path.split("/")[-1]
try:
if verbose:
print(f"Loading model {model_file}", flush=True)
model = tf.keras.models.load_model(model_path, compile=False)
except:
if verbose:
print(f"Could not load model {model_file}", flush=True)
traceback.print_exc()
model = None
if model:
y = model.predict(X, batch_size=batch_size)
else:
y = None
return y
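# ``topk`` is referenced by BaggingEnsemble.fit below but its definition was
# not included in this excerpt. The following is a minimal, assumed sketch:
# rank the candidate models by mean loss on the validation data and return
# the indexes of the k best. The real module may implement this differently.
def topk(loss_func, y_true, y_pred, k=2):
    # one scalar loss per candidate model
    losses = [float(np.mean(loss_func(y_true, yp))) for yp in y_pred]
    return list(np.argsort(losses)[:k])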
class BaggingEnsemble(BaseEnsemble):
"""Ensemble based on uniform averaging of the predictions of each members.
:meta private:
Args:
model_dir (str): Path to directory containing saved Keras models in .h5 format.
loss (callable): a callable taking (y_true, y_pred) as input.
size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
verbose (bool, optional): Verbose mode. Defaults to True.
ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
        batch_size (int, optional): Batch size used to batchify the inference of loaded models. Defaults to 32.
selection (str, optional): Selection strategy to build the ensemble. Value in ``["topk"]``. Default to ``topk``.
mode (str, optional): Value in ``["regression", "classification"]``. Default to ``"regression"``.
"""
def __init__(
self,
model_dir,
loss=mse,
size=5,
verbose=True,
ray_address="",
num_cpus=1,
num_gpus=None,
batch_size=32,
selection="topk",
mode="regression",
):
super().__init__(
model_dir,
loss,
size,
verbose,
ray_address,
num_cpus,
num_gpus,
batch_size,
)
assert selection in ["topk"]
self.selection = selection
assert mode in ["regression", "classification"]
self.mode = mode
def __repr__(self) -> str:
out = super().__repr__()
out += f"Mode: {self.mode}\n"
out += f"Selection: {self.selection}\n"
return out
def fit(self, X, y):
"""Fit the current algorithm to the provided data.
Args:
X (array): The input data.
y (array): The output data.
Returns:
BaseEnsemble: The current fitted instance.
"""
X_id = ray.put(X)
model_files = self._list_files_in_model_dir()
model_path = lambda f: os.path.join(self.model_dir, f)
y_pred = ray.get(
[
model_predict.options(
num_cpus=self.num_cpus, num_gpus=self.num_gpus
).remote(model_path(f), X_id, self.batch_size, self.verbose)
for f in model_files
]
)
y_pred = np.array([arr for arr in y_pred if arr is not None])
members_indexes = topk(self.loss, y_true=y, y_pred=y_pred, k=self.size)
self.members_files = [model_files[i] for i in members_indexes]
return self
def predict(self, X) -> np.ndarray:
"""Execute an inference of the ensemble for the provided data.
Args:
X (array): An array of input data.
Returns:
array: The prediction.
"""
# make predictions
X_id = ray.put(X)
model_path = lambda f: os.path.join(self.model_dir, f)
y_pred = ray.get(
[
model_predict.options(
num_cpus=self.num_cpus, num_gpus=self.num_gpus
).remote(model_path(f), X_id, self.batch_size, self.verbose)
for f in self.members_files
]
)
        y_pred = np.array([arr for arr in y_pred if arr is not None])
        # the tail of this method was truncated in the source; uniform
        # averaging is the natural completion given the class docstring
        y = np.mean(y_pred, axis=0)
        return y
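# Hypothetical usage sketch (not part of the original module). The directory
# name and the arrays below are illustrative placeholders; it assumes saved
# .h5 Keras models under "my_models/" and a reachable Ray instance.
def _demo_bagging_ensemble():
    X_valid = np.random.rand(16, 4).astype("float32")
    y_valid = np.random.rand(16, 1).astype("float32")
    X_test = np.random.rand(8, 4).astype("float32")
    ensemble = BaggingEnsemble(model_dir="my_models/", size=3, verbose=False)
    ensemble.fit(X_valid, y_valid)   # keeps the top-k members by loss
    return ensemble.predict(X_test)  # uniform average over the members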
"""Auditory Filterbanks and scales for Speech and Audio Analysis.
The Gammatone filterbank is a direct translation of <NAME>' Gammatone-like
spectrograms package [1], which is partly and a direct translation of Malcolm
Slaney's Auditory toolbox [2].
References:
[1]: https://labrosa.ee.columbia.edu/matlab/gammatonegram/
[2]: https://engineering.purdue.edu/~malcolm/interval/1998-010/
"""
import numpy as np
from scipy import signal
from .util import fftfreqz, freqz
def dft2mel(nfft, sr=8000., nfilts=0, width=1., minfrq=0., maxfrq=4000.,
sphinx=False, constamp=True):
"""Map linear discrete frequencies to Mel scale."""
if nfilts == 0:
        nfilts = int(np.ceil(hz2mel(np.array([maxfrq]), sphinx)[0]/2))
weights = np.zeros((nfilts, nfft))
# dft index -> linear frequency in hz
    dftfrqs = np.arange(nfft//2+1, dtype=float)/nfft * sr
maxmel, minmel = hz2mel(np.array([maxfrq, minfrq]), sphinx)
binfrqs = mel2hz(minmel+np.linspace(0., 1., nfilts+2)
* (maxmel-minmel), sphinx)
for i in range(nfilts):
fs = binfrqs[i:i+3].copy()
fs = fs[1] + width*(fs-fs[1]) # adjust bandwidth if needed
loslope = (dftfrqs - fs[0])/(fs[1] - fs[0])
hislope = (fs[2] - dftfrqs)/(fs[2] - fs[1])
        weights[i, 0:nfft//2+1] = np.maximum(0, np.minimum(loslope, hislope))
if constamp:
# Slaney-style mel is scaled to be approx constant E per channel
weights = np.diag(
2/(binfrqs[2:nfilts+2]-binfrqs[:nfilts])).dot(weights)
    weights[:, nfft//2+1:] = 0  # avoid aliasing
return weights, binfrqs[1:]
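# Hypothetical demo (not part of the original module): a 40-band mel weight
# matrix for a 512-point DFT at 16 kHz; all argument values are illustrative.
def _demo_dft2mel():
    weights, centers = dft2mel(512, sr=16000., nfilts=40, maxfrq=8000.)
    assert weights.shape == (40, 512)
    return weights, centers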
def hz2dft(freq, sr, nfft):
"""Map frequency in Hz to discrete Fourier transform bins.
Parameters
----------
freq: array_like
Frequency in hz
sr: int
Sampling rate in hz
nfft: int
Number of DFT bins in range [0, 2*pi)
Returns
-------
bins: array_like
Frequency bin numbers
"""
return (freq/sr * nfft).astype('int')
def hz2mel(f, sphinx=True):
"""Convert linear frequency to mel frequency scale."""
if sphinx:
return 2595. * np.log10(1+f/700.)
# match Slaney's toolbox
f0, f_sp, brkfrq = 0., 200./3, 1000.
brkpt = (brkfrq - f0) / f_sp
logstep = np.exp(np.log(6.4)/27.)
z = np.empty_like(f)
lower = f < brkfrq # np.less(f,brkfrq)
higher = np.logical_not(lower)
z[lower] = (f[lower] - f0) / f_sp
z[higher] = brkpt + np.log(f[higher]/brkfrq) / np.log(logstep)
return z
def mel2hz(z, sphinx=True):
"""Convert Mel frequency to linear frequency scale."""
if sphinx:
return 700*(10**(z/2595.)-1)
f0, f_sp, brkfrq = 0., 200./3, 1000.
brkpt = (brkfrq - f0) / f_sp
logstep = np.exp(np.log(6.4)/27.)
f = np.empty_like(z)
lower = z < brkpt # np.less(z,brkpt)
higher = np.logical_not(lower)
f[lower] = f0 + z[lower] * f_sp
f[higher] = brkfrq * np.exp(np.log(logstep)*(z[higher]-brkpt))
return f
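# Hypothetical round-trip check (not part of the original module): hz -> mel
# -> hz should recover the input on both sides of the 1 kHz break point, for
# both the Sphinx and the Slaney variants.
def _demo_mel_roundtrip():
    f = np.array([250., 1000., 4000.])
    assert np.allclose(mel2hz(hz2mel(f, sphinx=False), sphinx=False), f)
    assert np.allclose(mel2hz(hz2mel(f, sphinx=True), sphinx=True), f)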
# ERB-related Functions starting below
# Global Parameters
# Change the following three parameters if you wish to use a different
# ERB scale. Must change in MakeERBCoeffs too.
ERB_EAR_Q = 9.26449 # Glasberg and Moore Parameters
ERB_MIN_BW = 24.7
ERB_ORDER = 1
# Process an input waveform with a gammatone filter bank. This function
# takes a single sound vector, and returns an array of filter outputs, one
# channel per row.
#
# The fcoefs parameter, which completely specifies the Gammatone filterbank,
# should be designed with the MakeERBFilters function. If it is omitted,
# the filter coefficients are computed for you assuming a 22050Hz sampling
# rate and 64 filters regularly spaced on an ERB scale from fs/2 down to 100Hz.
#
# <NAME> @ Interval, June 11, 1998.
# (c) 1998 Interval Research Corporation
# Thanks to <NAME>' for his suggestions and improvements.
def erb_fbank(sig, A0, A11, A12, A13, A14, A2, B0, B1, B2, gain, cascade=True):
"""Filter a signal using ERB filterbanks."""
if cascade: # original implementation. Might be numerically more stable.
y1 = signal.lfilter([A0/gain, A11/gain, A2/gain], [B0, B1, B2], sig)
y2 = signal.lfilter([A0, A12, A2], [B0, B1, B2], y1)
y3 = signal.lfilter([A0, A13, A2], [B0, B1, B2], y2)
y = signal.lfilter([A0, A14, A2], [B0, B1, B2], y3)
return y
else: # merge the difference EQ above into one
b = np.convolve(np.convolve([A0, A11, A2], [A0, A12, A2]),
np.convolve([A0, A13, A2], [A0, A14, A2])) / gain
a = np.convolve(np.convolve([B0, B1, B2], [B0, B1, B2]),
np.convolve([B0, B1, B2], [B0, B1, B2]))
return signal.lfilter(b, a, sig)
def erb_fftfreqz(A0, A11, A12, A13, A14, A2, B0, B1, B2, gain, nfft):
"""Compute frequency reponse given one ERB filter parameters."""
ww, h1 = fftfreqz([A0/gain, A11/gain, A2/gain], [B0, B1, B2], nfft)
_, h2 = fftfreqz([A0, A12, A2], [B0, B1, B2], nfft)
_, h3 = fftfreqz([A0, A13, A2], [B0, B1, B2], nfft)
_, h4 = fftfreqz([A0, A14, A2], [B0, B1, B2], nfft)
return ww, h1*h2*h3*h4
def erb_freqz(A0, A11, A12, A13, A14, A2, B0, B1, B2, gain, omegas):
h1 = freqz([A0/gain, A11/gain, A2/gain], [B0, B1, B2], omegas)
h2 = freqz([A0, A12, A2], [B0, B1, B2], omegas)
h3 = freqz([A0, A13, A2], [B0, B1, B2], omegas)
h4 = freqz([A0, A14, A2], [B0, B1, B2], omegas)
return h1 * h2 * h3 * h4
# Directly copy from Ellis' package. Below is his description:
# This function computes the filter coefficients for a bank of
# Gammatone filters. These filters were defined by Patterson and
# Holdworth for simulating the cochlea.
#
# The result is returned as an array of filter coefficients. Each row
# of the filter arrays contains the coefficients for four second order
# filters. The transfer function for these four filters share the same
# denominator (poles) but have different numerators (zeros). All of these
# coefficients are assembled into one vector that the **ERBFilterBank**
# can take apart to implement the filter.
#
# The filter bank contains **num_chan** channels that extend from
# half the sampling rate (fs) to **low_freq**. Alternatively, if the num_chan
# input argument is a vector, then the values of this vector are taken to
# be the center frequency of each desired filter. (The low_freq argument is
# ignored in this case.)
#
# Note this implementation fixes a problem in the original code by
# computing four separate second order filters. This avoids a big
# problem with round off errors in cases of very small cfs (100Hz) and
# large sample rates (44kHz). The problem is caused by roundoff error
# when a number of poles are combined, all very close to the unit
# circle. Small errors in the eigth order coefficient, are multiplied
# when the eigth root is taken to give the pole location. These small
# errors lead to poles outside the unit circle and instability. Thanks
# to <NAME> for leading me to the proper explanation.
def erb_filters(sr, cf):
"""Construct ERB filterbanks."""
T = 1./sr
ERB = ((cf/ERB_EAR_Q)**ERB_ORDER + ERB_MIN_BW**ERB_ORDER)**(1/ERB_ORDER)
B = 1.019*2*np.pi*ERB
A0 = T
A2 = 0.
B0 = 1.
B1 = -2*np.cos(2*cf*np.pi*T) / np.exp(B*T)
B2 = np.exp(-2*B*T)
A11 = -(2*T*np.cos(2*cf*np.pi*T)/np.exp(B*T)
+ 2*np.sqrt(3+2**1.5)*T*np.sin(2*cf*np.pi*T) / np.exp(B*T))/2
A12 = -(2*T*np.cos(2*cf*np.pi*T)/np.exp(B*T)
- 2*np.sqrt(3+2**1.5)*T*np.sin(2*cf*np.pi*T) / np.exp(B*T))/2
A13 = -(2*T*np.cos(2*cf*np.pi*T)/np.exp(B*T)
+ 2*np.sqrt(3-2**1.5)*T*np.sin(2*cf*np.pi*T) / np.exp(B*T))/2
A14 = -(2*T*np.cos(2*cf*np.pi*T)/np.exp(B*T)
- 2*np.sqrt(3-2**1.5)*T*np.sin(2*cf*np.pi*T) / np.exp(B*T))/2
gain = np.abs(
(-2*np.exp(4*1j*cf*np.pi*T)*T
+ 2*np.exp(-(B*T) + 2*1j*cf*np.pi*T)*T
* (np.cos(2*cf*np.pi*T) - np.sqrt(3 - 2**(3./2))
* np.sin(2*cf*np.pi*T)))
* (-2*np.exp(4*1j*cf*np.pi*T)*T
+ 2*np.exp(-(B*T) + 2*1j*cf*np.pi*T)*T
* (np.cos(2*cf*np.pi*T) + np.sqrt(3 - 2**(3./2))
* np.sin(2*cf*np.pi*T)))
* (-2*np.exp(4*1j*cf*np.pi*T)*T
+ 2*np.exp(-(B*T) + 2*1j*cf*np.pi*T)*T
* (np.cos(2*cf*np.pi*T)
- np.sqrt(3 + 2**(3./2))*np.sin(2*cf*np.pi*T)))
* (-2*np.exp(4*1j*cf*np.pi*T)*T+2*np.exp(-(B*T)+2*1j*cf*np.pi*T)*T
* (np.cos(2*cf*np.pi*T)
           + np.sqrt(3+2**(3./2))*np.sin(2*cf*np.pi*T)))
        / (-2 / np.exp(2*B*T) - 2*np.exp(4*1j*cf*np.pi*T)
           + 2*(1 + np.exp(4*1j*cf*np.pi*T))/np.exp(B*T))**4)
    # The tail of this function was truncated in the source; the denominator
    # above and the return below follow Slaney's MakeERBFilters gain formula,
    # and the tuple order matches what erb_fbank and erb_fftfreqz expect.
    return A0, A11, A12, A13, A14, A2, B0, B1, B2, gain
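# Hypothetical usage sketch (not part of the original module): one ERB channel
# at 1 kHz, run over white noise both as the 4-stage cascade and as the merged
# 8th-order filter; the two outputs should agree up to numerical error.
def _demo_erb():
    coeffs = erb_filters(16000., 1000.)
    noise = np.random.randn(4096)
    y_cascade = erb_fbank(noise, *coeffs, cascade=True)
    y_merged = erb_fbank(noise, *coeffs, cascade=False)
    return y_cascade, y_merged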
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Module for data simulation, to test algorithms utilized in diagnostics.
<NAME> <<EMAIL>>
2017-03-27 11:22:25 AM EDT
"""
from phantasy.library.physics import Point
import numpy as np
class Distribution(object):
"""Particle distribution for transverse plane, i.e. ``x-o-y`` plane,
default is Gaussian distribution.
Parameters
----------
x0 : float
Mean value along ``x`` direction.
y0 : float
Mean value along ``y`` direction.
sx : float
Standard deviation along ``x`` direction.
sy : float
Standard deviation along ``y`` direction.
N : int
Total point number of particle distribution.
Keyword Arguments
-----------------
mean : list
Central point, ``[x0, y0]``, overrides *x0* and *y0*.
cov : list
Covariance matrix, overrides *sx* and *sy*.
rho : float
Correlation between ``x`` and ``y``, should be within ``[-1, 1]``.
distfile : string
Name of data file to load distribution, contains x and y data,
if *distfile* is valid, the internal data generation would be
ignored.
distdata : array
Array with shape of ``(2,n)`` to initialize distribution.
"""
def __init__(self, x0=0, y0=0, sx=0.1, sy=0.1, N=1000, **kws):
self.distype = None
distfile = kws.get('distfile', None)
distdata = kws.get('distdata', None)
# try to load data from array
if distdata is not None:
self.particles = distdata
else:
# generate internally
if not self.load_distfile(distfile):
self._x, self._y = None, None
if kws.get('mean', None) is not None:
mean = kws.get('mean')
else:
mean = [x0, y0]
if kws.get('cov', None) is not None:
cov = kws.get('cov')
else:
                    rho = kws.get('rho', None)
                    # guard against the default None before the range check
                    if rho is not None and -1.0 <= rho <= 1.0:
                        cxy = rho * sx * sy
                    else:
                        cxy = 0
cov = [[sx ** 2, cxy], [cxy, sy ** 2]]
self.distype = 'gaussian'
self.particles = Distribution.generate_gaussian_distrubution(
mean, cov, N)
else:
# load from external file
print("Load distribution from '{}'".format(distfile))
def load_distfile(self, distfile):
try:
data = np.loadtxt(distfile)
if data.shape[0] == 2:
self._x, self._y = data
else:
self._x, self._y = data.T
self.distype = 'external'
return True
except:
return False
@property
def particles(self):
"""tuple: Array of x, y distribution."""
return self._x, self._y
@particles.setter
def particles(self, p):
self._x, self._y = p
@staticmethod
def generate_gaussian_distrubution(mean, cov, N):
"""Generate random two-dimensional distribution.
"""
        x, y = np.random.multivariate_normal(mean, cov, N).T
        return x, y
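# Hypothetical usage sketch (not part of the original module): a correlated
# transverse Gaussian distribution; all parameter values are illustrative.
def _demo_distribution():
    d = Distribution(x0=0.0, y0=0.0, sx=0.2, sy=0.1, N=5000, rho=0.5)
    x, y = d.particles
    return x.std(), y.std()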
# Copyright (C) by <NAME>. See LICENSE.txt for licensing information.
import unittest
from PMC import *
class TestOperatorOverloads(unittest.TestCase):
def test_pmc_numbers_equal(self):
self.assertEqual(Py2PMC(42), Py2PMC(42))
self.assertEqual(Py2PMC(4.2), Py2PMC(4.2))
self.assertNotEqual(Py2PMC(4.2), Py2PMC(2.4))
def test_pmc_arrays_equal(self):
try: import numpy
except ImportError: return
self.assertEqual(Py2PMC(numpy.array([], numpy.int16)), Py2PMC(numpy.array([], numpy.int16)))
        self.assertEqual(Py2PMC(numpy.array([1, 2, 3, 4], numpy.int16)),
                         Py2PMC(numpy.array([1, 2, 3, 4], numpy.int16)))
import matplotlib.pyplot as plt
import os
import numpy as np
SMALL_SIZE = 24
MEDIUM_SIZE = 24
BIGGER_SIZE = 24
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
base_dir = './results_usrnet_8iter'
result_list = sorted(os.listdir(base_dir))
default_psnr = list()
default_ssim = list()
k_gan_psnr = list()
k_gan_ssim = list()
noise_est_psnr = list()
noise_est_ssim = list()
noise_kernel_psnr = list()
noise_kernel_ssim = list()
for folder in result_list:
if not os.path.exists(os.path.join(base_dir,folder,'psnr_log.txt')):
continue
with open(os.path.join(base_dir,folder,'psnr_log.txt'), 'r') as f:
data = f.readlines()
default_psnr.append(float(data[0].split()[4]))
default_ssim.append(float(data[0].split()[-1]))
k_gan_psnr.append(float(data[1].split()[4]))
k_gan_ssim.append(float(data[1].split()[-1]))
noise_est_psnr.append(float(data[2].split()[4]))
noise_est_ssim.append(float(data[2].split()[-1]))
noise_kernel_psnr.append(float(data[3].split()[4]))
noise_kernel_ssim.append(float(data[3].split()[-1]))
print(f'Default PSNR: {np.mean(default_psnr):.3f}, STD: {np.std(default_psnr):.3f}')
print(f'Default SSIM: {np.mean(default_ssim):.3f}, STD: {np.std(default_ssim):.3f}')
print(f'noise PSNR: {np.mean(noise_est_psnr):.3f}, STD: {np.std(noise_est_psnr):.3f}')
print(f'noise SSIM: {np.mean(noise_est_ssim):.3f}, STD: {np.std(noise_est_ssim):.3f}')
import glob
import os
from typing import List, Tuple
import cv2
import h5py
import numpy as np
import scipy.io as sio
from tqdm import tqdm
from mmhuman3d.core.conventions.keypoints_mapping import convert_kps
from mmhuman3d.data.data_structures.human_data import HumanData
from .base_converter import BaseModeConverter
from .builder import DATA_CONVERTERS
@DATA_CONVERTERS.register_module()
class MpiInf3dhpConverter(BaseModeConverter):
"""MPI-INF-3DHP dataset `Monocular 3D Human Pose Estimation In The Wild
Using Improved CNN Supervision' 3DC`2017 More details can be found in the
`paper.
<https://arxiv.org/pdf/1611.09813.pdf>`__.
Args:
modes (list): 'test' or 'train' for accepted modes
extract_img (bool): Store True to extract images into a separate
folder. Default: False.
"""
ACCEPTED_MODES = ['test', 'train']
def __init__(self, modes: List = [], extract_img: bool = False) -> None:
super(MpiInf3dhpConverter, self).__init__(modes)
self.extract_img = extract_img
def extract_keypoints(
self, keypoints2d: np.ndarray, keypoints3d: np.ndarray,
num_keypoints: int
) -> Tuple[bool, np.ndarray, np.ndarray, List[float]]:
"""Check keypoints validiy and add confidence and bbox."""
bbox_xyxy = [
min(keypoints2d[:, 0]),
min(keypoints2d[:, 1]),
max(keypoints2d[:, 0]),
max(keypoints2d[:, 1])
]
bbox_xyxy = self._bbox_expand(bbox_xyxy, scale_factor=1.2)
bbox_xywh = self._xyxy2xywh(bbox_xyxy)
# check that all joints are visible
h, w = 2048, 2048
x_in = np.logical_and(keypoints2d[:, 0] < w, keypoints2d[:, 0] >= 0)
y_in = np.logical_and(keypoints2d[:, 1] < h, keypoints2d[:, 1] >= 0)
ok_pts = np.logical_and(x_in, y_in)
        # a frame is valid only if every joint lies inside the image
        valid = np.sum(ok_pts) >= num_keypoints
        # add confidence column
        keypoints2d = np.hstack([keypoints2d, np.ones((num_keypoints, 1))])
        keypoints3d = np.hstack([keypoints3d, np.ones((num_keypoints, 1))])
return valid, keypoints2d, keypoints3d, bbox_xywh
def convert_by_mode(self, dataset_path: str, out_path: str,
mode: str) -> dict:
"""
Args:
dataset_path (str): Path to directory where raw images and
annotations are stored.
out_path (str): Path to directory to save preprocessed npz file
mode (str): Mode in accepted modes
Returns:
dict:
A dict containing keys image_path, bbox_xywh, keypoints2d,
keypoints2d_mask, keypoints3d, keypoints3d_mask stored in
HumanData() format
"""
# use HumanData to store all data
human_data = HumanData()
image_path_, bbox_xywh_, keypoints2d_, keypoints3d_ = [], [], [], []
# training data
if mode == 'train':
user_list = range(1, 9)
seq_list = range(1, 3)
vid_list = list(range(3)) + list(range(4, 9))
counter = 0
for user_i in tqdm(user_list, desc='user list'):
for seq_i in seq_list:
seq_path = os.path.join(dataset_path, 'S' + str(user_i),
'Seq' + str(seq_i))
# mat file with annotations
annot_file = os.path.join(seq_path, 'annot.mat')
annot2 = sio.loadmat(annot_file)['annot2']
annot3 = sio.loadmat(annot_file)['annot3']
for j, vid_i in tqdm(enumerate(vid_list), desc='vid list'):
# image folder
imgs_path = os.path.join(seq_path,
'video_' + str(vid_i))
# extract frames from video file
if self.extract_img:
# if doesn't exist
if not os.path.isdir(imgs_path):
os.makedirs(imgs_path)
# video file
vid_file = os.path.join(
seq_path, 'imageSequence',
'video_' + str(vid_i) + '.avi')
vidcap = cv2.VideoCapture(vid_file)
# process video
frame = 0
while 1:
# extract all frames
success, image = vidcap.read()
if not success:
break
frame += 1
# image name
imgname = os.path.join(
imgs_path, 'frame_%06d.jpg' % frame)
# save image
cv2.imwrite(imgname, image)
# per frame
pattern = os.path.join(imgs_path, '*.jpg')
img_list = glob.glob(pattern)
for i, img_i in enumerate(sorted(img_list)):
# for each image we store the relevant annotations
img_name = img_i.split('/')[-1]
image_path = os.path.join('S' + str(user_i),
'Seq' + str(seq_i),
'video_' + str(vid_i),
img_name)
# 2D keypoints
keypoints2d = np.reshape(annot2[vid_i][0][i],
(28, 2))
# 3D keypoints
keypoints3d = np.reshape(annot3[vid_i][0][i],
(28, 3)) / 1000
keypoints3d = keypoints3d - keypoints3d[
4] # 4 is the root
valid, keypoints2d, keypoints3d, bbox_xywh = \
self.extract_keypoints(
keypoints2d, keypoints3d, 28)
if not valid:
continue
# because of the dataset size,
# we only keep every 10th frame
counter += 1
if counter % 10 != 1:
continue
# store the data
image_path_.append(image_path)
bbox_xywh_.append(bbox_xywh)
keypoints2d_.append(keypoints2d)
keypoints3d_.append(keypoints3d)
bbox_xywh_ = np.array(bbox_xywh_).reshape((-1, 4))
bbox_xywh_ = np.hstack(
[bbox_xywh_, np.ones([bbox_xywh_.shape[0], 1])])
keypoints2d_ = np.array(keypoints2d_).reshape((-1, 28, 3))
keypoints2d_, mask = convert_kps(keypoints2d_, 'mpi_inf_3dhp',
'human_data')
keypoints3d_ = np.array(keypoints3d_).reshape((-1, 28, 4))
keypoints3d_, _ = convert_kps(keypoints3d_, 'mpi_inf_3dhp',
'human_data')
elif mode == 'test':
# test data
user_list = range(1, 7)
for user_i in tqdm(user_list, desc='user'):
seq_path = os.path.join(dataset_path, 'mpi_inf_3dhp_test_set',
'TS' + str(user_i))
# mat file with annotations
annot_file = os.path.join(seq_path, 'annot_data.mat')
mat_as_h5 = h5py.File(annot_file, 'r')
annot2 = np.array(mat_as_h5['annot2'])
annot3 = np.array(mat_as_h5['univ_annot3'])
valid = np.array(mat_as_h5['valid_frame'])
for frame_i, valid_i in tqdm(enumerate(valid), desc='frame'):
if valid_i == 0:
continue
image_path = os.path.join(
'mpi_inf_3dhp_test_set', 'TS' + str(user_i),
'imageSequence',
'img_' + str(frame_i + 1).zfill(6) + '.jpg')
keypoints2d = annot2[frame_i, 0, :, :]
keypoints3d = annot3[frame_i, 0, :, :] / 1000
keypoints3d = keypoints3d - keypoints3d[14] # 14 is pelvis
valid, keypoints2d, keypoints3d, bbox_xywh = \
self.extract_keypoints(keypoints2d, keypoints3d, 17)
if not valid:
continue
# store the data
image_path_.append(image_path)
bbox_xywh_.append(bbox_xywh)
keypoints2d_.append(keypoints2d)
keypoints3d_.append(keypoints3d)
            bbox_xywh_ = np.array(bbox_xywh_).reshape((-1, 4))
import numpy as np
from .Gaussianformula.baseFunc import *
from .Gaussianformula.ordering import *
import matplotlib.pyplot as plt
class Gaussian():
def __init__(self, N):
self.N = N
self.V = (np.eye(2 * N)) * 0.5
self.mu = np.zeros(2 * N)
def mean(self, idx):
res = np.copy(self.mu[2 * idx:2 * idx + 2])
return res
def cov(self, idx):
res = np.copy(self.V[(2 * idx):(2 * idx + 2), (2 * idx):(2 * idx + 2)])
return res
def S(self, idx, r):
self.Xsqueeze(idx, r)
def Xsqueeze(self, idx, r):
idx = 2 * idx
S = np.eye(2 * self.N)
S[idx:idx+2, idx:idx+2] = np.array([[np.exp(-r), 0], [0, np.exp(r)]])
self.V = np.dot(S, np.dot(self.V, S.T))
self.mu = np.dot(S, self.mu)
def Psqueeze(self, idx, r):
idx = 2 * idx
S = np.eye(2 * self.N)
S[idx:idx+2, idx:idx+2] = np.array([[np.exp(r), 0], [0, np.exp(-r)]])
self.V = np.dot(S, np.dot(self.V, S.T))
self.mu = np.dot(S, self.mu)
def R(self, idx, theta):
idx = 2 * idx
S = np.eye(2 * self.N)
S[idx:idx+2, idx:idx+2] = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
self.V = np.dot(S, np.dot(self.V, S.T))
self.mu = np.dot(S, self.mu)
# 10.1103/RevModPhys.77.513
def BS(self, idx1, idx2, theta):
idx1 = 2 * idx1
idx2 = 2 * idx2
S = np.eye(2 * self.N)
S[idx1:idx1+2, idx1:idx1+2] = np.array([[np.cos(theta), 0], [0, np.cos(theta)]])
S[idx1:idx1+2, idx2:idx2+2] = np.array([[np.sin(theta), 0], [0, np.sin(theta)]])
S[idx2:idx2+2, idx1:idx1+2] = np.array([[-np.sin(theta), 0], [0, -np.sin(theta)]])
S[idx2:idx2+2, idx2:idx2+2] = np.array([[np.cos(theta), 0], [0, np.cos(theta)]])
self.V = np.dot(S, np.dot(self.V, S.T))
self.mu = np.dot(S, self.mu)
def twoModeSqueezing(self, idx1, idx2, r):
idx1 = 2 * idx1
idx2 = 2 * idx2
S = np.eye(2 * self.N)
S[idx1:idx1+2, idx1:idx1+2] = np.array([[np.cosh(r), 0], [0, np.cosh(r)]])
S[idx1:idx1+2, idx2:idx2+2] = np.array([[np.sinh(r), 0], [0, -np.sinh(r)]])
S[idx2:idx2+2, idx1:idx1+2] = np.array([[np.sinh(r), 0], [0, -np.sinh(r)]])
S[idx2:idx2+2, idx2:idx2+2] = np.array([[np.cosh(r), 0], [0, np.cosh(r)]])
self.V = np.dot(S, np.dot(self.V, S.T))
self.mu = np.dot(S, self.mu)
def D(self, idx, alpha):
        dx = np.real(alpha)
        # the rest of D (applying the displacement alpha to self.mu) is
        # truncated in the source
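# Hypothetical usage sketch (not part of the original module), restricted to
# the fully defined methods above: squeeze mode 0 of a two-mode vacuum, mix
# the modes on a 50:50 beamsplitter, then inspect mode 0.
def _demo_gaussian():
    g = Gaussian(2)
    g.S(0, 0.5)            # x-squeeze on mode 0
    g.BS(0, 1, np.pi / 4)  # 50:50 beamsplitter between modes 0 and 1
    return g.mean(0), g.cov(0)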
""" Layout helper functions.
Author: <NAME> @thomaslima
and <NAME> @lukasc-ubc
The following functions are useful for scripted layout, or making
PDK Pcells.
TODO: enhance documentation
TODO: make some of the functions in util use these.
"""
from itertools import repeat
import pya
import numpy as np
from numpy import cos, sin, pi, sqrt
from functools import reduce
from .sampling import sample_function
from .geometry import rotate90, rotate, bezier_optimal, curve_length
def insert_shape(cell, layer, shape):
if layer is not None:
cell.shapes(layer).insert(shape)
class DSimplePolygon(pya.DSimplePolygon):
""" DSimplePolygon with some added functionalities:
- transform_and_rotate
- clip
- layout
- layout_drc_exclude
- resize
- round_corners
"""
def transform_and_rotate(self, center, ex=None):
""" Translates the polygon by 'center' and rotates by the 'ex' orientation.
Example: if current polygon is a unit square with bottom-left corner at (0,0),
then square.transform_and_rotate(DPoint(0, 1), DVector(0, 1)) will
rotate the square by 90 degrees and translate it by 1 y-unit.
The new square's bottom-left corner will be at (-1, 1).
"""
if ex is None:
ex = pya.DPoint(1, 0)
ey = rotate90(ex)
polygon_dpoints_transformed = [center + p.x *
ex + p.y * ey for p in self.each_point()]
self.assign(pya.DSimplePolygon(polygon_dpoints_transformed))
return self
def clip(self, x_bounds=(-np.inf, np.inf), y_bounds=(-np.inf, np.inf)):
''' Clips the polygon at four possible boundaries.
The boundaries are tuples based on absolute coordinates and cartesian axes.
This method is very powerful when used with transform_and_rotate.
'''
# Add points exactly at the boundary, so that the filter below works.
x_bounds = (np.min(x_bounds), np.max(x_bounds))
y_bounds = (np.min(y_bounds), np.max(y_bounds))
check_within_bounds = lambda p: x_bounds[0] <= p.x and x_bounds[1] >= p.x and \
y_bounds[0] <= p.y and y_bounds[1] >= p.y
def intersect_left_boundary(p1, p2, x_bounds, y_bounds):
left_most, right_most = (p1, p2) if p1.x < p2.x else (p2, p1)
bottom_most, top_most = (p1, p2) if p1.y < p2.y else (p2, p1)
if left_most.x < x_bounds[0]:
# intersection only if right_most crosses x_bound[0]
if right_most.x > x_bounds[0]:
# outside the box, on the left
y_intersect = np.interp(x_bounds[0], [left_most.x, right_most.x], [
left_most.y, right_most.y])
if y_bounds[0] < y_intersect and y_bounds[1] > y_intersect:
return pya.DPoint(float(x_bounds[0]), float(y_intersect))
return None
def intersect(p1, p2, x_bounds, y_bounds):
intersect_list = list()
last_intersect = None
def rotate_bounds90(x_bounds, y_bounds, i_times):
for i in range(i_times):
x_bounds, y_bounds = (-y_bounds[1], -y_bounds[0]), (x_bounds[0], x_bounds[1])
return x_bounds, y_bounds
for i in range(4):
p1i, p2i = rotate(p1, i * pi / 2), rotate(p2, i * pi / 2)
x_boundsi, y_boundsi = rotate_bounds90(x_bounds, y_bounds, i)
p = intersect_left_boundary(p1i, p2i, x_boundsi, y_boundsi)
if p is not None:
last_intersect = i
intersect_list.append(rotate(p, -i * pi / 2))
return intersect_list, last_intersect
polygon_dpoints_clipped = list()
polygon_dpoints = list(self.each_point())
def boundary_vertex(edge_from, edge_to):
# left edge:0, top edge:1 etc.
# returns the vertex between two edges
assert abs(edge_from - edge_to) == 1
if edge_from % 2 == 0:
vertical_edge = edge_from
horizontal_edge = edge_to
else:
vertical_edge = edge_to
horizontal_edge = edge_from
x = x_bounds[(vertical_edge // 2) % 2]
y = y_bounds[((horizontal_edge - 1) // 2) % 2]
return pya.DPoint(x, y)
# Rotate point list so we can start from a point inside
# (helps the boundary_vertex algorithm)
for idx, point in enumerate(polygon_dpoints):
if check_within_bounds(point):
break
else:
# polygon was never within bounds
# this can only happen if boundaries are finite
# return boundary vertices
boundary_vertices = [boundary_vertex(i, i - 1) for i in range(4, 0, -1)]
self.assign(pya.DSimplePolygon(boundary_vertices))
return self
idx += 1 # make previous_point below already be inside
polygon_dpoints = polygon_dpoints[idx:] + polygon_dpoints[:idx]
previous_point = polygon_dpoints[-1]
previous_intersect = None
for point in polygon_dpoints:
# compute new intersecting point and add to list
intersected_points, last_intersect = intersect(
previous_point, point, x_bounds, y_bounds)
if previous_intersect is not None and last_intersect is not None and \
last_intersect != previous_intersect:
if check_within_bounds(point):
# this means that we are entering the box at a different edge
# need to add the edge points
# this assumes a certain polygon orientation
# assume points go counterlockwise, which means that
# from edge 0 to 2, it goes through 3
i = previous_intersect
while i % 4 != last_intersect:
polygon_dpoints_clipped.append(boundary_vertex(i, i - 1))
i = i - 1
polygon_dpoints_clipped.extend(intersected_points)
if check_within_bounds(point):
polygon_dpoints_clipped.append(point)
previous_point = point
if last_intersect is not None:
previous_intersect = last_intersect
self.assign(pya.DSimplePolygon(polygon_dpoints_clipped))
return self
def layout(self, cell, layer):
""" Places polygon as a shape into a cell at a particular layer."""
return insert_shape(cell, layer, self)
def layout_drc_exclude(self, cell, drclayer, ex):
""" Places a drc exclude square at every corner.
A corner is defined by an outer angle greater than 85 degrees (conservative)
"""
if drclayer is not None:
points = list(self.each_point())
assert len(points) > 3
prev_delta = points[-1] - points[-2]
prev_angle = np.arctan2(prev_delta.y, prev_delta.x)
for i in range(len(points)):
delta = points[i] - points[i - 1]
angle = np.arctan2(delta.y, delta.x)
if delta.y == 0 or delta.x == 0:
thresh_angle = pi / 2
else:
thresh_angle = pi * 85 / 180
delta_angle = angle - prev_angle
delta_angle = abs(((delta_angle + pi) % (2 * pi)) - pi)
if delta_angle > thresh_angle:
layout_square(cell, drclayer, points[i - 1], 0.1, ex)
prev_delta, prev_angle = delta, angle
def resize(self, dx, dbu):
""" Resizes the polygon by a positive or negative quantity dx.
Args:
dbu: typically 0.001
"""
dpoly = pya.DPolygon(self)
dpoly.size(dx, 5)
dpoly = pya.EdgeProcessor().simple_merge_p2p([dpoly.to_itype(dbu)], False, False, 1)
dpoly = dpoly[0].to_dtype(dbu) # pya.DPolygon
def norm(p):
return sqrt(p.x**2 + p.y**2)
# Filter edges if they are too small
points = list(dpoly.each_point_hull())
new_points = list([points[0]])
for i in range(0, len(points)):
delta = points[i] - new_points[-1]
if norm(delta) > min(10 * dbu, abs(dx)):
new_points.append(points[i])
sdpoly = DSimplePolygon(new_points) # convert to SimplePolygon
self.assign(sdpoly)
return self
def round_corners(self, radius, N):
""" This only works if the polygon edges are longer than the radius."""
dpoly = super().round_corners(radius, radius, N)
self.assign(dpoly)
return self
def moved(self, dx_or_dpoint, dy=None):
if isinstance(dx_or_dpoint, (pya.DPoint, pya.DVector)):
            dy = dx_or_dpoint.y
            dx_or_dpoint = dx_or_dpoint.x
pya_dpoly = super().moved(dx_or_dpoint, dy)
siepic_dpoly = self.__class__()
siepic_dpoly.__dict__.update(pya_dpoly)
return siepic_dpoly
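# Hypothetical demo (not part of the original module): clip a counterclockwise
# unit square so that only the part with x <= 0.5 survives.
def _demo_clip():
    square = DSimplePolygon([pya.DPoint(0, 0), pya.DPoint(1, 0),
                             pya.DPoint(1, 1), pya.DPoint(0, 1)])
    return square.clip(x_bounds=(-np.inf, 0.5))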
def waveguide_dpolygon(points_list, width, dbu, smooth=True):
""" Returns a polygon outlining a waveguide.
This was updated over many iterations of failure. It can be used for both
smooth optical waveguides or DC metal traces with corners. It is better
than klayout's Path because it can have varying width.
Args:
points_list: list of pya.DPoint (at least 2 points)
width (microns): constant or list. If list, then it has to have the same length as points
dbu: dbu: typically 0.001, only used for accuracy calculations.
smooth: tries to smooth final polygons to avoid very sharp edges (greater than 130 deg)
Returns:
polygon DPoints
"""
if len(points_list) < 2:
raise NotImplementedError("ERROR: points_list too short")
return
def norm(self):
return sqrt(self.x**2 + self.y**2)
try:
if len(width) == len(points_list):
width_iterator = iter(width)
elif len(width) == 2:
# assume width[0] is initial width and
# width[1] is final width
# interpolate with points_list
L = curve_length(points_list)
distance = 0
widths_list = [width[0]]
widths_func = lambda t: (1 - t) * width[0] + t * width[1]
old_point = points_list[0]
for point in points_list[1:]:
distance += norm(point - old_point)
old_point = point
widths_list.append(widths_func(distance / L))
width_iterator = iter(widths_list)
else:
width_iterator = repeat(width[0])
except TypeError:
width_iterator = repeat(width)
finally:
points_iterator = iter(points_list)
points_low = list()
points_high = list()
def cos_angle(point1, point2):
cos_angle = point1 * point2 / norm(point1) / norm(point2)
# ensure it's between -1 and 1 (nontrivial numerically)
if abs(cos_angle) > 1:
return cos_angle / abs(cos_angle)
else:
return cos_angle
def sin_angle(point1, point2):
return sin(np.arccos(cos_angle(point1, point2)))
point_width_list = list(zip(points_iterator, width_iterator))
N = len(point_width_list)
first_point, first_width = point_width_list[0]
next_point, next_width = point_width_list[1]
delta = next_point - first_point
theta = np.arctan2(delta.y, delta.x)
first_high_point = first_point + 0.5 * first_width * \
pya.DPoint(cos(theta + pi / 2), sin(theta + pi / 2))
first_low_point = first_point + 0.5 * first_width * \
pya.DPoint(cos(theta - pi / 2), sin(theta - pi / 2))
points_high.append(first_high_point)
points_low.append(first_low_point)
for i in range(1, N - 1):
prev_point, prev_width = point_width_list[i - 1]
point, width = point_width_list[i]
next_point, next_width = point_width_list[i + 1]
delta_prev = point - prev_point
delta_next = next_point - point
theta_prev = np.arctan2(delta_prev.y, delta_prev.x)
theta_next = np.arctan2(delta_next.y, delta_next.x)
next_point_high = (next_point + 0.5 * next_width *
pya.DPoint(cos(theta_next + pi / 2), sin(theta_next + pi / 2)))
next_point_low = (next_point + 0.5 * next_width *
pya.DPoint(cos(theta_next - pi / 2), sin(theta_next - pi / 2)))
forward_point_high = (point + 0.5 * width *
pya.DPoint(cos(theta_next + pi / 2), sin(theta_next + pi / 2)))
forward_point_low = (point + 0.5 * width *
pya.DPoint(cos(theta_next - pi / 2), sin(theta_next - pi / 2)))
prev_point_high = (prev_point + 0.5 * prev_width *
pya.DPoint(cos(theta_prev + pi / 2), sin(theta_prev + pi / 2)))
prev_point_low = (prev_point + 0.5 * prev_width *
pya.DPoint(cos(theta_prev - pi / 2), sin(theta_prev - pi / 2)))
backward_point_high = (point + 0.5 * width *
pya.DPoint(cos(theta_prev + pi / 2), sin(theta_prev + pi / 2)))
backward_point_low = (point + 0.5 * width *
pya.DPoint(cos(theta_prev - pi / 2), sin(theta_prev - pi / 2)))
fix_angle = lambda theta: ((theta + pi) % (2 * pi)) - pi
# High point decision
next_high_edge = pya.DEdge(forward_point_high, next_point_high)
prev_high_edge = pya.DEdge(backward_point_high, prev_point_high)
if next_high_edge.crossed_by(prev_high_edge):
intersect_point = next_high_edge.crossing_point(prev_high_edge)
points_high.append(intersect_point)
else:
cos_dd = cos_angle(delta_next, delta_prev)
if width * (1 - cos_dd) > dbu and fix_angle(theta_next - theta_prev) < 0:
points_high.append(backward_point_high)
points_high.append(forward_point_high)
else:
points_high.append((backward_point_high + forward_point_high) * 0.5)
# Low point decision
next_low_edge = pya.DEdge(forward_point_low, next_point_low)
prev_low_edge = pya.DEdge(backward_point_low, prev_point_low)
if next_low_edge.crossed_by(prev_low_edge):
intersect_point = next_low_edge.crossing_point(prev_low_edge)
points_low.append(intersect_point)
else:
cos_dd = cos_angle(delta_next, delta_prev)
if width * (1 - cos_dd) > dbu and fix_angle(theta_next - theta_prev) > 0:
points_low.append(backward_point_low)
points_low.append(forward_point_low)
else:
points_low.append((backward_point_low + forward_point_low) * 0.5)
last_point, last_width = point_width_list[-1]
point, width = point_width_list[-2]
delta = last_point - point
theta = np.arctan2(delta.y, delta.x)
final_high_point = last_point + 0.5 * last_width * \
pya.DPoint(cos(theta + pi / 2), sin(theta + pi / 2))
final_low_point = last_point + 0.5 * last_width * \
pya.DPoint(cos(theta - pi / 2), sin(theta - pi / 2))
if (final_high_point - points_high[-1]) * delta > 0:
points_high.append(final_high_point)
if (final_low_point - points_low[-1]) * delta > 0:
points_low.append(final_low_point)
# Append point only if change in direction is less than 130 degrees.
def smooth_append(point_list, point):
if len(point_list) < 1:
point_list.append(point)
return point_list
elif len(point_list) < 2:
curr_edge = point - point_list[-1]
if norm(curr_edge) >= dbu:
point_list.append(point)
return point_list
curr_edge = point - point_list[-1]
if norm(curr_edge) >= dbu:
prev_edge = point_list[-1] - point_list[-2]
if norm(prev_edge) * abs(sin_angle(curr_edge + prev_edge, prev_edge)) > dbu:
if smooth:
# avoid corners when smoothing
if cos_angle(curr_edge, prev_edge) > cos(130 / 180 * pi):
point_list.append(point)
else:
# edge case when there is prev_edge is small and
# needs to be deleted to get rid of the corner
if norm(curr_edge) > norm(prev_edge):
point_list[-1] = point
else:
point_list.append(point)
# avoid unnecessary points
else:
point_list[-1] = point
return point_list
    if False:  # flip to True to debug the smoothing step
print("Points to be smoothed:")
for point, width in point_width_list:
print(point, width)
smooth_points_high = list(reduce(smooth_append, points_high, list()))
smooth_points_low = list(reduce(smooth_append, points_low, list()))
# smooth_points_low = points_low
# polygon_dpoints = points_high + list(reversed(points_low))
# polygon_dpoints = list(reduce(smooth_append, polygon_dpoints, list()))
polygon_dpoints = smooth_points_high + list(reversed(smooth_points_low))
return DSimplePolygon(polygon_dpoints)
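# Hypothetical demo (not part of the original module): a straight adiabatic
# taper from 0.5 um to 3.0 um width over 10 um, using the two-element width
# form of waveguide_dpolygon; the sampling density is an arbitrary choice.
def _demo_taper_polygon():
    pts = [pya.DPoint(x, 0) for x in np.linspace(0, 10, 21)]
    return waveguide_dpolygon(pts, width=[0.5, 3.0], dbu=0.001)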
def layout_waveguide(cell, layer, points_list, width):
""" Lays out a waveguide (or trace) with a certain width with along given points.
This is very useful for laying out Bezier curves with or without adiabatic tapers.
Args:
cell: cell to place into
layer: layer to place into. It is done with cell.shapes(layer).insert(pya.Polygon)
points_list: list of pya.DPoint (at least 2 points)
width (microns): constant or list. If list, then it has to have the same length as points
"""
if len(points_list) < 2:
        raise NotImplementedError("ERROR: points_list too short")
return
try:
if len(width) == len(points_list):
width_iterator = iter(width)
else:
width_iterator = repeat(width[0])
except TypeError:
width_iterator = repeat(width)
finally:
points_iterator = iter(points_list)
dbu = cell.layout().dbu
points_low = list()
points_high = list()
def norm(self):
return sqrt(self.x**2 + self.y**2)
def cos_angle(point1, point2):
return point1 * point2 / norm(point1) / norm(point2)
point_width_list = list(zip(points_iterator, width_iterator))
N = len(point_width_list)
first_point, first_width = point_width_list[0]
next_point, next_width = point_width_list[1]
delta = next_point - first_point
theta = np.arctan2(delta.y, delta.x)
first_high_point = first_point + 0.5 * first_width * \
pya.DPoint(cos(theta + pi / 2), sin(theta + pi / 2))
first_low_point = first_point + 0.5 * first_width * \
pya.DPoint(cos(theta - pi / 2), sin(theta - pi / 2))
points_high.append(first_high_point)
points_low.append(first_low_point)
for i in range(1, N - 1):
prev_point, prev_width = point_width_list[i - 1]
point, width = point_width_list[i]
next_point, next_width = point_width_list[i + 1]
delta_prev = point - prev_point
delta_next = next_point - point
theta_prev = np.arctan2(delta_prev.y, delta_prev.x)
theta_next = np.arctan2(delta_next.y, delta_next.x)
next_point_high = (next_point + 0.5 * next_width *
pya.DPoint(cos(theta_next + pi / 2), sin(theta_next + pi / 2)))
next_point_low = (next_point + 0.5 * next_width *
pya.DPoint(cos(theta_next - pi / 2), sin(theta_next - pi / 2)))
forward_point_high = (point + 0.5 * width *
pya.DPoint(cos(theta_next + pi / 2), sin(theta_next + pi / 2)))
forward_point_low = (point + 0.5 * width *
pya.DPoint(cos(theta_next - pi / 2), sin(theta_next - pi / 2)))
prev_point_high = (prev_point + 0.5 * prev_width *
pya.DPoint(cos(theta_prev + pi / 2), sin(theta_prev + pi / 2)))
prev_point_low = (prev_point + 0.5 * prev_width *
pya.DPoint(cos(theta_prev - pi / 2), sin(theta_prev - pi / 2)))
backward_point_high = (point + 0.5 * width *
pya.DPoint(cos(theta_prev + pi / 2), sin(theta_prev + pi / 2)))
backward_point_low = (point + 0.5 * width *
pya.DPoint(cos(theta_prev - pi / 2), sin(theta_prev - pi / 2)))
# High point decision
next_high_edge = pya.DEdge(forward_point_high, next_point_high)
prev_high_edge = pya.DEdge(backward_point_high, prev_point_high)
if next_high_edge.crossed_by(prev_high_edge):
intersect_point = next_high_edge.crossing_point(prev_high_edge)
points_high.append(intersect_point)
else:
if width * (1 - cos_angle(delta_next, delta_prev)) > dbu:
points_high.append(backward_point_high)
points_high.append(forward_point_high)
else:
points_high.append((backward_point_high + forward_point_high) * 0.5)
# Low point decision
next_low_edge = pya.DEdge(forward_point_low, next_point_low)
prev_low_edge = pya.DEdge(backward_point_low, prev_point_low)
if next_low_edge.crossed_by(prev_low_edge):
intersect_point = next_low_edge.crossing_point(prev_low_edge)
points_low.append(intersect_point)
else:
if width * (1 - cos_angle(delta_next, delta_prev)) > dbu:
points_low.append(backward_point_low)
points_low.append(forward_point_low)
else:
points_low.append((backward_point_low + forward_point_low) * 0.5)
last_point, last_width = point_width_list[-1]
point, width = point_width_list[-2]
delta = last_point - point
theta = np.arctan2(delta.y, delta.x)
final_high_point = last_point + 0.5 * last_width * \
pya.DPoint(cos(theta + pi / 2), sin(theta + pi / 2))
final_low_point = last_point + 0.5 * last_width * \
pya.DPoint(cos(theta - pi / 2), sin(theta - pi / 2))
if (final_high_point - points_high[-1]) * delta > 0:
points_high.append(final_high_point)
if (final_low_point - points_low[-1]) * delta > 0:
points_low.append(final_low_point)
# Append point only if change in direction is less than 120 degrees.
def smooth_append(point_list, point):
if point_list is None:
print(point)
if len(point_list) < 1:
point_list.append(point)
return point_list
elif len(point_list) < 2:
curr_edge = point - point_list[-1]
if norm(curr_edge) > dbu:
point_list.append(point)
return point_list
curr_edge = point - point_list[-1]
if norm(curr_edge) > dbu:
prev_edge = point_list[-1] - point_list[-2]
if cos_angle(curr_edge, prev_edge) > cos(120 / 180 * pi):
point_list.append(point)
return point_list
polygon_points = points_low + list(reversed(points_high))
polygon_points = list(reduce(smooth_append, polygon_points, list()))
poly = pya.DPolygon(polygon_points)
cell.shapes(layer).insert(poly)
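# Hypothetical demo (not part of the original module): a constant-width trace
# with one 90-degree corner, placed on layer (1, 0) of a fresh layout.
def _demo_layout_waveguide():
    layout = pya.Layout()
    layout.dbu = 0.001
    cell = layout.create_cell("wg_demo")
    layer = layout.layer(1, 0)
    pts = [pya.DPoint(0, 0), pya.DPoint(10, 0), pya.DPoint(10, 10)]
    layout_waveguide(cell, layer, pts, width=0.5)
    return cell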
def layout_waveguide2(TECHNOLOGY, layout, cell, layers, widths, offsets, pts, radius, adiab, bezier):
'''
Create a waveguide, in a specific technology
inputs
- TECHNOLOGY, layout, cell:
from SiEPIC.utils import get_layout_variables
TECHNOLOGY, lv, layout, cell = get_layout_variables()
- layers: list of text names, e.g., ['Waveguide']
- widths: list of floats in units Microns, e.g., [0.50]
- offsets: list of floats in units Microns, e.g., [0]
- pts: a list of pya.Points, e.g.
L=15/dbu
pts = [Point(0,0), Point(L,0), Point(L,L)]
- radius: in Microns, e.g., 5
- adiab: 1 = Bezier curve, 0 = radial bend (arc)
- bezier: the bezier parameter, between 0 and 0.45 (almost a radial bend)
Note: bezier parameters need to be simulated and optimized, and will depend on
wavelength, polarization, width, etc. TM and rib waveguides don't benefit from bezier curves
most useful for TE
'''
from SiEPIC.utils import arc_xy, arc_bezier, angle_vector, angle_b_vectors, inner_angle_b_vectors, translate_from_normal
from SiEPIC.extend import to_itype
from pya import Path, Polygon, Trans
dbu = layout.dbu
width=widths[0]
turn=0
waveguide_length = 0
for lr in range(0, len(layers)):
wg_pts = [pts[0]]
layer = layout.layer(TECHNOLOGY[layers[lr]])
width = to_itype(widths[lr],dbu)
offset = to_itype(offsets[lr],dbu)
for i in range(1,len(pts)-1):
turn = ((angle_b_vectors(pts[i]-pts[i-1],pts[i+1]-pts[i])+90)%360-90)/90
dis1 = pts[i].distance(pts[i-1])
dis2 = pts[i].distance(pts[i+1])
angle = angle_vector(pts[i]-pts[i-1])/90
pt_radius = to_itype(radius,dbu)
# determine the radius, based on how much space is available
if len(pts)==3:
pt_radius = min (dis1, dis2, pt_radius)
else:
if i==1:
if dis1 <= pt_radius:
pt_radius = dis1
elif dis1 < 2*pt_radius:
pt_radius = dis1/2
if i==len(pts)-2:
if dis2 <= pt_radius:
pt_radius = dis2
elif dis2 < 2*pt_radius:
pt_radius = dis2/2
# waveguide bends:
if abs(turn)==1:
if(adiab):
wg_pts += Path(arc_bezier(pt_radius, 270, 270 + inner_angle_b_vectors(pts[i-1]-pts[i], pts[i+1]-pts[i]), bezier, DevRec='DevRec' in layers[lr]), 0).transformed(Trans(angle, turn < 0, pts[i])).get_points()
else:
wg_pts += Path(arc_xy(-pt_radius, pt_radius, pt_radius, 270, 270 + inner_angle_b_vectors(pts[i-1]-pts[i], pts[i+1]-pts[i]),DevRec='DevRec' in layers[lr]), 0).transformed(Trans(angle, turn < 0, pts[i])).get_points()
wg_pts += [pts[-1]]
wg_pts = pya.Path(wg_pts, 0).unique_points().get_points()
wg_polygon = Polygon(translate_from_normal(wg_pts, width/2 + (offset if turn > 0 else - offset))+translate_from_normal(wg_pts, -width/2 + (offset if turn > 0 else - offset))[::-1])
cell.shapes(layer).insert(wg_polygon)
if layout.layer(TECHNOLOGY['Waveguide']) == layer:
waveguide_length = wg_polygon.area() / width * dbu
return waveguide_length
def layout_rectangle(cell, layer, center, width, height, ex):
""" Lays out a rectangle
Args:
center: pya.DPoint (um units)
width: float (um units)
height: float (um unit)
ex: orientation
"""
rectangle = rectangle_dpolygon(center, width, height, ex=ex)
insert_shape(cell, layer, rectangle)
return rectangle
def layout_disk(cell, layer, center, r):
"""
function to produce the layout of a disk
cell: layout cell to place the layout
layer: which layer to use
center: origin DPoint
r: radius
units in microns
"""
    # optimal sampling of the circle boundary
    radius = r
assert radius > 0
arc_function = lambda t: np.array([radius * np.cos(t), radius * np.sin(t)])
t, coords = sample_function(arc_function,
[0, 2 * pi], tol=0.002 / radius)
    # build the disk polygon from the sampled boundary points
    points_hull = [center + pya.DPoint(x, y) for x, y in zip(*coords)]
    del points_hull[-1]  # drop the duplicate closing point
dpoly = pya.DPolygon(points_hull)
insert_shape(cell, layer, dpoly)
return dpoly
def layout_ring(cell, layer, center, r, w):
"""
function to produce the layout of a ring
cell: layout cell to place the layout
layer: which layer to use
center: origin DPoint
r: radius
w: waveguide width
units in microns
"""
# outer arc
# optimal sampling
assert r - w / 2 > 0
radius = r + w / 2
arc_function = lambda t: np.array([radius * np.cos(t), radius * np.sin(t)])
t, coords = sample_function(arc_function,
[0, 2 * pi], tol=0.002 / radius)
    # outer boundary points of the ring
    points_hull = [center + pya.DPoint(x, y) for x, y in zip(*coords)]
    del points_hull[-1]  # drop the duplicate closing point
radius = r - w / 2
arc_function = lambda t: np.array([radius * np.cos(t), radius * np.sin(t)])
t, coords = sample_function(arc_function,
[0, 2 * pi], tol=0.002 / radius)
    # inner boundary points (the hole)
    points_hole = [center + pya.DPoint(x, y) for x, y in zip(*coords)]
    del points_hole[-1]  # drop the duplicate closing point
dpoly = pya.DPolygon(list(reversed(points_hull)))
dpoly.insert_hole(points_hole)
dpoly.compress(True)
insert_shape(cell, layer, dpoly)
return dpoly
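
# Usage sketch (illustrative, not in the original source): a 10 um radius ring
# with a 500 nm wide waveguide, plus a filled disk on the same layer for
# comparison. `cell` and `layer` are assumed to come from the enclosing layout.
#
#   ring = layout_ring(cell, layer, pya.DPoint(0, 0), r=10, w=0.5)
#   disk = layout_disk(cell, layer, pya.DPoint(30, 0), r=10)
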
def layout_arc(cell, layer, center, r, w, theta_start, theta_end, ex=None,
x_bounds=(-np.inf, np.inf), y_bounds=(-np.inf, np.inf)):
""" function to produce the layout of an arc
cell: layout cell to place the layout
layer: which layer to use
center: origin DPoint (not affected by ex)
r: radius
w: waveguide width
theta_start, theta_end: angle in radians
x_bounds and y_bounds relative to the center, before rotation by ex.
units in microns
returns a dpolygon
"""
# fetch the database parameters
if r <= 0:
raise RuntimeError(f"Please give me a positive radius. Bad r={r}")
# optimal sampling
if theta_end < theta_start:
theta_start, theta_end = theta_end, theta_start
arc_function = lambda t: np.array([r * np.cos(t), r * np.sin(t)])
t, coords = sample_function(arc_function,
[theta_start, theta_end], tol=0.002 / r)
    # This yields a better polygon:
coords = np.insert(coords, 0, arc_function(theta_start - 0.001),
axis=1) # start the waveguide a little bit before
coords = np.append(coords, np.atleast_2d(arc_function(theta_end + 0.001)).T,
axis=1) # finish the waveguide a little bit after
    # create the original waveguide polygon prior to clipping and rotation
dpoints_list = [pya.DPoint(x, y) for x, y in zip(*coords)]
dpolygon = waveguide_dpolygon(dpoints_list, w, cell.layout().dbu)
# clip dpolygon to bounds
dpolygon.clip(x_bounds=x_bounds, y_bounds=y_bounds)
# Transform points (translation + rotation)
dpolygon.transform_and_rotate(center, ex)
dpolygon.compress(True)
dpolygon.layout(cell, layer)
return dpolygon
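
# Usage sketch (illustrative, not in the original source): a quarter-turn arc
# of a 500 nm waveguide at 10 um radius, clipped to the upper half plane.
#
#   arc = layout_arc(cell, layer, pya.DPoint(0, 0), r=10, w=0.5,
#                    theta_start=0, theta_end=pi / 2,
#                    y_bounds=(0, np.inf))
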
def layout_arc2(cell, layer, center, r1, r2, theta_start, theta_end, ex=None,
x_bounds=(-np.inf, np.inf), y_bounds=(-np.inf, np.inf)):
''' modified layout_arc with r1 and r2, instead of r (radius) and w (width). '''
r1, r2 = min(r1, r2), max(r1, r2)
r = (r1 + r2) / 2
w = (r2 - r1)
return layout_arc(cell, layer, center, r, w, theta_start, theta_end,
ex=ex, x_bounds=x_bounds, y_bounds=y_bounds)
def layout_section(cell, layer, center, r2, theta_start, theta_end, ex=None,
x_bounds=(-np.inf, np.inf), y_bounds=(-np.inf, np.inf)):
''' Layout section of a circle.
cell: layout cell to place the layout
layer: which layer to use
center: origin DPoint (not affected by ex)
r2: radius
theta_start, theta_end: angle in radians
x_bounds and y_bounds relative to the center, before rotation by ex.
units in microns
returns a dpolygon
'''
assert r2 > 0
    # swap the angles before sampling so the interval is increasing
    if theta_end < theta_start:
        theta_start, theta_end = theta_end, theta_start
    # optimal sampling
    arc_function = lambda t: np.array([r2 * np.cos(t), r2 * np.sin(t)])
    t, coords = sample_function(arc_function,
                                [theta_start, theta_end], tol=0.002 / r2)
    # This yields a better polygon:
    coords = np.insert(coords, 0, arc_function(theta_start - 0.001),
                       axis=1)  # start the section a little bit before
    coords = np.append(coords, np.atleast_2d(arc_function(theta_end + 0.001)).T,
                       axis=1)  # finish the section a little bit after
    # create the section polygon prior to clipping and rotation
dpoints_list = [pya.DPoint(x, y) for x, y in zip(*coords)]
dpolygon = DSimplePolygon(dpoints_list + [pya.DPoint(0, 0)])
# clip dpolygon to bounds
dpolygon.clip(x_bounds=x_bounds, y_bounds=y_bounds)
# Transform points (translation + rotation)
dpolygon.transform_and_rotate(center, ex)
dpolygon.compress(True)
dpolygon.layout(cell, layer)
return dpolygon
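
# Usage sketch (illustrative, not in the original source): a 90-degree pie
# slice of radius 10 um, centered at the origin.
#
#   layout_section(cell, layer, pya.DPoint(0, 0), r2=10,
#                  theta_start=0, theta_end=pi / 2)
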
def layout_arc_drc_exclude(cell, drc_layer, center, r, w, theta_start, theta_end, ex=None):
    ''' Places 100 nm squares on drc_layer at the four corners of the arc.
    Try to use DSimplePolygon.layout_drc_exclude instead.
    '''
corner_points = [center + (r + w / 2) * rotate(ex, theta_start),
center + (r - w / 2) * rotate(ex, theta_start),
center + (r + w / 2) * rotate(ex, theta_end),
center + (r - w / 2) * rotate(ex, theta_end)]
for corner_point in corner_points:
layout_square(cell, drc_layer, corner_point, 0.1, ex)
def layout_arc_with_drc_exclude(cell, layer, drc_layer, center, r, w, theta_start, theta_end, ex=None, **kwargs):
''' Layout arc with drc exclude squares '''
dpoly = layout_arc(cell, layer, center, r, w, theta_start, theta_end, ex, **kwargs)
dpoly.layout_drc_exclude(cell, drc_layer, ex)
return dpoly
def layout_circle(cell, layer, center, r):
"""
function to produce the layout of a filled circle
cell: layout cell to place the layout
layer: which layer to use
center: origin DPoint
r: radius
w: waveguide width
theta_start, theta_end: angle in radians
units in microns
optimal sampling
"""
arc_function = lambda t: np.array([center.x + r * | np.cos(t) | numpy.cos |
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the available built-in discrete-variable quantum operations.
"""
import pytest
import functools
import numpy as np
from numpy.linalg import multi_dot
from scipy.linalg import block_diag
import pennylane as qml
from pennylane.templates.layers import StronglyEntanglingLayers
from gate_data import I, X, Y, Z, H, CNOT, SWAP, CZ, S, T, CSWAP, Toffoli
# Standard observables, their matrix representation, and eigenvalues
OBSERVABLES = [
(qml.PauliX, X, [1, -1]),
(qml.PauliY, Y, [1, -1]),
(qml.PauliZ, Z, [1, -1]),
(qml.Hadamard, H, [1, -1]),
(qml.Identity, I, [1, 1]),
]
# Hermitian matrices, their corresponding eigenvalues and eigenvectors.
EIGVALS_TEST_DATA = [
(np.array([[1, 0], [0, 1]]), np.array([1.0, 1.0]), np.array([[1.0, 0.0], [0.0, 1.0]])),
(
np.array([[0, 1], [1, 0]]),
np.array([-1.0, 1.0]),
np.array([[-0.70710678, 0.70710678], [0.70710678, 0.70710678]]),
),
(
np.array([[0, -1j], [1j, 0]]),
np.array([-1.0, 1.0]),
np.array(
[[-0.70710678 + 0.0j, -0.70710678 + 0.0j], [0.0 + 0.70710678j, 0.0 - 0.70710678j]]
),
),
(np.array([[1, 0], [0, -1]]), np.array([-1.0, 1.0]), np.array([[0.0, 1.0], [1.0, 0.0]])),
(
1 / np.sqrt(2) * np.array([[1, 1], [1, -1]]),
np.array([-1.0, 1.0]),
np.array([[0.38268343, -0.92387953], [-0.92387953, -0.38268343]]),
),
]
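
# Sanity check (a sketch, not part of the original test suite): every tuple
# (A, w, V) in EIGVALS_TEST_DATA should satisfy A @ V = V @ diag(w), i.e. the
# columns of V are eigenvectors of A with eigenvalues w. Call manually if needed;
# the leading underscore keeps pytest from collecting it.
def _check_eigvals_test_data(data=EIGVALS_TEST_DATA, atol=1e-6):
    for A, w, V in data:
        assert np.allclose(A @ V, V @ np.diag(w), atol=atol)
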
EIGVALS_TEST_DATA_MULTI_WIRES = [
functools.reduce(np.kron, [Y, I, Z])
]
@pytest.mark.usefixtures("tear_down_hermitian")
class TestObservables:
"""Tests for observables"""
@pytest.mark.parametrize("obs, mat, eigs", OBSERVABLES)
def test_diagonalization(self, obs, mat, eigs, tol):
"""Test the method transforms standard observables into the Z-gate."""
ob = obs(wires=0)
A = ob.matrix
diag_gates = ob.diagonalizing_gates()
U = np.eye(2)
if diag_gates:
mats = [i.matrix for i in diag_gates]
            # Reverse the order in which the matrices are applied so that it
            # matches the order of matrix multiplication.
            # E.g. for PauliY: [PauliZ(wires=self.wires), S(wires=self.wires), Hadamard(wires=self.wires)]
            # becomes Hadamard @ S @ PauliZ, where @ stands for matrix multiplication
            mats = mats[::-1]
U = multi_dot([np.eye(2)] + mats)
res = U @ A @ U.conj().T
expected = np.diag(eigs)
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("obs, mat, eigs", OBSERVABLES)
def test_eigvals(self, obs, mat, eigs, tol):
"""Test eigenvalues of standard observables are correct"""
obs = obs(wires=0)
res = obs.eigvals
assert np.allclose(res, eigs, atol=tol, rtol=0)
@pytest.mark.parametrize("obs, mat, eigs", OBSERVABLES)
def test_matrices(self, obs, mat, eigs, tol):
"""Test matrices of standard observables are correct"""
obs = obs(wires=0)
res = obs.matrix
assert np.allclose(res, mat, atol=tol, rtol=0)
@pytest.mark.parametrize("observable, eigvals, eigvecs", EIGVALS_TEST_DATA)
    def test_hermitian_eigendecomposition_single_wire(
        self, observable, eigvals, eigvecs, tol
    ):
"""Tests that the eigendecomposition property of the Hermitian class returns the correct results
for a single wire."""
eigendecomp = qml.Hermitian(observable, wires=0).eigendecomposition
assert np.allclose(eigendecomp["eigval"], eigvals, atol=tol, rtol=0)
assert np.allclose(eigendecomp["eigvec"], eigvecs, atol=tol, rtol=0)
key = tuple(observable.flatten().tolist())
assert np.allclose(qml.Hermitian._eigs[key]["eigval"], eigvals, atol=tol, rtol=0)
assert np.allclose(qml.Hermitian._eigs[key]["eigvec"], eigvecs, atol=tol, rtol=0)
assert len(qml.Hermitian._eigs) == 1
@pytest.mark.parametrize("observable", EIGVALS_TEST_DATA_MULTI_WIRES)
    def test_hermitian_eigendecomposition_multiple_wires(self, observable, tol):
"""Tests that the eigendecomposition property of the Hermitian class returns the correct results
for multiple wires."""
num_wires = int(np.log2(len(observable)))
eigendecomp = qml.Hermitian(observable, wires=list(range(num_wires))).eigendecomposition
eigvals, eigvecs = np.linalg.eigh(observable)
assert np.allclose(eigendecomp["eigval"], eigvals, atol=tol, rtol=0)
assert np.allclose(eigendecomp["eigvec"], eigvecs, atol=tol, rtol=0)
key = tuple(observable.flatten().tolist())
assert np.allclose(qml.Hermitian._eigs[key]["eigval"], eigvals, atol=tol, rtol=0)
assert np.allclose(qml.Hermitian._eigs[key]["eigvec"], eigvecs, atol=tol, rtol=0)
assert len(qml.Hermitian._eigs) == 1
@pytest.mark.parametrize("obs1", EIGVALS_TEST_DATA)
@pytest.mark.parametrize("obs2", EIGVALS_TEST_DATA)
def test_hermitian_eigvals_eigvecs_two_different_observables(self, obs1, obs2, tol):
"""Tests that the eigvals method of the Hermitian class returns the correct results
for two observables."""
if np.all(obs1[0] == obs2[0]):
            pytest.skip("Test only runs for pairs of differing observables")
observable_1 = obs1[0]
observable_1_eigvals = obs1[1]
observable_1_eigvecs = obs1[2]
key = tuple(observable_1.flatten().tolist())
qml.Hermitian(observable_1, 0).eigvals
assert np.allclose(
qml.Hermitian._eigs[key]["eigval"], observable_1_eigvals, atol=tol, rtol=0
)
assert np.allclose(
qml.Hermitian._eigs[key]["eigvec"], observable_1_eigvecs, atol=tol, rtol=0
)
assert len(qml.Hermitian._eigs) == 1
observable_2 = obs2[0]
observable_2_eigvals = obs2[1]
observable_2_eigvecs = obs2[2]
key_2 = tuple(observable_2.flatten().tolist())
qml.Hermitian(observable_2, 0).eigvals
assert np.allclose(
qml.Hermitian._eigs[key_2]["eigval"], observable_2_eigvals, atol=tol, rtol=0
)
assert np.allclose(
qml.Hermitian._eigs[key_2]["eigvec"], observable_2_eigvecs, atol=tol, rtol=0
)
assert len(qml.Hermitian._eigs) == 2
@pytest.mark.parametrize("observable, eigvals, eigvecs", EIGVALS_TEST_DATA)
def test_hermitian_eigvals_eigvecs_same_observable_twice(
self, observable, eigvals, eigvecs, tol
):
"""Tests that the eigvals method of the Hermitian class keeps the same dictionary entries upon multiple calls."""
key = tuple(observable.flatten().tolist())
qml.Hermitian(observable, 0).eigvals
assert np.allclose(qml.Hermitian._eigs[key]["eigval"], eigvals, atol=tol, rtol=0)
assert np.allclose(qml.Hermitian._eigs[key]["eigvec"], eigvecs, atol=tol, rtol=0)
assert len(qml.Hermitian._eigs) == 1
qml.Hermitian(observable, 0).eigvals
assert np.allclose(qml.Hermitian._eigs[key]["eigval"], eigvals, atol=tol, rtol=0)
assert np.allclose(qml.Hermitian._eigs[key]["eigvec"], eigvecs, atol=tol, rtol=0)
assert len(qml.Hermitian._eigs) == 1
@pytest.mark.parametrize("observable, eigvals, eigvecs", EIGVALS_TEST_DATA)
def test_hermitian_diagonalizing_gates(self, observable, eigvals, eigvecs, tol):
"""Tests that the diagonalizing_gates method of the Hermitian class returns the correct results."""
qubit_unitary = qml.Hermitian(observable, wires=[0]).diagonalizing_gates()
key = tuple(observable.flatten().tolist())
assert np.allclose(qml.Hermitian._eigs[key]["eigval"], eigvals, atol=tol, rtol=0)
assert np.allclose(qml.Hermitian._eigs[key]["eigvec"], eigvecs, atol=tol, rtol=0)
assert np.allclose(qubit_unitary[0].params, eigvecs.conj().T, atol=tol, rtol=0)
assert len(qml.Hermitian._eigs) == 1
@pytest.mark.parametrize("obs1", EIGVALS_TEST_DATA)
@pytest.mark.parametrize("obs2", EIGVALS_TEST_DATA)
def test_hermitian_diagonalizing_gates_two_different_observables(self, obs1, obs2, tol):
"""Tests that the diagonalizing_gates method of the Hermitian class returns the correct results
for two observables."""
if np.all(obs1[0] == obs2[0]):
            pytest.skip("Test only runs for pairs of differing observables")
observable_1 = obs1[0]
observable_1_eigvals = obs1[1]
observable_1_eigvecs = obs1[2]
qubit_unitary = qml.Hermitian(observable_1, wires=[0]).diagonalizing_gates()
key = tuple(observable_1.flatten().tolist())
assert np.allclose(
qml.Hermitian._eigs[key]["eigval"], observable_1_eigvals, atol=tol, rtol=0
)
assert np.allclose(
qml.Hermitian._eigs[key]["eigvec"], observable_1_eigvecs, atol=tol, rtol=0
)
assert np.allclose(qubit_unitary[0].params, observable_1_eigvecs.conj().T, atol=tol, rtol=0)
assert len(qml.Hermitian._eigs) == 1
observable_2 = obs2[0]
observable_2_eigvals = obs2[1]
observable_2_eigvecs = obs2[2]
qubit_unitary_2 = qml.Hermitian(observable_2, wires=[0]).diagonalizing_gates()
key = tuple(observable_2.flatten().tolist())
assert np.allclose(
qml.Hermitian._eigs[key]["eigval"], observable_2_eigvals, atol=tol, rtol=0
)
assert np.allclose(
qml.Hermitian._eigs[key]["eigvec"], observable_2_eigvecs, atol=tol, rtol=0
)
assert np.allclose(
qubit_unitary_2[0].params, observable_2_eigvecs.conj().T, atol=tol, rtol=0
)
assert len(qml.Hermitian._eigs) == 2
@pytest.mark.parametrize("observable, eigvals, eigvecs", EIGVALS_TEST_DATA)
    def test_hermitian_diagonalizing_gates_same_observable_twice(
        self, observable, eigvals, eigvecs, tol
    ):
"""Tests that the diagonalizing_gates method of the Hermitian class keeps the same dictionary entries upon multiple calls."""
qubit_unitary = qml.Hermitian(observable, wires=[0]).diagonalizing_gates()
key = tuple(observable.flatten().tolist())
assert np.allclose(qml.Hermitian._eigs[key]["eigval"], eigvals, atol=tol, rtol=0)
assert np.allclose(qml.Hermitian._eigs[key]["eigvec"], eigvecs, atol=tol, rtol=0)
assert np.allclose(qubit_unitary[0].params, eigvecs.conj().T, atol=tol, rtol=0)
assert len(qml.Hermitian._eigs) == 1
qubit_unitary = qml.Hermitian(observable, wires=[0]).diagonalizing_gates()
key = tuple(observable.flatten().tolist())
assert np.allclose(qml.Hermitian._eigs[key]["eigval"], eigvals, atol=tol, rtol=0)
assert np.allclose(qml.Hermitian._eigs[key]["eigvec"], eigvecs, atol=tol, rtol=0)
assert np.allclose(qubit_unitary[0].params, eigvecs.conj().T, atol=tol, rtol=0)
assert len(qml.Hermitian._eigs) == 1
@pytest.mark.parametrize("observable, eigvals, eigvecs", EIGVALS_TEST_DATA)
def test_hermitian_diagonalizing_gates_integration(self, observable, eigvals, eigvecs, tol):
"""Tests that the diagonalizing_gates method of the Hermitian class
diagonalizes the given observable."""
tensor_obs = np.kron(observable, observable)
eigvals = | np.kron(eigvals, eigvals) | numpy.kron |
# MIT License
#
# Copyright (c) 2019 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Equations for 9-spin Ising model.
# Written on 2019/03/12.
from numpy import zeros, exp, array, prod, isnan
from ..enumerate import fast_logsumexp
def calc_observables(params):
"""
Give all parameters concatenated into one array from lowest to highest order.
Returns all correlations.
"""
Cout = zeros((45))
H = params[0:9]
J = params[9:45]
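    # Parameter layout (clarifying note, inferred from the slicing above and the
    # energy terms below): params[0:9] are the fields h_0..h_8, and params[9:45]
    # are the 36 pairwise couplings J_ij for i < j in row-major order:
    # (0,1), (0,2), ..., (0,8), (1,2), ..., (7,8).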
energyTerms = array([ +0, +H[8]+0, +H[7]+0, +H[7]+H[8]+J[35], +H[6]+0, +H[6]+H[8]+J[34], +H[6]+H[7]+J[33], +H[6]+H[7]+H[8]+
J[33]+J[34]+J[35], +H[5]+0, +H[5]+H[8]+J[32], +H[5]+H[7]+J[31], +H[5]+H[7]+H[8]+J[31]+J[32]+J[35], +
H[5]+H[6]+J[30], +H[5]+H[6]+H[8]+J[30]+J[32]+J[34], +H[5]+H[6]+H[7]+J[30]+J[31]+J[33], +H[5]+H[6]+H[7]+
H[8]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[4]+0, +H[4]+H[8]+J[29], +H[4]+H[7]+J[28], +H[4]+H[7]+H[8]+
J[28]+J[29]+J[35], +H[4]+H[6]+J[27], +H[4]+H[6]+H[8]+J[27]+J[29]+J[34], +H[4]+H[6]+H[7]+J[27]+J[28]+
J[33], +H[4]+H[6]+H[7]+H[8]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[4]+H[5]+J[26], +H[4]+H[5]+H[8]+J[26]+
J[29]+J[32], +H[4]+H[5]+H[7]+J[26]+J[28]+J[31], +H[4]+H[5]+H[7]+H[8]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +
H[4]+H[5]+H[6]+J[26]+J[27]+J[30], +H[4]+H[5]+H[6]+H[8]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[4]+H[5]+
H[6]+H[7]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[4]+H[5]+H[6]+H[7]+H[8]+J[26]+J[27]+J[28]+J[29]+J[30]+
J[31]+J[32]+J[33]+J[34]+J[35], +H[3]+0, +H[3]+H[8]+J[25], +H[3]+H[7]+J[24], +H[3]+H[7]+H[8]+J[24]+J[25]+
J[35], +H[3]+H[6]+J[23], +H[3]+H[6]+H[8]+J[23]+J[25]+J[34], +H[3]+H[6]+H[7]+J[23]+J[24]+J[33], +H[3]+
H[6]+H[7]+H[8]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[3]+H[5]+J[22], +H[3]+H[5]+H[8]+J[22]+J[25]+J[32], +
H[3]+H[5]+H[7]+J[22]+J[24]+J[31], +H[3]+H[5]+H[7]+H[8]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[3]+H[5]+
H[6]+J[22]+J[23]+J[30], +H[3]+H[5]+H[6]+H[8]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34], +H[3]+H[5]+H[6]+H[7]+
J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +H[3]+H[5]+H[6]+H[7]+H[8]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+
J[33]+J[34]+J[35], +H[3]+H[4]+J[21], +H[3]+H[4]+H[8]+J[21]+J[25]+J[29], +H[3]+H[4]+H[7]+J[21]+J[24]+
J[28], +H[3]+H[4]+H[7]+H[8]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[3]+H[4]+H[6]+J[21]+J[23]+J[27], +
H[3]+H[4]+H[6]+H[8]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[3]+H[4]+H[6]+H[7]+J[21]+J[23]+J[24]+J[27]+
J[28]+J[33], +H[3]+H[4]+H[6]+H[7]+H[8]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +
H[3]+H[4]+H[5]+J[21]+J[22]+J[26], +H[3]+H[4]+H[5]+H[8]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32], +H[3]+H[4]+
H[5]+H[7]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[3]+H[4]+H[5]+H[7]+H[8]+J[21]+J[22]+J[24]+J[25]+J[26]+
J[28]+J[29]+J[31]+J[32]+J[35], +H[3]+H[4]+H[5]+H[6]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[3]+H[4]+
H[5]+H[6]+H[8]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[3]+H[4]+H[5]+H[6]+H[7]+
J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[21]+J[22]+
J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[2]+0, +H[2]+H[8]+J[20], +
H[2]+H[7]+J[19], +H[2]+H[7]+H[8]+J[19]+J[20]+J[35], +H[2]+H[6]+J[18], +H[2]+H[6]+H[8]+J[18]+J[20]+J[34], +
H[2]+H[6]+H[7]+J[18]+J[19]+J[33], +H[2]+H[6]+H[7]+H[8]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35], +H[2]+H[5]+
J[17], +H[2]+H[5]+H[8]+J[17]+J[20]+J[32], +H[2]+H[5]+H[7]+J[17]+J[19]+J[31], +H[2]+H[5]+H[7]+H[8]+J[17]+
J[19]+J[20]+J[31]+J[32]+J[35], +H[2]+H[5]+H[6]+J[17]+J[18]+J[30], +H[2]+H[5]+H[6]+H[8]+J[17]+J[18]+J[20]+
J[30]+J[32]+J[34], +H[2]+H[5]+H[6]+H[7]+J[17]+J[18]+J[19]+J[30]+J[31]+J[33], +H[2]+H[5]+H[6]+H[7]+H[8]+
J[17]+J[18]+J[19]+J[20]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[2]+H[4]+J[16], +H[2]+H[4]+H[8]+J[16]+
J[20]+J[29], +H[2]+H[4]+H[7]+J[16]+J[19]+J[28], +H[2]+H[4]+H[7]+H[8]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35], +
H[2]+H[4]+H[6]+J[16]+J[18]+J[27], +H[2]+H[4]+H[6]+H[8]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34], +H[2]+H[4]+
H[6]+H[7]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33], +H[2]+H[4]+H[6]+H[7]+H[8]+J[16]+J[18]+J[19]+J[20]+J[27]+
J[28]+J[29]+J[33]+J[34]+J[35], +H[2]+H[4]+H[5]+J[16]+J[17]+J[26], +H[2]+H[4]+H[5]+H[8]+J[16]+J[17]+J[20]+
J[26]+J[29]+J[32], +H[2]+H[4]+H[5]+H[7]+J[16]+J[17]+J[19]+J[26]+J[28]+J[31], +H[2]+H[4]+H[5]+H[7]+H[8]+
J[16]+J[17]+J[19]+J[20]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[2]+H[4]+H[5]+H[6]+J[16]+J[17]+J[18]+
J[26]+J[27]+J[30], +H[2]+H[4]+H[5]+H[6]+H[8]+J[16]+J[17]+J[18]+J[20]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +
H[2]+H[4]+H[5]+H[6]+H[7]+J[16]+J[17]+J[18]+J[19]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[2]+H[4]+H[5]+
H[6]+H[7]+H[8]+J[16]+J[17]+J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +
H[2]+H[3]+J[15], +H[2]+H[3]+H[8]+J[15]+J[20]+J[25], +H[2]+H[3]+H[7]+J[15]+J[19]+J[24], +H[2]+H[3]+H[7]+
H[8]+J[15]+J[19]+J[20]+J[24]+J[25]+J[35], +H[2]+H[3]+H[6]+J[15]+J[18]+J[23], +H[2]+H[3]+H[6]+H[8]+J[15]+
J[18]+J[20]+J[23]+J[25]+J[34], +H[2]+H[3]+H[6]+H[7]+J[15]+J[18]+J[19]+J[23]+J[24]+J[33], +H[2]+H[3]+
H[6]+H[7]+H[8]+J[15]+J[18]+J[19]+J[20]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[2]+H[3]+H[5]+J[15]+J[17]+
J[22], +H[2]+H[3]+H[5]+H[8]+J[15]+J[17]+J[20]+J[22]+J[25]+J[32], +H[2]+H[3]+H[5]+H[7]+J[15]+J[17]+J[19]+
J[22]+J[24]+J[31], +H[2]+H[3]+H[5]+H[7]+H[8]+J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +
H[2]+H[3]+H[5]+H[6]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30], +H[2]+H[3]+H[5]+H[6]+H[8]+J[15]+J[17]+J[18]+
J[20]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34], +H[2]+H[3]+H[5]+H[6]+H[7]+J[15]+J[17]+J[18]+J[19]+J[22]+J[23]+
J[24]+J[30]+J[31]+J[33], +H[2]+H[3]+H[5]+H[6]+H[7]+H[8]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+
J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[2]+H[3]+H[4]+J[15]+J[16]+J[21], +H[2]+H[3]+H[4]+H[8]+J[15]+
J[16]+J[20]+J[21]+J[25]+J[29], +H[2]+H[3]+H[4]+H[7]+J[15]+J[16]+J[19]+J[21]+J[24]+J[28], +H[2]+H[3]+
H[4]+H[7]+H[8]+J[15]+J[16]+J[19]+J[20]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[2]+H[3]+H[4]+H[6]+J[15]+
J[16]+J[18]+J[21]+J[23]+J[27], +H[2]+H[3]+H[4]+H[6]+H[8]+J[15]+J[16]+J[18]+J[20]+J[21]+J[23]+J[25]+J[27]+
J[29]+J[34], +H[2]+H[3]+H[4]+H[6]+H[7]+J[15]+J[16]+J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +
H[2]+H[3]+H[4]+H[6]+H[7]+H[8]+J[15]+J[16]+J[18]+J[19]+J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+
J[33]+J[34]+J[35], +H[2]+H[3]+H[4]+H[5]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26], +H[2]+H[3]+H[4]+H[5]+H[8]+
J[15]+J[16]+J[17]+J[20]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32], +H[2]+H[3]+H[4]+H[5]+H[7]+J[15]+J[16]+J[17]+
J[19]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[2]+H[3]+H[4]+H[5]+H[7]+H[8]+J[15]+J[16]+J[17]+J[19]+J[20]+
J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[2]+H[3]+H[4]+H[5]+H[6]+J[15]+J[16]+J[17]+
J[18]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[15]+J[16]+J[17]+J[18]+J[20]+
J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[15]+J[16]+
J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[2]+H[3]+H[4]+H[5]+
H[6]+H[7]+H[8]+J[15]+J[16]+J[17]+J[18]+J[19]+J[20]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+
J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+0, +H[1]+H[8]+J[14], +H[1]+H[7]+J[13], +H[1]+H[7]+H[8]+J[13]+
J[14]+J[35], +H[1]+H[6]+J[12], +H[1]+H[6]+H[8]+J[12]+J[14]+J[34], +H[1]+H[6]+H[7]+J[12]+J[13]+J[33], +
H[1]+H[6]+H[7]+H[8]+J[12]+J[13]+J[14]+J[33]+J[34]+J[35], +H[1]+H[5]+J[11], +H[1]+H[5]+H[8]+J[11]+J[14]+
J[32], +H[1]+H[5]+H[7]+J[11]+J[13]+J[31], +H[1]+H[5]+H[7]+H[8]+J[11]+J[13]+J[14]+J[31]+J[32]+J[35], +
H[1]+H[5]+H[6]+J[11]+J[12]+J[30], +H[1]+H[5]+H[6]+H[8]+J[11]+J[12]+J[14]+J[30]+J[32]+J[34], +H[1]+H[5]+
H[6]+H[7]+J[11]+J[12]+J[13]+J[30]+J[31]+J[33], +H[1]+H[5]+H[6]+H[7]+H[8]+J[11]+J[12]+J[13]+J[14]+J[30]+
J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+H[4]+J[10], +H[1]+H[4]+H[8]+J[10]+J[14]+J[29], +H[1]+H[4]+H[7]+
J[10]+J[13]+J[28], +H[1]+H[4]+H[7]+H[8]+J[10]+J[13]+J[14]+J[28]+J[29]+J[35], +H[1]+H[4]+H[6]+J[10]+J[12]+
J[27], +H[1]+H[4]+H[6]+H[8]+J[10]+J[12]+J[14]+J[27]+J[29]+J[34], +H[1]+H[4]+H[6]+H[7]+J[10]+J[12]+J[13]+
J[27]+J[28]+J[33], +H[1]+H[4]+H[6]+H[7]+H[8]+J[10]+J[12]+J[13]+J[14]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +
H[1]+H[4]+H[5]+J[10]+J[11]+J[26], +H[1]+H[4]+H[5]+H[8]+J[10]+J[11]+J[14]+J[26]+J[29]+J[32], +H[1]+H[4]+
H[5]+H[7]+J[10]+J[11]+J[13]+J[26]+J[28]+J[31], +H[1]+H[4]+H[5]+H[7]+H[8]+J[10]+J[11]+J[13]+J[14]+J[26]+
J[28]+J[29]+J[31]+J[32]+J[35], +H[1]+H[4]+H[5]+H[6]+J[10]+J[11]+J[12]+J[26]+J[27]+J[30], +H[1]+H[4]+
H[5]+H[6]+H[8]+J[10]+J[11]+J[12]+J[14]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[1]+H[4]+H[5]+H[6]+H[7]+
J[10]+J[11]+J[12]+J[13]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[1]+H[4]+H[5]+H[6]+H[7]+H[8]+J[10]+J[11]+
J[12]+J[13]+J[14]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+H[3]+J[9], +H[1]+
H[3]+H[8]+J[9]+J[14]+J[25], +H[1]+H[3]+H[7]+J[9]+J[13]+J[24], +H[1]+H[3]+H[7]+H[8]+J[9]+J[13]+J[14]+
J[24]+J[25]+J[35], +H[1]+H[3]+H[6]+J[9]+J[12]+J[23], +H[1]+H[3]+H[6]+H[8]+J[9]+J[12]+J[14]+J[23]+J[25]+
J[34], +H[1]+H[3]+H[6]+H[7]+J[9]+J[12]+J[13]+J[23]+J[24]+J[33], +H[1]+H[3]+H[6]+H[7]+H[8]+J[9]+J[12]+
J[13]+J[14]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[1]+H[3]+H[5]+J[9]+J[11]+J[22], +H[1]+H[3]+H[5]+H[8]+
J[9]+J[11]+J[14]+J[22]+J[25]+J[32], +H[1]+H[3]+H[5]+H[7]+J[9]+J[11]+J[13]+J[22]+J[24]+J[31], +H[1]+H[3]+
H[5]+H[7]+H[8]+J[9]+J[11]+J[13]+J[14]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[1]+H[3]+H[5]+H[6]+J[9]+
J[11]+J[12]+J[22]+J[23]+J[30], +H[1]+H[3]+H[5]+H[6]+H[8]+J[9]+J[11]+J[12]+J[14]+J[22]+J[23]+J[25]+J[30]+
J[32]+J[34], +H[1]+H[3]+H[5]+H[6]+H[7]+J[9]+J[11]+J[12]+J[13]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +
H[1]+H[3]+H[5]+H[6]+H[7]+H[8]+J[9]+J[11]+J[12]+J[13]+J[14]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+
J[33]+J[34]+J[35], +H[1]+H[3]+H[4]+J[9]+J[10]+J[21], +H[1]+H[3]+H[4]+H[8]+J[9]+J[10]+J[14]+J[21]+J[25]+
J[29], +H[1]+H[3]+H[4]+H[7]+J[9]+J[10]+J[13]+J[21]+J[24]+J[28], +H[1]+H[3]+H[4]+H[7]+H[8]+J[9]+J[10]+
J[13]+J[14]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[1]+H[3]+H[4]+H[6]+J[9]+J[10]+J[12]+J[21]+J[23]+J[27], +
H[1]+H[3]+H[4]+H[6]+H[8]+J[9]+J[10]+J[12]+J[14]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[1]+H[3]+H[4]+
H[6]+H[7]+J[9]+J[10]+J[12]+J[13]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[1]+H[3]+H[4]+H[6]+H[7]+H[8]+
J[9]+J[10]+J[12]+J[13]+J[14]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[1]+H[3]+
H[4]+H[5]+J[9]+J[10]+J[11]+J[21]+J[22]+J[26], +H[1]+H[3]+H[4]+H[5]+H[8]+J[9]+J[10]+J[11]+J[14]+J[21]+
J[22]+J[25]+J[26]+J[29]+J[32], +H[1]+H[3]+H[4]+H[5]+H[7]+J[9]+J[10]+J[11]+J[13]+J[21]+J[22]+J[24]+J[26]+
J[28]+J[31], +H[1]+H[3]+H[4]+H[5]+H[7]+H[8]+J[9]+J[10]+J[11]+J[13]+J[14]+J[21]+J[22]+J[24]+J[25]+J[26]+
J[28]+J[29]+J[31]+J[32]+J[35], +H[1]+H[3]+H[4]+H[5]+H[6]+J[9]+J[10]+J[11]+J[12]+J[21]+J[22]+J[23]+J[26]+
J[27]+J[30], +H[1]+H[3]+H[4]+H[5]+H[6]+H[8]+J[9]+J[10]+J[11]+J[12]+J[14]+J[21]+J[22]+J[23]+J[25]+J[26]+
J[27]+J[29]+J[30]+J[32]+J[34], +H[1]+H[3]+H[4]+H[5]+H[6]+H[7]+J[9]+J[10]+J[11]+J[12]+J[13]+J[21]+J[22]+
J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[1]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[9]+J[10]+J[11]+
J[12]+J[13]+J[14]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+
J[35], +H[1]+H[2]+J[8], +H[1]+H[2]+H[8]+J[8]+J[14]+J[20], +H[1]+H[2]+H[7]+J[8]+J[13]+J[19], +H[1]+H[2]+
H[7]+H[8]+J[8]+J[13]+J[14]+J[19]+J[20]+J[35], +H[1]+H[2]+H[6]+J[8]+J[12]+J[18], +H[1]+H[2]+H[6]+H[8]+
J[8]+J[12]+J[14]+J[18]+J[20]+J[34], +H[1]+H[2]+H[6]+H[7]+J[8]+J[12]+J[13]+J[18]+J[19]+J[33], +H[1]+H[2]+
H[6]+H[7]+H[8]+J[8]+J[12]+J[13]+J[14]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35], +H[1]+H[2]+H[5]+J[8]+J[11]+
J[17], +H[1]+H[2]+H[5]+H[8]+J[8]+J[11]+J[14]+J[17]+J[20]+J[32], +H[1]+H[2]+H[5]+H[7]+J[8]+J[11]+J[13]+
J[17]+J[19]+J[31], +H[1]+H[2]+H[5]+H[7]+H[8]+J[8]+J[11]+J[13]+J[14]+J[17]+J[19]+J[20]+J[31]+J[32]+J[35], +
H[1]+H[2]+H[5]+H[6]+J[8]+J[11]+J[12]+J[17]+J[18]+J[30], +H[1]+H[2]+H[5]+H[6]+H[8]+J[8]+J[11]+J[12]+J[14]+
J[17]+J[18]+J[20]+J[30]+J[32]+J[34], +H[1]+H[2]+H[5]+H[6]+H[7]+J[8]+J[11]+J[12]+J[13]+J[17]+J[18]+J[19]+
J[30]+J[31]+J[33], +H[1]+H[2]+H[5]+H[6]+H[7]+H[8]+J[8]+J[11]+J[12]+J[13]+J[14]+J[17]+J[18]+J[19]+J[20]+
J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+H[2]+H[4]+J[8]+J[10]+J[16], +H[1]+H[2]+H[4]+H[8]+J[8]+J[10]+
J[14]+J[16]+J[20]+J[29], +H[1]+H[2]+H[4]+H[7]+J[8]+J[10]+J[13]+J[16]+J[19]+J[28], +H[1]+H[2]+H[4]+H[7]+
H[8]+J[8]+J[10]+J[13]+J[14]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35], +H[1]+H[2]+H[4]+H[6]+J[8]+J[10]+J[12]+
J[16]+J[18]+J[27], +H[1]+H[2]+H[4]+H[6]+H[8]+J[8]+J[10]+J[12]+J[14]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34], +
H[1]+H[2]+H[4]+H[6]+H[7]+J[8]+J[10]+J[12]+J[13]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33], +H[1]+H[2]+H[4]+
H[6]+H[7]+H[8]+J[8]+J[10]+J[12]+J[13]+J[14]+J[16]+J[18]+J[19]+J[20]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +
H[1]+H[2]+H[4]+H[5]+J[8]+J[10]+J[11]+J[16]+J[17]+J[26], +H[1]+H[2]+H[4]+H[5]+H[8]+J[8]+J[10]+J[11]+J[14]+
J[16]+J[17]+J[20]+J[26]+J[29]+J[32], +H[1]+H[2]+H[4]+H[5]+H[7]+J[8]+J[10]+J[11]+J[13]+J[16]+J[17]+J[19]+
J[26]+J[28]+J[31], +H[1]+H[2]+H[4]+H[5]+H[7]+H[8]+J[8]+J[10]+J[11]+J[13]+J[14]+J[16]+J[17]+J[19]+J[20]+
J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[1]+H[2]+H[4]+H[5]+H[6]+J[8]+J[10]+J[11]+J[12]+J[16]+J[17]+J[18]+
J[26]+J[27]+J[30], +H[1]+H[2]+H[4]+H[5]+H[6]+H[8]+J[8]+J[10]+J[11]+J[12]+J[14]+J[16]+J[17]+J[18]+J[20]+
J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[1]+H[2]+H[4]+H[5]+H[6]+H[7]+J[8]+J[10]+J[11]+J[12]+J[13]+J[16]+
J[17]+J[18]+J[19]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[1]+H[2]+H[4]+H[5]+H[6]+H[7]+H[8]+J[8]+J[10]+
J[11]+J[12]+J[13]+J[14]+J[16]+J[17]+J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+
J[34]+J[35], +H[1]+H[2]+H[3]+J[8]+J[9]+J[15], +H[1]+H[2]+H[3]+H[8]+J[8]+J[9]+J[14]+J[15]+J[20]+J[25], +
H[1]+H[2]+H[3]+H[7]+J[8]+J[9]+J[13]+J[15]+J[19]+J[24], +H[1]+H[2]+H[3]+H[7]+H[8]+J[8]+J[9]+J[13]+J[14]+
J[15]+J[19]+J[20]+J[24]+J[25]+J[35], +H[1]+H[2]+H[3]+H[6]+J[8]+J[9]+J[12]+J[15]+J[18]+J[23], +H[1]+H[2]+
H[3]+H[6]+H[8]+J[8]+J[9]+J[12]+J[14]+J[15]+J[18]+J[20]+J[23]+J[25]+J[34], +H[1]+H[2]+H[3]+H[6]+H[7]+
J[8]+J[9]+J[12]+J[13]+J[15]+J[18]+J[19]+J[23]+J[24]+J[33], +H[1]+H[2]+H[3]+H[6]+H[7]+H[8]+J[8]+J[9]+
J[12]+J[13]+J[14]+J[15]+J[18]+J[19]+J[20]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[1]+H[2]+H[3]+H[5]+
J[8]+J[9]+J[11]+J[15]+J[17]+J[22], +H[1]+H[2]+H[3]+H[5]+H[8]+J[8]+J[9]+J[11]+J[14]+J[15]+J[17]+J[20]+
J[22]+J[25]+J[32], +H[1]+H[2]+H[3]+H[5]+H[7]+J[8]+J[9]+J[11]+J[13]+J[15]+J[17]+J[19]+J[22]+J[24]+J[31], +
H[1]+H[2]+H[3]+H[5]+H[7]+H[8]+J[8]+J[9]+J[11]+J[13]+J[14]+J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+
J[31]+J[32]+J[35], +H[1]+H[2]+H[3]+H[5]+H[6]+J[8]+J[9]+J[11]+J[12]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30], +
H[1]+H[2]+H[3]+H[5]+H[6]+H[8]+J[8]+J[9]+J[11]+J[12]+J[14]+J[15]+J[17]+J[18]+J[20]+J[22]+J[23]+J[25]+
J[30]+J[32]+J[34], +H[1]+H[2]+H[3]+H[5]+H[6]+H[7]+J[8]+J[9]+J[11]+J[12]+J[13]+J[15]+J[17]+J[18]+J[19]+
J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +H[1]+H[2]+H[3]+H[5]+H[6]+H[7]+H[8]+J[8]+J[9]+J[11]+J[12]+J[13]+
J[14]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+
H[2]+H[3]+H[4]+J[8]+J[9]+J[10]+J[15]+J[16]+J[21], +H[1]+H[2]+H[3]+H[4]+H[8]+J[8]+J[9]+J[10]+J[14]+J[15]+
J[16]+J[20]+J[21]+J[25]+J[29], +H[1]+H[2]+H[3]+H[4]+H[7]+J[8]+J[9]+J[10]+J[13]+J[15]+J[16]+J[19]+J[21]+
J[24]+J[28], +H[1]+H[2]+H[3]+H[4]+H[7]+H[8]+J[8]+J[9]+J[10]+J[13]+J[14]+J[15]+J[16]+J[19]+J[20]+J[21]+
J[24]+J[25]+J[28]+J[29]+J[35], +H[1]+H[2]+H[3]+H[4]+H[6]+J[8]+J[9]+J[10]+J[12]+J[15]+J[16]+J[18]+J[21]+
J[23]+J[27], +H[1]+H[2]+H[3]+H[4]+H[6]+H[8]+J[8]+J[9]+J[10]+J[12]+J[14]+J[15]+J[16]+J[18]+J[20]+J[21]+
J[23]+J[25]+J[27]+J[29]+J[34], +H[1]+H[2]+H[3]+H[4]+H[6]+H[7]+J[8]+J[9]+J[10]+J[12]+J[13]+J[15]+J[16]+
J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[1]+H[2]+H[3]+H[4]+H[6]+H[7]+H[8]+J[8]+J[9]+J[10]+
J[12]+J[13]+J[14]+J[15]+J[16]+J[18]+J[19]+J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+
J[35], +H[1]+H[2]+H[3]+H[4]+H[5]+J[8]+J[9]+J[10]+J[11]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26], +H[1]+H[2]+
H[3]+H[4]+H[5]+H[8]+J[8]+J[9]+J[10]+J[11]+J[14]+J[15]+J[16]+J[17]+J[20]+J[21]+J[22]+J[25]+J[26]+J[29]+
J[32], +H[1]+H[2]+H[3]+H[4]+H[5]+H[7]+J[8]+J[9]+J[10]+J[11]+J[13]+J[15]+J[16]+J[17]+J[19]+J[21]+J[22]+
J[24]+J[26]+J[28]+J[31], +H[1]+H[2]+H[3]+H[4]+H[5]+H[7]+H[8]+J[8]+J[9]+J[10]+J[11]+J[13]+J[14]+J[15]+
J[16]+J[17]+J[19]+J[20]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[1]+H[2]+H[3]+
H[4]+H[5]+H[6]+J[8]+J[9]+J[10]+J[11]+J[12]+J[15]+J[16]+J[17]+J[18]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +
H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[8]+J[9]+J[10]+J[11]+J[12]+J[14]+J[15]+J[16]+J[17]+J[18]+J[20]+J[21]+
J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[8]+J[9]+
J[10]+J[11]+J[12]+J[13]+J[15]+J[16]+J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+
J[31]+J[33], +H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[8]+J[9]+J[10]+J[11]+J[12]+J[13]+J[14]+J[15]+
J[16]+J[17]+J[18]+J[19]+J[20]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+
J[33]+J[34]+J[35], +H[0]+0, +H[0]+H[8]+J[7], +H[0]+H[7]+J[6], +H[0]+H[7]+H[8]+J[6]+J[7]+J[35], +H[0]+
H[6]+J[5], +H[0]+H[6]+H[8]+J[5]+J[7]+J[34], +H[0]+H[6]+H[7]+J[5]+J[6]+J[33], +H[0]+H[6]+H[7]+H[8]+J[5]+
J[6]+J[7]+J[33]+J[34]+J[35], +H[0]+H[5]+J[4], +H[0]+H[5]+H[8]+J[4]+J[7]+J[32], +H[0]+H[5]+H[7]+J[4]+
J[6]+J[31], +H[0]+H[5]+H[7]+H[8]+J[4]+J[6]+J[7]+J[31]+J[32]+J[35], +H[0]+H[5]+H[6]+J[4]+J[5]+J[30], +
H[0]+H[5]+H[6]+H[8]+J[4]+J[5]+J[7]+J[30]+J[32]+J[34], +H[0]+H[5]+H[6]+H[7]+J[4]+J[5]+J[6]+J[30]+J[31]+
J[33], +H[0]+H[5]+H[6]+H[7]+H[8]+J[4]+J[5]+J[6]+J[7]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[4]+
J[3], +H[0]+H[4]+H[8]+J[3]+J[7]+J[29], +H[0]+H[4]+H[7]+J[3]+J[6]+J[28], +H[0]+H[4]+H[7]+H[8]+J[3]+J[6]+
J[7]+J[28]+J[29]+J[35], +H[0]+H[4]+H[6]+J[3]+J[5]+J[27], +H[0]+H[4]+H[6]+H[8]+J[3]+J[5]+J[7]+J[27]+J[29]+
J[34], +H[0]+H[4]+H[6]+H[7]+J[3]+J[5]+J[6]+J[27]+J[28]+J[33], +H[0]+H[4]+H[6]+H[7]+H[8]+J[3]+J[5]+J[6]+
J[7]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[4]+H[5]+J[3]+J[4]+J[26], +H[0]+H[4]+H[5]+H[8]+J[3]+
J[4]+J[7]+J[26]+J[29]+J[32], +H[0]+H[4]+H[5]+H[7]+J[3]+J[4]+J[6]+J[26]+J[28]+J[31], +H[0]+H[4]+H[5]+
H[7]+H[8]+J[3]+J[4]+J[6]+J[7]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[4]+H[5]+H[6]+J[3]+J[4]+J[5]+
J[26]+J[27]+J[30], +H[0]+H[4]+H[5]+H[6]+H[8]+J[3]+J[4]+J[5]+J[7]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +
H[0]+H[4]+H[5]+H[6]+H[7]+J[3]+J[4]+J[5]+J[6]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[4]+H[5]+H[6]+
H[7]+H[8]+J[3]+J[4]+J[5]+J[6]+J[7]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+
H[3]+J[2], +H[0]+H[3]+H[8]+J[2]+J[7]+J[25], +H[0]+H[3]+H[7]+J[2]+J[6]+J[24], +H[0]+H[3]+H[7]+H[8]+J[2]+
J[6]+J[7]+J[24]+J[25]+J[35], +H[0]+H[3]+H[6]+J[2]+J[5]+J[23], +H[0]+H[3]+H[6]+H[8]+J[2]+J[5]+J[7]+J[23]+
J[25]+J[34], +H[0]+H[3]+H[6]+H[7]+J[2]+J[5]+J[6]+J[23]+J[24]+J[33], +H[0]+H[3]+H[6]+H[7]+H[8]+J[2]+J[5]+
J[6]+J[7]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[0]+H[3]+H[5]+J[2]+J[4]+J[22], +H[0]+H[3]+H[5]+H[8]+
J[2]+J[4]+J[7]+J[22]+J[25]+J[32], +H[0]+H[3]+H[5]+H[7]+J[2]+J[4]+J[6]+J[22]+J[24]+J[31], +H[0]+H[3]+
H[5]+H[7]+H[8]+J[2]+J[4]+J[6]+J[7]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[0]+H[3]+H[5]+H[6]+J[2]+J[4]+
J[5]+J[22]+J[23]+J[30], +H[0]+H[3]+H[5]+H[6]+H[8]+J[2]+J[4]+J[5]+J[7]+J[22]+J[23]+J[25]+J[30]+J[32]+
J[34], +H[0]+H[3]+H[5]+H[6]+H[7]+J[2]+J[4]+J[5]+J[6]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +H[0]+H[3]+
H[5]+H[6]+H[7]+H[8]+J[2]+J[4]+J[5]+J[6]+J[7]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +
H[0]+H[3]+H[4]+J[2]+J[3]+J[21], +H[0]+H[3]+H[4]+H[8]+J[2]+J[3]+J[7]+J[21]+J[25]+J[29], +H[0]+H[3]+H[4]+
H[7]+J[2]+J[3]+J[6]+J[21]+J[24]+J[28], +H[0]+H[3]+H[4]+H[7]+H[8]+J[2]+J[3]+J[6]+J[7]+J[21]+J[24]+J[25]+
J[28]+J[29]+J[35], +H[0]+H[3]+H[4]+H[6]+J[2]+J[3]+J[5]+J[21]+J[23]+J[27], +H[0]+H[3]+H[4]+H[6]+H[8]+
J[2]+J[3]+J[5]+J[7]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[0]+H[3]+H[4]+H[6]+H[7]+J[2]+J[3]+J[5]+J[6]+
J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[0]+H[3]+H[4]+H[6]+H[7]+H[8]+J[2]+J[3]+J[5]+J[6]+J[7]+J[21]+J[23]+
J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[3]+H[4]+H[5]+J[2]+J[3]+J[4]+J[21]+J[22]+J[26], +
H[0]+H[3]+H[4]+H[5]+H[8]+J[2]+J[3]+J[4]+J[7]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32], +H[0]+H[3]+H[4]+H[5]+
H[7]+J[2]+J[3]+J[4]+J[6]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[0]+H[3]+H[4]+H[5]+H[7]+H[8]+J[2]+J[3]+
J[4]+J[6]+J[7]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[3]+H[4]+H[5]+H[6]+
J[2]+J[3]+J[4]+J[5]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[0]+H[3]+H[4]+H[5]+H[6]+H[8]+J[2]+J[3]+J[4]+
J[5]+J[7]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[3]+H[4]+H[5]+H[6]+H[7]+
J[2]+J[3]+J[4]+J[5]+J[6]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[3]+H[4]+
H[5]+H[6]+H[7]+H[8]+J[2]+J[3]+J[4]+J[5]+J[6]+J[7]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+
J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[2]+J[1], +H[0]+H[2]+H[8]+J[1]+J[7]+J[20], +H[0]+H[2]+H[7]+
J[1]+J[6]+J[19], +H[0]+H[2]+H[7]+H[8]+J[1]+J[6]+J[7]+J[19]+J[20]+J[35], +H[0]+H[2]+H[6]+J[1]+J[5]+J[18], +
H[0]+H[2]+H[6]+H[8]+J[1]+J[5]+J[7]+J[18]+J[20]+J[34], +H[0]+H[2]+H[6]+H[7]+J[1]+J[5]+J[6]+J[18]+J[19]+
J[33], +H[0]+H[2]+H[6]+H[7]+H[8]+J[1]+J[5]+J[6]+J[7]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35], +H[0]+H[2]+
H[5]+J[1]+J[4]+J[17], +H[0]+H[2]+H[5]+H[8]+J[1]+J[4]+J[7]+J[17]+J[20]+J[32], +H[0]+H[2]+H[5]+H[7]+J[1]+
J[4]+J[6]+J[17]+J[19]+J[31], +H[0]+H[2]+H[5]+H[7]+H[8]+J[1]+J[4]+J[6]+J[7]+J[17]+J[19]+J[20]+J[31]+J[32]+
J[35], +H[0]+H[2]+H[5]+H[6]+J[1]+J[4]+J[5]+J[17]+J[18]+J[30], +H[0]+H[2]+H[5]+H[6]+H[8]+J[1]+J[4]+J[5]+
J[7]+J[17]+J[18]+J[20]+J[30]+J[32]+J[34], +H[0]+H[2]+H[5]+H[6]+H[7]+J[1]+J[4]+J[5]+J[6]+J[17]+J[18]+
J[19]+J[30]+J[31]+J[33], +H[0]+H[2]+H[5]+H[6]+H[7]+H[8]+J[1]+J[4]+J[5]+J[6]+J[7]+J[17]+J[18]+J[19]+J[20]+
J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[2]+H[4]+J[1]+J[3]+J[16], +H[0]+H[2]+H[4]+H[8]+J[1]+J[3]+
J[7]+J[16]+J[20]+J[29], +H[0]+H[2]+H[4]+H[7]+J[1]+J[3]+J[6]+J[16]+J[19]+J[28], +H[0]+H[2]+H[4]+H[7]+
H[8]+J[1]+J[3]+J[6]+J[7]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35], +H[0]+H[2]+H[4]+H[6]+J[1]+J[3]+J[5]+J[16]+
J[18]+J[27], +H[0]+H[2]+H[4]+H[6]+H[8]+J[1]+J[3]+J[5]+J[7]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34], +H[0]+
H[2]+H[4]+H[6]+H[7]+J[1]+J[3]+J[5]+J[6]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33], +H[0]+H[2]+H[4]+H[6]+H[7]+
H[8]+J[1]+J[3]+J[5]+J[6]+J[7]+J[16]+J[18]+J[19]+J[20]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[2]+
H[4]+H[5]+J[1]+J[3]+J[4]+J[16]+J[17]+J[26], +H[0]+H[2]+H[4]+H[5]+H[8]+J[1]+J[3]+J[4]+J[7]+J[16]+J[17]+
J[20]+J[26]+J[29]+J[32], +H[0]+H[2]+H[4]+H[5]+H[7]+J[1]+J[3]+J[4]+J[6]+J[16]+J[17]+J[19]+J[26]+J[28]+
J[31], +H[0]+H[2]+H[4]+H[5]+H[7]+H[8]+J[1]+J[3]+J[4]+J[6]+J[7]+J[16]+J[17]+J[19]+J[20]+J[26]+J[28]+J[29]+
J[31]+J[32]+J[35], +H[0]+H[2]+H[4]+H[5]+H[6]+J[1]+J[3]+J[4]+J[5]+J[16]+J[17]+J[18]+J[26]+J[27]+J[30], +
H[0]+H[2]+H[4]+H[5]+H[6]+H[8]+J[1]+J[3]+J[4]+J[5]+J[7]+J[16]+J[17]+J[18]+J[20]+J[26]+J[27]+J[29]+J[30]+
J[32]+J[34], +H[0]+H[2]+H[4]+H[5]+H[6]+H[7]+J[1]+J[3]+J[4]+J[5]+J[6]+J[16]+J[17]+J[18]+J[19]+J[26]+J[27]+
J[28]+J[30]+J[31]+J[33], +H[0]+H[2]+H[4]+H[5]+H[6]+H[7]+H[8]+J[1]+J[3]+J[4]+J[5]+J[6]+J[7]+J[16]+J[17]+
J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[2]+H[3]+J[1]+
J[2]+J[15], +H[0]+H[2]+H[3]+H[8]+J[1]+J[2]+J[7]+J[15]+J[20]+J[25], +H[0]+H[2]+H[3]+H[7]+J[1]+J[2]+J[6]+
J[15]+J[19]+J[24], +H[0]+H[2]+H[3]+H[7]+H[8]+J[1]+J[2]+J[6]+J[7]+J[15]+J[19]+J[20]+J[24]+J[25]+J[35], +
H[0]+H[2]+H[3]+H[6]+J[1]+J[2]+J[5]+J[15]+J[18]+J[23], +H[0]+H[2]+H[3]+H[6]+H[8]+J[1]+J[2]+J[5]+J[7]+
J[15]+J[18]+J[20]+J[23]+J[25]+J[34], +H[0]+H[2]+H[3]+H[6]+H[7]+J[1]+J[2]+J[5]+J[6]+J[15]+J[18]+J[19]+
J[23]+J[24]+J[33], +H[0]+H[2]+H[3]+H[6]+H[7]+H[8]+J[1]+J[2]+J[5]+J[6]+J[7]+J[15]+J[18]+J[19]+J[20]+J[23]+
J[24]+J[25]+J[33]+J[34]+J[35], +H[0]+H[2]+H[3]+H[5]+J[1]+J[2]+J[4]+J[15]+J[17]+J[22], +H[0]+H[2]+H[3]+
H[5]+H[8]+J[1]+J[2]+J[4]+J[7]+J[15]+J[17]+J[20]+J[22]+J[25]+J[32], +H[0]+H[2]+H[3]+H[5]+H[7]+J[1]+J[2]+
J[4]+J[6]+J[15]+J[17]+J[19]+J[22]+J[24]+J[31], +H[0]+H[2]+H[3]+H[5]+H[7]+H[8]+J[1]+J[2]+J[4]+J[6]+J[7]+
J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[0]+H[2]+H[3]+H[5]+H[6]+J[1]+J[2]+J[4]+
J[5]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30], +H[0]+H[2]+H[3]+H[5]+H[6]+H[8]+J[1]+J[2]+J[4]+J[5]+J[7]+J[15]+
J[17]+J[18]+J[20]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34], +H[0]+H[2]+H[3]+H[5]+H[6]+H[7]+J[1]+J[2]+J[4]+
J[5]+J[6]+J[15]+J[17]+J[18]+J[19]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +H[0]+H[2]+H[3]+H[5]+H[6]+H[7]+
H[8]+J[1]+J[2]+J[4]+J[5]+J[6]+J[7]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+
J[32]+J[33]+J[34]+J[35], +H[0]+H[2]+H[3]+H[4]+J[1]+J[2]+J[3]+J[15]+J[16]+J[21], +H[0]+H[2]+H[3]+H[4]+
H[8]+J[1]+J[2]+J[3]+J[7]+J[15]+J[16]+J[20]+J[21]+J[25]+J[29], +H[0]+H[2]+H[3]+H[4]+H[7]+J[1]+J[2]+J[3]+
J[6]+J[15]+J[16]+J[19]+J[21]+J[24]+J[28], +H[0]+H[2]+H[3]+H[4]+H[7]+H[8]+J[1]+J[2]+J[3]+J[6]+J[7]+J[15]+
J[16]+J[19]+J[20]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[0]+H[2]+H[3]+H[4]+H[6]+J[1]+J[2]+J[3]+J[5]+
J[15]+J[16]+J[18]+J[21]+J[23]+J[27], +H[0]+H[2]+H[3]+H[4]+H[6]+H[8]+J[1]+J[2]+J[3]+J[5]+J[7]+J[15]+J[16]+
J[18]+J[20]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[0]+H[2]+H[3]+H[4]+H[6]+H[7]+J[1]+J[2]+J[3]+J[5]+
J[6]+J[15]+J[16]+J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[0]+H[2]+H[3]+H[4]+H[6]+H[7]+H[8]+
J[1]+J[2]+J[3]+J[5]+J[6]+J[7]+J[15]+J[16]+J[18]+J[19]+J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+
J[33]+J[34]+J[35], +H[0]+H[2]+H[3]+H[4]+H[5]+J[1]+J[2]+J[3]+J[4]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26], +
H[0]+H[2]+H[3]+H[4]+H[5]+H[8]+J[1]+J[2]+J[3]+J[4]+J[7]+J[15]+J[16]+J[17]+J[20]+J[21]+J[22]+J[25]+J[26]+
J[29]+J[32], +H[0]+H[2]+H[3]+H[4]+H[5]+H[7]+J[1]+J[2]+J[3]+J[4]+J[6]+J[15]+J[16]+J[17]+J[19]+J[21]+J[22]+
J[24]+J[26]+J[28]+J[31], +H[0]+H[2]+H[3]+H[4]+H[5]+H[7]+H[8]+J[1]+J[2]+J[3]+J[4]+J[6]+J[7]+J[15]+J[16]+
J[17]+J[19]+J[20]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[2]+H[3]+H[4]+
H[5]+H[6]+J[1]+J[2]+J[3]+J[4]+J[5]+J[15]+J[16]+J[17]+J[18]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[0]+
H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[1]+J[2]+J[3]+J[4]+J[5]+J[7]+J[15]+J[16]+J[17]+J[18]+J[20]+J[21]+J[22]+
J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[1]+J[2]+J[3]+
J[4]+J[5]+J[6]+J[15]+J[16]+J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +
H[0]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[1]+J[2]+J[3]+J[4]+J[5]+J[6]+J[7]+J[15]+J[16]+J[17]+J[18]+J[19]+
J[20]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+
H[1]+J[0], +H[0]+H[1]+H[8]+J[0]+J[7]+J[14], +H[0]+H[1]+H[7]+J[0]+J[6]+J[13], +H[0]+H[1]+H[7]+H[8]+J[0]+
J[6]+J[7]+J[13]+J[14]+J[35], +H[0]+H[1]+H[6]+J[0]+J[5]+J[12], +H[0]+H[1]+H[6]+H[8]+J[0]+J[5]+J[7]+J[12]+
J[14]+J[34], +H[0]+H[1]+H[6]+H[7]+J[0]+J[5]+J[6]+J[12]+J[13]+J[33], +H[0]+H[1]+H[6]+H[7]+H[8]+J[0]+J[5]+
J[6]+J[7]+J[12]+J[13]+J[14]+J[33]+J[34]+J[35], +H[0]+H[1]+H[5]+J[0]+J[4]+J[11], +H[0]+H[1]+H[5]+H[8]+
J[0]+J[4]+J[7]+J[11]+J[14]+J[32], +H[0]+H[1]+H[5]+H[7]+J[0]+J[4]+J[6]+J[11]+J[13]+J[31], +H[0]+H[1]+
H[5]+H[7]+H[8]+J[0]+J[4]+J[6]+J[7]+J[11]+J[13]+J[14]+J[31]+J[32]+J[35], +H[0]+H[1]+H[5]+H[6]+J[0]+J[4]+
J[5]+J[11]+J[12]+J[30], +H[0]+H[1]+H[5]+H[6]+H[8]+J[0]+J[4]+J[5]+J[7]+J[11]+J[12]+J[14]+J[30]+J[32]+
J[34], +H[0]+H[1]+H[5]+H[6]+H[7]+J[0]+J[4]+J[5]+J[6]+J[11]+J[12]+J[13]+J[30]+J[31]+J[33], +H[0]+H[1]+
H[5]+H[6]+H[7]+H[8]+J[0]+J[4]+J[5]+J[6]+J[7]+J[11]+J[12]+J[13]+J[14]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +
H[0]+H[1]+H[4]+J[0]+J[3]+J[10], +H[0]+H[1]+H[4]+H[8]+J[0]+J[3]+J[7]+J[10]+J[14]+J[29], +H[0]+H[1]+H[4]+
H[7]+J[0]+J[3]+J[6]+J[10]+J[13]+J[28], +H[0]+H[1]+H[4]+H[7]+H[8]+J[0]+J[3]+J[6]+J[7]+J[10]+J[13]+J[14]+
J[28]+J[29]+J[35], +H[0]+H[1]+H[4]+H[6]+J[0]+J[3]+J[5]+J[10]+J[12]+J[27], +H[0]+H[1]+H[4]+H[6]+H[8]+
J[0]+J[3]+J[5]+J[7]+J[10]+J[12]+J[14]+J[27]+J[29]+J[34], +H[0]+H[1]+H[4]+H[6]+H[7]+J[0]+J[3]+J[5]+J[6]+
J[10]+J[12]+J[13]+J[27]+J[28]+J[33], +H[0]+H[1]+H[4]+H[6]+H[7]+H[8]+J[0]+J[3]+J[5]+J[6]+J[7]+J[10]+J[12]+
J[13]+J[14]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[1]+H[4]+H[5]+J[0]+J[3]+J[4]+J[10]+J[11]+J[26], +
H[0]+H[1]+H[4]+H[5]+H[8]+J[0]+J[3]+J[4]+J[7]+J[10]+J[11]+J[14]+J[26]+J[29]+J[32], +H[0]+H[1]+H[4]+H[5]+
H[7]+J[0]+J[3]+J[4]+J[6]+J[10]+J[11]+J[13]+J[26]+J[28]+J[31], +H[0]+H[1]+H[4]+H[5]+H[7]+H[8]+J[0]+J[3]+
J[4]+J[6]+J[7]+J[10]+J[11]+J[13]+J[14]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[1]+H[4]+H[5]+H[6]+
J[0]+J[3]+J[4]+J[5]+J[10]+J[11]+J[12]+J[26]+J[27]+J[30], +H[0]+H[1]+H[4]+H[5]+H[6]+H[8]+J[0]+J[3]+J[4]+
J[5]+J[7]+J[10]+J[11]+J[12]+J[14]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[1]+H[4]+H[5]+H[6]+H[7]+
J[0]+J[3]+J[4]+J[5]+J[6]+J[10]+J[11]+J[12]+J[13]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[1]+H[4]+
H[5]+H[6]+H[7]+H[8]+J[0]+J[3]+J[4]+J[5]+J[6]+J[7]+J[10]+J[11]+J[12]+J[13]+J[14]+J[26]+J[27]+J[28]+J[29]+
J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[3]+J[0]+J[2]+J[9], +H[0]+H[1]+H[3]+H[8]+J[0]+J[2]+
J[7]+J[9]+J[14]+J[25], +H[0]+H[1]+H[3]+H[7]+J[0]+J[2]+J[6]+J[9]+J[13]+J[24], +H[0]+H[1]+H[3]+H[7]+H[8]+
J[0]+J[2]+J[6]+J[7]+J[9]+J[13]+J[14]+J[24]+J[25]+J[35], +H[0]+H[1]+H[3]+H[6]+J[0]+J[2]+J[5]+J[9]+J[12]+
J[23], +H[0]+H[1]+H[3]+H[6]+H[8]+J[0]+J[2]+J[5]+J[7]+J[9]+J[12]+J[14]+J[23]+J[25]+J[34], +H[0]+H[1]+
H[3]+H[6]+H[7]+J[0]+J[2]+J[5]+J[6]+J[9]+J[12]+J[13]+J[23]+J[24]+J[33], +H[0]+H[1]+H[3]+H[6]+H[7]+H[8]+
J[0]+J[2]+J[5]+J[6]+J[7]+J[9]+J[12]+J[13]+J[14]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[0]+H[1]+H[3]+
H[5]+J[0]+J[2]+J[4]+J[9]+J[11]+J[22], +H[0]+H[1]+H[3]+H[5]+H[8]+J[0]+J[2]+J[4]+J[7]+J[9]+J[11]+J[14]+
J[22]+J[25]+J[32], +H[0]+H[1]+H[3]+H[5]+H[7]+J[0]+J[2]+J[4]+J[6]+J[9]+J[11]+J[13]+J[22]+J[24]+J[31], +
H[0]+H[1]+H[3]+H[5]+H[7]+H[8]+J[0]+J[2]+J[4]+J[6]+J[7]+J[9]+J[11]+J[13]+J[14]+J[22]+J[24]+J[25]+J[31]+
J[32]+J[35], +H[0]+H[1]+H[3]+H[5]+H[6]+J[0]+J[2]+J[4]+J[5]+J[9]+J[11]+J[12]+J[22]+J[23]+J[30], +H[0]+
H[1]+H[3]+H[5]+H[6]+H[8]+J[0]+J[2]+J[4]+J[5]+J[7]+J[9]+J[11]+J[12]+J[14]+J[22]+J[23]+J[25]+J[30]+J[32]+
J[34], +H[0]+H[1]+H[3]+H[5]+H[6]+H[7]+J[0]+J[2]+J[4]+J[5]+J[6]+J[9]+J[11]+J[12]+J[13]+J[22]+J[23]+J[24]+
J[30]+J[31]+J[33], +H[0]+H[1]+H[3]+H[5]+H[6]+H[7]+H[8]+J[0]+J[2]+J[4]+J[5]+J[6]+J[7]+J[9]+J[11]+J[12]+
J[13]+J[14]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[3]+H[4]+J[0]+J[2]+
J[3]+J[9]+J[10]+J[21], +H[0]+H[1]+H[3]+H[4]+H[8]+J[0]+J[2]+J[3]+J[7]+J[9]+J[10]+J[14]+J[21]+J[25]+J[29], +
H[0]+H[1]+H[3]+H[4]+H[7]+J[0]+J[2]+J[3]+J[6]+J[9]+J[10]+J[13]+J[21]+J[24]+J[28], +H[0]+H[1]+H[3]+H[4]+
H[7]+H[8]+J[0]+J[2]+J[3]+J[6]+J[7]+J[9]+J[10]+J[13]+J[14]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[0]+
H[1]+H[3]+H[4]+H[6]+J[0]+J[2]+J[3]+J[5]+J[9]+J[10]+J[12]+J[21]+J[23]+J[27], +H[0]+H[1]+H[3]+H[4]+H[6]+
H[8]+J[0]+J[2]+J[3]+J[5]+J[7]+J[9]+J[10]+J[12]+J[14]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[0]+H[1]+
H[3]+H[4]+H[6]+H[7]+J[0]+J[2]+J[3]+J[5]+J[6]+J[9]+J[10]+J[12]+J[13]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +
H[0]+H[1]+H[3]+H[4]+H[6]+H[7]+H[8]+J[0]+J[2]+J[3]+J[5]+J[6]+J[7]+J[9]+J[10]+J[12]+J[13]+J[14]+J[21]+
J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[1]+H[3]+H[4]+H[5]+J[0]+J[2]+J[3]+J[4]+
J[9]+J[10]+J[11]+J[21]+J[22]+J[26], +H[0]+H[1]+H[3]+H[4]+H[5]+H[8]+J[0]+J[2]+J[3]+J[4]+J[7]+J[9]+J[10]+
J[11]+J[14]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32], +H[0]+H[1]+H[3]+H[4]+H[5]+H[7]+J[0]+J[2]+J[3]+J[4]+
J[6]+J[9]+J[10]+J[11]+J[13]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[0]+H[1]+H[3]+H[4]+H[5]+H[7]+H[8]+
J[0]+J[2]+J[3]+J[4]+J[6]+J[7]+J[9]+J[10]+J[11]+J[13]+J[14]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+
J[31]+J[32]+J[35], +H[0]+H[1]+H[3]+H[4]+H[5]+H[6]+J[0]+J[2]+J[3]+J[4]+J[5]+J[9]+J[10]+J[11]+J[12]+J[21]+
J[22]+J[23]+J[26]+J[27]+J[30], +H[0]+H[1]+H[3]+H[4]+H[5]+H[6]+H[8]+J[0]+J[2]+J[3]+J[4]+J[5]+J[7]+J[9]+
J[10]+J[11]+J[12]+J[14]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[1]+H[3]+
H[4]+H[5]+H[6]+H[7]+J[0]+J[2]+J[3]+J[4]+J[5]+J[6]+J[9]+J[10]+J[11]+J[12]+J[13]+J[21]+J[22]+J[23]+J[24]+
J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[1]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[0]+J[2]+J[3]+J[4]+J[5]+
J[6]+J[7]+J[9]+J[10]+J[11]+J[12]+J[13]+J[14]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+
J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+J[0]+J[1]+J[8], +H[0]+H[1]+H[2]+H[8]+J[0]+J[1]+J[7]+J[8]+
J[14]+J[20], +H[0]+H[1]+H[2]+H[7]+J[0]+J[1]+J[6]+J[8]+J[13]+J[19], +H[0]+H[1]+H[2]+H[7]+H[8]+J[0]+J[1]+
J[6]+J[7]+J[8]+J[13]+J[14]+J[19]+J[20]+J[35], +H[0]+H[1]+H[2]+H[6]+J[0]+J[1]+J[5]+J[8]+J[12]+J[18], +
H[0]+H[1]+H[2]+H[6]+H[8]+J[0]+J[1]+J[5]+J[7]+J[8]+J[12]+J[14]+J[18]+J[20]+J[34], +H[0]+H[1]+H[2]+H[6]+
H[7]+J[0]+J[1]+J[5]+J[6]+J[8]+J[12]+J[13]+J[18]+J[19]+J[33], +H[0]+H[1]+H[2]+H[6]+H[7]+H[8]+J[0]+J[1]+
J[5]+J[6]+J[7]+J[8]+J[12]+J[13]+J[14]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[5]+J[0]+
J[1]+J[4]+J[8]+J[11]+J[17], +H[0]+H[1]+H[2]+H[5]+H[8]+J[0]+J[1]+J[4]+J[7]+J[8]+J[11]+J[14]+J[17]+J[20]+
J[32], +H[0]+H[1]+H[2]+H[5]+H[7]+J[0]+J[1]+J[4]+J[6]+J[8]+J[11]+J[13]+J[17]+J[19]+J[31], +H[0]+H[1]+
H[2]+H[5]+H[7]+H[8]+J[0]+J[1]+J[4]+J[6]+J[7]+J[8]+J[11]+J[13]+J[14]+J[17]+J[19]+J[20]+J[31]+J[32]+J[35], +
H[0]+H[1]+H[2]+H[5]+H[6]+J[0]+J[1]+J[4]+J[5]+J[8]+J[11]+J[12]+J[17]+J[18]+J[30], +H[0]+H[1]+H[2]+H[5]+
H[6]+H[8]+J[0]+J[1]+J[4]+J[5]+J[7]+J[8]+J[11]+J[12]+J[14]+J[17]+J[18]+J[20]+J[30]+J[32]+J[34], +H[0]+
H[1]+H[2]+H[5]+H[6]+H[7]+J[0]+J[1]+J[4]+J[5]+J[6]+J[8]+J[11]+J[12]+J[13]+J[17]+J[18]+J[19]+J[30]+J[31]+
J[33], +H[0]+H[1]+H[2]+H[5]+H[6]+H[7]+H[8]+J[0]+J[1]+J[4]+J[5]+J[6]+J[7]+J[8]+J[11]+J[12]+J[13]+J[14]+
J[17]+J[18]+J[19]+J[20]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[4]+J[0]+J[1]+J[3]+J[8]+
J[10]+J[16], +H[0]+H[1]+H[2]+H[4]+H[8]+J[0]+J[1]+J[3]+J[7]+J[8]+J[10]+J[14]+J[16]+J[20]+J[29], +H[0]+
H[1]+H[2]+H[4]+H[7]+J[0]+J[1]+J[3]+J[6]+J[8]+J[10]+J[13]+J[16]+J[19]+J[28], +H[0]+H[1]+H[2]+H[4]+H[7]+
H[8]+J[0]+J[1]+J[3]+J[6]+J[7]+J[8]+J[10]+J[13]+J[14]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35], +H[0]+H[1]+
H[2]+H[4]+H[6]+J[0]+J[1]+J[3]+J[5]+J[8]+J[10]+J[12]+J[16]+J[18]+J[27], +H[0]+H[1]+H[2]+H[4]+H[6]+H[8]+
J[0]+J[1]+J[3]+J[5]+J[7]+J[8]+J[10]+J[12]+J[14]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34], +H[0]+H[1]+H[2]+
H[4]+H[6]+H[7]+J[0]+J[1]+J[3]+J[5]+J[6]+J[8]+J[10]+J[12]+J[13]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33], +
H[0]+H[1]+H[2]+H[4]+H[6]+H[7]+H[8]+J[0]+J[1]+J[3]+J[5]+J[6]+J[7]+J[8]+J[10]+J[12]+J[13]+J[14]+J[16]+
J[18]+J[19]+J[20]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[4]+H[5]+J[0]+J[1]+J[3]+J[4]+
J[8]+J[10]+J[11]+J[16]+J[17]+J[26], +H[0]+H[1]+H[2]+H[4]+H[5]+H[8]+J[0]+J[1]+J[3]+J[4]+J[7]+J[8]+J[10]+
J[11]+J[14]+J[16]+J[17]+J[20]+J[26]+J[29]+J[32], +H[0]+H[1]+H[2]+H[4]+H[5]+H[7]+J[0]+J[1]+J[3]+J[4]+
J[6]+J[8]+J[10]+J[11]+J[13]+J[16]+J[17]+J[19]+J[26]+J[28]+J[31], +H[0]+H[1]+H[2]+H[4]+H[5]+H[7]+H[8]+
J[0]+J[1]+J[3]+J[4]+J[6]+J[7]+J[8]+J[10]+J[11]+J[13]+J[14]+J[16]+J[17]+J[19]+J[20]+J[26]+J[28]+J[29]+
J[31]+J[32]+J[35], +H[0]+H[1]+H[2]+H[4]+H[5]+H[6]+J[0]+J[1]+J[3]+J[4]+J[5]+J[8]+J[10]+J[11]+J[12]+J[16]+
J[17]+J[18]+J[26]+J[27]+J[30], +H[0]+H[1]+H[2]+H[4]+H[5]+H[6]+H[8]+J[0]+J[1]+J[3]+J[4]+J[5]+J[7]+J[8]+
J[10]+J[11]+J[12]+J[14]+J[16]+J[17]+J[18]+J[20]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[1]+H[2]+
H[4]+H[5]+H[6]+H[7]+J[0]+J[1]+J[3]+J[4]+J[5]+J[6]+J[8]+J[10]+J[11]+J[12]+J[13]+J[16]+J[17]+J[18]+J[19]+
J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[1]+H[2]+H[4]+H[5]+H[6]+H[7]+H[8]+J[0]+J[1]+J[3]+J[4]+J[5]+
J[6]+J[7]+J[8]+J[10]+J[11]+J[12]+J[13]+J[14]+J[16]+J[17]+J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+
J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[3]+J[0]+J[1]+J[2]+J[8]+J[9]+J[15], +H[0]+H[1]+H[2]+
H[3]+H[8]+J[0]+J[1]+J[2]+J[7]+J[8]+J[9]+J[14]+J[15]+J[20]+J[25], +H[0]+H[1]+H[2]+H[3]+H[7]+J[0]+J[1]+
J[2]+J[6]+J[8]+J[9]+J[13]+J[15]+J[19]+J[24], +H[0]+H[1]+H[2]+H[3]+H[7]+H[8]+J[0]+J[1]+J[2]+J[6]+J[7]+
J[8]+J[9]+J[13]+J[14]+J[15]+J[19]+J[20]+J[24]+J[25]+J[35], +H[0]+H[1]+H[2]+H[3]+H[6]+J[0]+J[1]+J[2]+
J[5]+J[8]+J[9]+J[12]+J[15]+J[18]+J[23], +H[0]+H[1]+H[2]+H[3]+H[6]+H[8]+J[0]+J[1]+J[2]+J[5]+J[7]+J[8]+
J[9]+J[12]+J[14]+J[15]+J[18]+J[20]+J[23]+J[25]+J[34], +H[0]+H[1]+H[2]+H[3]+H[6]+H[7]+J[0]+J[1]+J[2]+
J[5]+J[6]+J[8]+J[9]+J[12]+J[13]+J[15]+J[18]+J[19]+J[23]+J[24]+J[33], +H[0]+H[1]+H[2]+H[3]+H[6]+H[7]+
H[8]+J[0]+J[1]+J[2]+J[5]+J[6]+J[7]+J[8]+J[9]+J[12]+J[13]+J[14]+J[15]+J[18]+J[19]+J[20]+J[23]+J[24]+J[25]+
J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[3]+H[5]+J[0]+J[1]+J[2]+J[4]+J[8]+J[9]+J[11]+J[15]+J[17]+J[22], +
H[0]+H[1]+H[2]+H[3]+H[5]+H[8]+J[0]+J[1]+J[2]+J[4]+J[7]+J[8]+J[9]+J[11]+J[14]+J[15]+J[17]+J[20]+J[22]+
J[25]+J[32], +H[0]+H[1]+H[2]+H[3]+H[5]+H[7]+J[0]+J[1]+J[2]+J[4]+J[6]+J[8]+J[9]+J[11]+J[13]+J[15]+J[17]+
J[19]+J[22]+J[24]+J[31], +H[0]+H[1]+H[2]+H[3]+H[5]+H[7]+H[8]+J[0]+J[1]+J[2]+J[4]+J[6]+J[7]+J[8]+J[9]+
J[11]+J[13]+J[14]+J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[0]+H[1]+H[2]+H[3]+
H[5]+H[6]+J[0]+J[1]+J[2]+J[4]+J[5]+J[8]+J[9]+J[11]+J[12]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30], +H[0]+
H[1]+H[2]+H[3]+H[5]+H[6]+H[8]+J[0]+J[1]+J[2]+J[4]+J[5]+J[7]+J[8]+J[9]+J[11]+J[12]+J[14]+J[15]+J[17]+
J[18]+J[20]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34], +H[0]+H[1]+H[2]+H[3]+H[5]+H[6]+H[7]+J[0]+J[1]+J[2]+
J[4]+J[5]+J[6]+J[8]+J[9]+J[11]+J[12]+J[13]+J[15]+J[17]+J[18]+J[19]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +
H[0]+H[1]+H[2]+H[3]+H[5]+H[6]+H[7]+H[8]+J[0]+J[1]+J[2]+J[4]+J[5]+J[6]+J[7]+J[8]+J[9]+J[11]+J[12]+J[13]+
J[14]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+
H[1]+H[2]+H[3]+H[4]+J[0]+J[1]+J[2]+J[3]+J[8]+J[9]+J[10]+J[15]+J[16]+J[21], +H[0]+H[1]+H[2]+H[3]+H[4]+
H[8]+J[0]+J[1]+J[2]+J[3]+J[7]+J[8]+J[9]+J[10]+J[14]+J[15]+J[16]+J[20]+J[21]+J[25]+J[29], +H[0]+H[1]+
H[2]+H[3]+H[4]+H[7]+J[0]+J[1]+J[2]+J[3]+J[6]+J[8]+J[9]+J[10]+J[13]+J[15]+J[16]+J[19]+J[21]+J[24]+J[28], +
H[0]+H[1]+H[2]+H[3]+H[4]+H[7]+H[8]+J[0]+J[1]+J[2]+J[3]+J[6]+J[7]+J[8]+J[9]+J[10]+J[13]+J[14]+J[15]+J[16]+
J[19]+J[20]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[0]+H[1]+H[2]+H[3]+H[4]+H[6]+J[0]+J[1]+J[2]+J[3]+
J[5]+J[8]+J[9]+J[10]+J[12]+J[15]+J[16]+J[18]+J[21]+J[23]+J[27], +H[0]+H[1]+H[2]+H[3]+H[4]+H[6]+H[8]+
J[0]+J[1]+J[2]+J[3]+J[5]+J[7]+J[8]+J[9]+J[10]+J[12]+J[14]+J[15]+J[16]+J[18]+J[20]+J[21]+J[23]+J[25]+
J[27]+J[29]+J[34], +H[0]+H[1]+H[2]+H[3]+H[4]+H[6]+H[7]+J[0]+J[1]+J[2]+J[3]+J[5]+J[6]+J[8]+J[9]+J[10]+
J[12]+J[13]+J[15]+J[16]+J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[0]+H[1]+H[2]+H[3]+H[4]+H[6]+
H[7]+H[8]+J[0]+J[1]+J[2]+J[3]+J[5]+J[6]+J[7]+J[8]+J[9]+J[10]+J[12]+J[13]+J[14]+J[15]+J[16]+J[18]+J[19]+
J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+J[0]+
J[1]+J[2]+J[3]+J[4]+J[8]+J[9]+J[10]+J[11]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26], +H[0]+H[1]+H[2]+H[3]+
H[4]+H[5]+H[8]+J[0]+J[1]+J[2]+J[3]+J[4]+J[7]+J[8]+J[9]+J[10]+J[11]+J[14]+J[15]+J[16]+J[17]+J[20]+J[21]+
J[22]+J[25]+J[26]+J[29]+J[32], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+H[7]+J[0]+J[1]+J[2]+J[3]+J[4]+J[6]+J[8]+
J[9]+J[10]+J[11]+J[13]+J[15]+J[16]+J[17]+J[19]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[0]+H[1]+H[2]+
H[3]+H[4]+H[5]+H[7]+H[8]+J[0]+J[1]+J[2]+J[3]+J[4]+J[6]+J[7]+J[8]+J[9]+J[10]+J[11]+J[13]+J[14]+J[15]+
J[16]+J[17]+J[19]+J[20]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[1]+H[2]+
H[3]+H[4]+H[5]+H[6]+J[0]+J[1]+J[2]+J[3]+J[4]+J[5]+J[8]+J[9]+J[10]+J[11]+J[12]+J[15]+J[16]+J[17]+J[18]+
J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[0]+J[1]+J[2]+J[3]+J[4]+
J[5]+J[7]+J[8]+J[9]+J[10]+J[11]+J[12]+J[14]+J[15]+J[16]+J[17]+J[18]+J[20]+J[21]+J[22]+J[23]+J[25]+J[26]+
J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[0]+J[1]+J[2]+J[3]+J[4]+J[5]+
J[6]+J[8]+J[9]+J[10]+J[11]+J[12]+J[13]+J[15]+J[16]+J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+
J[28]+J[30]+J[31]+J[33], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[0]+J[1]+J[2]+J[3]+J[4]+J[5]+
J[6]+J[7]+J[8]+J[9]+J[10]+J[11]+J[12]+J[13]+J[14]+J[15]+J[16]+J[17]+J[18]+J[19]+J[20]+J[21]+J[22]+J[23]+
J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35],])
logZ = fast_logsumexp(energyTerms)[0]
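    # logZ is the log partition function. Each observable below is recovered
    # as exp(num[0] - logZ) * num[1]; this assumes fast_logsumexp(x, coeffs),
    # defined elsewhere in this generated file, returns the pair
    # (log|sum_k coeffs_k * exp(x_k)|, sign of that sum). Because the
    # coefficient masks below are 0/1, num[1] is +1 and each Cout[i] is the
    # probability mass of the masked states, i.e. <s_i> or <s_i s_j> in the
    # {0,1} spin convention.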
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[0] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[1] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[2] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[3] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[4] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,
0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,
1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,
1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,
0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,
1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,
1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,
1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,
0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,
1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,
0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
Cout[5] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,
1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,
0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,
1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,
1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,
0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,
1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,
0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,
0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,
1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,
0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,
1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,
1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,
0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1])
Cout[6] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,
1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,
1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,
0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,
1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,
1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,
0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,
1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,
1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,
0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1])
Cout[7] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1])
Cout[8] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[9] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[10] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[11] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[12] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,
1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,
0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,
1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,
0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
Cout[13] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,
0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,
1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,
0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,
1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,
1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,
0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1])
Cout[14] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,
0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,
1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,
1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,
0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1])
Cout[15] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1])
Cout[16] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[17] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[18] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[19] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,
0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,
1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,
1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,
1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,
0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
Cout[20] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,
1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,
0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,
1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,
1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,
1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,
0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1])
Cout[21] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,
0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,
1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,
1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,
0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1])
Cout[22] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1])
Cout[23] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[24] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[25] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,
1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,
1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,
1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,
0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,
0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
Cout[26] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,
0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,
1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,
1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,
1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,
0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,
0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1])
Cout[27] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,
1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,
1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,
1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,
1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1])
Cout[28] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1])
Cout[29] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Cout[30] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,
1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,
1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,
0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
Cout[31] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,
1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,
0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,
0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,
0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1])
Cout[32] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,
1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,
0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,
0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,
1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,
0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1])
Cout[33] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1])
Cout[34] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,
1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
Cout[35] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,
0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,
1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,
1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1])
Cout[36] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,
0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,
1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,
1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,
0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1])
Cout[37] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,
0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1])
Cout[38] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,
0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1])
Cout[39] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,
0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,
1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,
0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,
1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,
0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,
0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,
0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,
0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,
0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1])
Cout[40] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,
0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,
1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,
0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,
1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,
0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,
0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,
0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,
0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,
0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,
0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1])
Cout[41] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,
0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,
0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,
0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,
0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,
1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,
0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,
0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,
0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,
0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,
0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1])
Cout[42] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,
0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,
1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,
0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,
0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,
0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,
0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,
0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,
0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,
1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,
0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,
0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1])
Cout[43] = exp( num[0] - logZ ) * num[1]
num = fast_logsumexp(energyTerms, [0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,
0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,
0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,
1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,
0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,
0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,
0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,
1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,
0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,
0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,
0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,
1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,
0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,
0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1])
Cout[44] = exp( num[0] - logZ ) * num[1]
Cout[isnan(Cout)] = 0.
return(Cout)
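
# The vector returned above packs this model's observables. Reading off the
# 0/1 masks, Cout[0:9] appear to be the single-spin means <s_i> and
# Cout[9:45] the pairwise correlations <s_i s_j> in the {0,1} basis.
#
# Minimal usage sketch (hedged: `calc_observables` is a placeholder name for
# the function ending above, whose def lies earlier in this generated file):
#
#     import numpy as np
#     params = np.random.normal(scale=.1, size=45)  # 9 fields, then 36 couplings
#     obs = calc_observables(params)   # obs[:9] = <s_i>, obs[9:] = <s_i s_j>
#     probs = p(params)                # probabilities of all 2**9 = 512 states
#     assert np.isclose(probs.sum(), 1.)
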
def p(params):
"""
Give all parameters concatenated into one array from lowest to highest order.
Returns probabilities of all configurations.
"""
Cout = zeros((45))
H = params[0:9]
J = params[9:45]
Pout = zeros((512))
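    # energyTerms enumerates the energy of every one of the 512 = 2**9 states,
    # counting in binary with spin 8 as the least significant bit: H[i] enters
    # when spin i is 1 and J[k] when both spins of pair k are 1 ({0,1} basis).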
energyTerms = array([ +0, +H[8]+0, +H[7]+0, +H[7]+H[8]+J[35], +H[6]+0, +H[6]+H[8]+J[34], +H[6]+H[7]+J[33], +H[6]+H[7]+H[8]+
J[33]+J[34]+J[35], +H[5]+0, +H[5]+H[8]+J[32], +H[5]+H[7]+J[31], +H[5]+H[7]+H[8]+J[31]+J[32]+J[35], +
H[5]+H[6]+J[30], +H[5]+H[6]+H[8]+J[30]+J[32]+J[34], +H[5]+H[6]+H[7]+J[30]+J[31]+J[33], +H[5]+H[6]+H[7]+
H[8]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[4]+0, +H[4]+H[8]+J[29], +H[4]+H[7]+J[28], +H[4]+H[7]+H[8]+
J[28]+J[29]+J[35], +H[4]+H[6]+J[27], +H[4]+H[6]+H[8]+J[27]+J[29]+J[34], +H[4]+H[6]+H[7]+J[27]+J[28]+
J[33], +H[4]+H[6]+H[7]+H[8]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[4]+H[5]+J[26], +H[4]+H[5]+H[8]+J[26]+
J[29]+J[32], +H[4]+H[5]+H[7]+J[26]+J[28]+J[31], +H[4]+H[5]+H[7]+H[8]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +
H[4]+H[5]+H[6]+J[26]+J[27]+J[30], +H[4]+H[5]+H[6]+H[8]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[4]+H[5]+
H[6]+H[7]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[4]+H[5]+H[6]+H[7]+H[8]+J[26]+J[27]+J[28]+J[29]+J[30]+
J[31]+J[32]+J[33]+J[34]+J[35], +H[3]+0, +H[3]+H[8]+J[25], +H[3]+H[7]+J[24], +H[3]+H[7]+H[8]+J[24]+J[25]+
J[35], +H[3]+H[6]+J[23], +H[3]+H[6]+H[8]+J[23]+J[25]+J[34], +H[3]+H[6]+H[7]+J[23]+J[24]+J[33], +H[3]+
H[6]+H[7]+H[8]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[3]+H[5]+J[22], +H[3]+H[5]+H[8]+J[22]+J[25]+J[32], +
H[3]+H[5]+H[7]+J[22]+J[24]+J[31], +H[3]+H[5]+H[7]+H[8]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[3]+H[5]+
H[6]+J[22]+J[23]+J[30], +H[3]+H[5]+H[6]+H[8]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34], +H[3]+H[5]+H[6]+H[7]+
J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +H[3]+H[5]+H[6]+H[7]+H[8]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+
J[33]+J[34]+J[35], +H[3]+H[4]+J[21], +H[3]+H[4]+H[8]+J[21]+J[25]+J[29], +H[3]+H[4]+H[7]+J[21]+J[24]+
J[28], +H[3]+H[4]+H[7]+H[8]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[3]+H[4]+H[6]+J[21]+J[23]+J[27], +
H[3]+H[4]+H[6]+H[8]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[3]+H[4]+H[6]+H[7]+J[21]+J[23]+J[24]+J[27]+
J[28]+J[33], +H[3]+H[4]+H[6]+H[7]+H[8]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +
H[3]+H[4]+H[5]+J[21]+J[22]+J[26], +H[3]+H[4]+H[5]+H[8]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32], +H[3]+H[4]+
H[5]+H[7]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[3]+H[4]+H[5]+H[7]+H[8]+J[21]+J[22]+J[24]+J[25]+J[26]+
J[28]+J[29]+J[31]+J[32]+J[35], +H[3]+H[4]+H[5]+H[6]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[3]+H[4]+
H[5]+H[6]+H[8]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[3]+H[4]+H[5]+H[6]+H[7]+
J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[21]+J[22]+
J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[2]+0, +H[2]+H[8]+J[20], +
H[2]+H[7]+J[19], +H[2]+H[7]+H[8]+J[19]+J[20]+J[35], +H[2]+H[6]+J[18], +H[2]+H[6]+H[8]+J[18]+J[20]+J[34], +
H[2]+H[6]+H[7]+J[18]+J[19]+J[33], +H[2]+H[6]+H[7]+H[8]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35], +H[2]+H[5]+
J[17], +H[2]+H[5]+H[8]+J[17]+J[20]+J[32], +H[2]+H[5]+H[7]+J[17]+J[19]+J[31], +H[2]+H[5]+H[7]+H[8]+J[17]+
J[19]+J[20]+J[31]+J[32]+J[35], +H[2]+H[5]+H[6]+J[17]+J[18]+J[30], +H[2]+H[5]+H[6]+H[8]+J[17]+J[18]+J[20]+
J[30]+J[32]+J[34], +H[2]+H[5]+H[6]+H[7]+J[17]+J[18]+J[19]+J[30]+J[31]+J[33], +H[2]+H[5]+H[6]+H[7]+H[8]+
J[17]+J[18]+J[19]+J[20]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[2]+H[4]+J[16], +H[2]+H[4]+H[8]+J[16]+
J[20]+J[29], +H[2]+H[4]+H[7]+J[16]+J[19]+J[28], +H[2]+H[4]+H[7]+H[8]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35], +
H[2]+H[4]+H[6]+J[16]+J[18]+J[27], +H[2]+H[4]+H[6]+H[8]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34], +H[2]+H[4]+
H[6]+H[7]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33], +H[2]+H[4]+H[6]+H[7]+H[8]+J[16]+J[18]+J[19]+J[20]+J[27]+
J[28]+J[29]+J[33]+J[34]+J[35], +H[2]+H[4]+H[5]+J[16]+J[17]+J[26], +H[2]+H[4]+H[5]+H[8]+J[16]+J[17]+J[20]+
J[26]+J[29]+J[32], +H[2]+H[4]+H[5]+H[7]+J[16]+J[17]+J[19]+J[26]+J[28]+J[31], +H[2]+H[4]+H[5]+H[7]+H[8]+
J[16]+J[17]+J[19]+J[20]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[2]+H[4]+H[5]+H[6]+J[16]+J[17]+J[18]+
J[26]+J[27]+J[30], +H[2]+H[4]+H[5]+H[6]+H[8]+J[16]+J[17]+J[18]+J[20]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +
H[2]+H[4]+H[5]+H[6]+H[7]+J[16]+J[17]+J[18]+J[19]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[2]+H[4]+H[5]+
H[6]+H[7]+H[8]+J[16]+J[17]+J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +
H[2]+H[3]+J[15], +H[2]+H[3]+H[8]+J[15]+J[20]+J[25], +H[2]+H[3]+H[7]+J[15]+J[19]+J[24], +H[2]+H[3]+H[7]+
H[8]+J[15]+J[19]+J[20]+J[24]+J[25]+J[35], +H[2]+H[3]+H[6]+J[15]+J[18]+J[23], +H[2]+H[3]+H[6]+H[8]+J[15]+
J[18]+J[20]+J[23]+J[25]+J[34], +H[2]+H[3]+H[6]+H[7]+J[15]+J[18]+J[19]+J[23]+J[24]+J[33], +H[2]+H[3]+
H[6]+H[7]+H[8]+J[15]+J[18]+J[19]+J[20]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[2]+H[3]+H[5]+J[15]+J[17]+
J[22], +H[2]+H[3]+H[5]+H[8]+J[15]+J[17]+J[20]+J[22]+J[25]+J[32], +H[2]+H[3]+H[5]+H[7]+J[15]+J[17]+J[19]+
J[22]+J[24]+J[31], +H[2]+H[3]+H[5]+H[7]+H[8]+J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +
H[2]+H[3]+H[5]+H[6]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30], +H[2]+H[3]+H[5]+H[6]+H[8]+J[15]+J[17]+J[18]+
J[20]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34], +H[2]+H[3]+H[5]+H[6]+H[7]+J[15]+J[17]+J[18]+J[19]+J[22]+J[23]+
J[24]+J[30]+J[31]+J[33], +H[2]+H[3]+H[5]+H[6]+H[7]+H[8]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+
J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[2]+H[3]+H[4]+J[15]+J[16]+J[21], +H[2]+H[3]+H[4]+H[8]+J[15]+
J[16]+J[20]+J[21]+J[25]+J[29], +H[2]+H[3]+H[4]+H[7]+J[15]+J[16]+J[19]+J[21]+J[24]+J[28], +H[2]+H[3]+
H[4]+H[7]+H[8]+J[15]+J[16]+J[19]+J[20]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[2]+H[3]+H[4]+H[6]+J[15]+
J[16]+J[18]+J[21]+J[23]+J[27], +H[2]+H[3]+H[4]+H[6]+H[8]+J[15]+J[16]+J[18]+J[20]+J[21]+J[23]+J[25]+J[27]+
J[29]+J[34], +H[2]+H[3]+H[4]+H[6]+H[7]+J[15]+J[16]+J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +
H[2]+H[3]+H[4]+H[6]+H[7]+H[8]+J[15]+J[16]+J[18]+J[19]+J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+
J[33]+J[34]+J[35], +H[2]+H[3]+H[4]+H[5]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26], +H[2]+H[3]+H[4]+H[5]+H[8]+
J[15]+J[16]+J[17]+J[20]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32], +H[2]+H[3]+H[4]+H[5]+H[7]+J[15]+J[16]+J[17]+
J[19]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[2]+H[3]+H[4]+H[5]+H[7]+H[8]+J[15]+J[16]+J[17]+J[19]+J[20]+
J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[2]+H[3]+H[4]+H[5]+H[6]+J[15]+J[16]+J[17]+
J[18]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[15]+J[16]+J[17]+J[18]+J[20]+
J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[15]+J[16]+
J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[2]+H[3]+H[4]+H[5]+
H[6]+H[7]+H[8]+J[15]+J[16]+J[17]+J[18]+J[19]+J[20]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+
J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+0, +H[1]+H[8]+J[14], +H[1]+H[7]+J[13], +H[1]+H[7]+H[8]+J[13]+
J[14]+J[35], +H[1]+H[6]+J[12], +H[1]+H[6]+H[8]+J[12]+J[14]+J[34], +H[1]+H[6]+H[7]+J[12]+J[13]+J[33], +
H[1]+H[6]+H[7]+H[8]+J[12]+J[13]+J[14]+J[33]+J[34]+J[35], +H[1]+H[5]+J[11], +H[1]+H[5]+H[8]+J[11]+J[14]+
J[32], +H[1]+H[5]+H[7]+J[11]+J[13]+J[31], +H[1]+H[5]+H[7]+H[8]+J[11]+J[13]+J[14]+J[31]+J[32]+J[35], +
H[1]+H[5]+H[6]+J[11]+J[12]+J[30], +H[1]+H[5]+H[6]+H[8]+J[11]+J[12]+J[14]+J[30]+J[32]+J[34], +H[1]+H[5]+
H[6]+H[7]+J[11]+J[12]+J[13]+J[30]+J[31]+J[33], +H[1]+H[5]+H[6]+H[7]+H[8]+J[11]+J[12]+J[13]+J[14]+J[30]+
J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+H[4]+J[10], +H[1]+H[4]+H[8]+J[10]+J[14]+J[29], +H[1]+H[4]+H[7]+
J[10]+J[13]+J[28], +H[1]+H[4]+H[7]+H[8]+J[10]+J[13]+J[14]+J[28]+J[29]+J[35], +H[1]+H[4]+H[6]+J[10]+J[12]+
J[27], +H[1]+H[4]+H[6]+H[8]+J[10]+J[12]+J[14]+J[27]+J[29]+J[34], +H[1]+H[4]+H[6]+H[7]+J[10]+J[12]+J[13]+
J[27]+J[28]+J[33], +H[1]+H[4]+H[6]+H[7]+H[8]+J[10]+J[12]+J[13]+J[14]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +
H[1]+H[4]+H[5]+J[10]+J[11]+J[26], +H[1]+H[4]+H[5]+H[8]+J[10]+J[11]+J[14]+J[26]+J[29]+J[32], +H[1]+H[4]+
H[5]+H[7]+J[10]+J[11]+J[13]+J[26]+J[28]+J[31], +H[1]+H[4]+H[5]+H[7]+H[8]+J[10]+J[11]+J[13]+J[14]+J[26]+
J[28]+J[29]+J[31]+J[32]+J[35], +H[1]+H[4]+H[5]+H[6]+J[10]+J[11]+J[12]+J[26]+J[27]+J[30], +H[1]+H[4]+
H[5]+H[6]+H[8]+J[10]+J[11]+J[12]+J[14]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[1]+H[4]+H[5]+H[6]+H[7]+
J[10]+J[11]+J[12]+J[13]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[1]+H[4]+H[5]+H[6]+H[7]+H[8]+J[10]+J[11]+
J[12]+J[13]+J[14]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+H[3]+J[9], +H[1]+
H[3]+H[8]+J[9]+J[14]+J[25], +H[1]+H[3]+H[7]+J[9]+J[13]+J[24], +H[1]+H[3]+H[7]+H[8]+J[9]+J[13]+J[14]+
J[24]+J[25]+J[35], +H[1]+H[3]+H[6]+J[9]+J[12]+J[23], +H[1]+H[3]+H[6]+H[8]+J[9]+J[12]+J[14]+J[23]+J[25]+
J[34], +H[1]+H[3]+H[6]+H[7]+J[9]+J[12]+J[13]+J[23]+J[24]+J[33], +H[1]+H[3]+H[6]+H[7]+H[8]+J[9]+J[12]+
J[13]+J[14]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[1]+H[3]+H[5]+J[9]+J[11]+J[22], +H[1]+H[3]+H[5]+H[8]+
J[9]+J[11]+J[14]+J[22]+J[25]+J[32], +H[1]+H[3]+H[5]+H[7]+J[9]+J[11]+J[13]+J[22]+J[24]+J[31], +H[1]+H[3]+
H[5]+H[7]+H[8]+J[9]+J[11]+J[13]+J[14]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[1]+H[3]+H[5]+H[6]+J[9]+
J[11]+J[12]+J[22]+J[23]+J[30], +H[1]+H[3]+H[5]+H[6]+H[8]+J[9]+J[11]+J[12]+J[14]+J[22]+J[23]+J[25]+J[30]+
J[32]+J[34], +H[1]+H[3]+H[5]+H[6]+H[7]+J[9]+J[11]+J[12]+J[13]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +
H[1]+H[3]+H[5]+H[6]+H[7]+H[8]+J[9]+J[11]+J[12]+J[13]+J[14]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+
J[33]+J[34]+J[35], +H[1]+H[3]+H[4]+J[9]+J[10]+J[21], +H[1]+H[3]+H[4]+H[8]+J[9]+J[10]+J[14]+J[21]+J[25]+
J[29], +H[1]+H[3]+H[4]+H[7]+J[9]+J[10]+J[13]+J[21]+J[24]+J[28], +H[1]+H[3]+H[4]+H[7]+H[8]+J[9]+J[10]+
J[13]+J[14]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[1]+H[3]+H[4]+H[6]+J[9]+J[10]+J[12]+J[21]+J[23]+J[27], +
H[1]+H[3]+H[4]+H[6]+H[8]+J[9]+J[10]+J[12]+J[14]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[1]+H[3]+H[4]+
H[6]+H[7]+J[9]+J[10]+J[12]+J[13]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[1]+H[3]+H[4]+H[6]+H[7]+H[8]+
J[9]+J[10]+J[12]+J[13]+J[14]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[1]+H[3]+
H[4]+H[5]+J[9]+J[10]+J[11]+J[21]+J[22]+J[26], +H[1]+H[3]+H[4]+H[5]+H[8]+J[9]+J[10]+J[11]+J[14]+J[21]+
J[22]+J[25]+J[26]+J[29]+J[32], +H[1]+H[3]+H[4]+H[5]+H[7]+J[9]+J[10]+J[11]+J[13]+J[21]+J[22]+J[24]+J[26]+
J[28]+J[31], +H[1]+H[3]+H[4]+H[5]+H[7]+H[8]+J[9]+J[10]+J[11]+J[13]+J[14]+J[21]+J[22]+J[24]+J[25]+J[26]+
J[28]+J[29]+J[31]+J[32]+J[35], +H[1]+H[3]+H[4]+H[5]+H[6]+J[9]+J[10]+J[11]+J[12]+J[21]+J[22]+J[23]+J[26]+
J[27]+J[30], +H[1]+H[3]+H[4]+H[5]+H[6]+H[8]+J[9]+J[10]+J[11]+J[12]+J[14]+J[21]+J[22]+J[23]+J[25]+J[26]+
J[27]+J[29]+J[30]+J[32]+J[34], +H[1]+H[3]+H[4]+H[5]+H[6]+H[7]+J[9]+J[10]+J[11]+J[12]+J[13]+J[21]+J[22]+
J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[1]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[9]+J[10]+J[11]+
J[12]+J[13]+J[14]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+
J[35], +H[1]+H[2]+J[8], +H[1]+H[2]+H[8]+J[8]+J[14]+J[20], +H[1]+H[2]+H[7]+J[8]+J[13]+J[19], +H[1]+H[2]+
H[7]+H[8]+J[8]+J[13]+J[14]+J[19]+J[20]+J[35], +H[1]+H[2]+H[6]+J[8]+J[12]+J[18], +H[1]+H[2]+H[6]+H[8]+
J[8]+J[12]+J[14]+J[18]+J[20]+J[34], +H[1]+H[2]+H[6]+H[7]+J[8]+J[12]+J[13]+J[18]+J[19]+J[33], +H[1]+H[2]+
H[6]+H[7]+H[8]+J[8]+J[12]+J[13]+J[14]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35], +H[1]+H[2]+H[5]+J[8]+J[11]+
J[17], +H[1]+H[2]+H[5]+H[8]+J[8]+J[11]+J[14]+J[17]+J[20]+J[32], +H[1]+H[2]+H[5]+H[7]+J[8]+J[11]+J[13]+
J[17]+J[19]+J[31], +H[1]+H[2]+H[5]+H[7]+H[8]+J[8]+J[11]+J[13]+J[14]+J[17]+J[19]+J[20]+J[31]+J[32]+J[35], +
H[1]+H[2]+H[5]+H[6]+J[8]+J[11]+J[12]+J[17]+J[18]+J[30], +H[1]+H[2]+H[5]+H[6]+H[8]+J[8]+J[11]+J[12]+J[14]+
J[17]+J[18]+J[20]+J[30]+J[32]+J[34], +H[1]+H[2]+H[5]+H[6]+H[7]+J[8]+J[11]+J[12]+J[13]+J[17]+J[18]+J[19]+
J[30]+J[31]+J[33], +H[1]+H[2]+H[5]+H[6]+H[7]+H[8]+J[8]+J[11]+J[12]+J[13]+J[14]+J[17]+J[18]+J[19]+J[20]+
J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+H[2]+H[4]+J[8]+J[10]+J[16], +H[1]+H[2]+H[4]+H[8]+J[8]+J[10]+
J[14]+J[16]+J[20]+J[29], +H[1]+H[2]+H[4]+H[7]+J[8]+J[10]+J[13]+J[16]+J[19]+J[28], +H[1]+H[2]+H[4]+H[7]+
H[8]+J[8]+J[10]+J[13]+J[14]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35], +H[1]+H[2]+H[4]+H[6]+J[8]+J[10]+J[12]+
J[16]+J[18]+J[27], +H[1]+H[2]+H[4]+H[6]+H[8]+J[8]+J[10]+J[12]+J[14]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34], +
H[1]+H[2]+H[4]+H[6]+H[7]+J[8]+J[10]+J[12]+J[13]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33], +H[1]+H[2]+H[4]+
H[6]+H[7]+H[8]+J[8]+J[10]+J[12]+J[13]+J[14]+J[16]+J[18]+J[19]+J[20]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +
H[1]+H[2]+H[4]+H[5]+J[8]+J[10]+J[11]+J[16]+J[17]+J[26], +H[1]+H[2]+H[4]+H[5]+H[8]+J[8]+J[10]+J[11]+J[14]+
J[16]+J[17]+J[20]+J[26]+J[29]+J[32], +H[1]+H[2]+H[4]+H[5]+H[7]+J[8]+J[10]+J[11]+J[13]+J[16]+J[17]+J[19]+
J[26]+J[28]+J[31], +H[1]+H[2]+H[4]+H[5]+H[7]+H[8]+J[8]+J[10]+J[11]+J[13]+J[14]+J[16]+J[17]+J[19]+J[20]+
J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[1]+H[2]+H[4]+H[5]+H[6]+J[8]+J[10]+J[11]+J[12]+J[16]+J[17]+J[18]+
J[26]+J[27]+J[30], +H[1]+H[2]+H[4]+H[5]+H[6]+H[8]+J[8]+J[10]+J[11]+J[12]+J[14]+J[16]+J[17]+J[18]+J[20]+
J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[1]+H[2]+H[4]+H[5]+H[6]+H[7]+J[8]+J[10]+J[11]+J[12]+J[13]+J[16]+
J[17]+J[18]+J[19]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[1]+H[2]+H[4]+H[5]+H[6]+H[7]+H[8]+J[8]+J[10]+
J[11]+J[12]+J[13]+J[14]+J[16]+J[17]+J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+
J[34]+J[35], +H[1]+H[2]+H[3]+J[8]+J[9]+J[15], +H[1]+H[2]+H[3]+H[8]+J[8]+J[9]+J[14]+J[15]+J[20]+J[25], +
H[1]+H[2]+H[3]+H[7]+J[8]+J[9]+J[13]+J[15]+J[19]+J[24], +H[1]+H[2]+H[3]+H[7]+H[8]+J[8]+J[9]+J[13]+J[14]+
J[15]+J[19]+J[20]+J[24]+J[25]+J[35], +H[1]+H[2]+H[3]+H[6]+J[8]+J[9]+J[12]+J[15]+J[18]+J[23], +H[1]+H[2]+
H[3]+H[6]+H[8]+J[8]+J[9]+J[12]+J[14]+J[15]+J[18]+J[20]+J[23]+J[25]+J[34], +H[1]+H[2]+H[3]+H[6]+H[7]+
J[8]+J[9]+J[12]+J[13]+J[15]+J[18]+J[19]+J[23]+J[24]+J[33], +H[1]+H[2]+H[3]+H[6]+H[7]+H[8]+J[8]+J[9]+
J[12]+J[13]+J[14]+J[15]+J[18]+J[19]+J[20]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[1]+H[2]+H[3]+H[5]+
J[8]+J[9]+J[11]+J[15]+J[17]+J[22], +H[1]+H[2]+H[3]+H[5]+H[8]+J[8]+J[9]+J[11]+J[14]+J[15]+J[17]+J[20]+
J[22]+J[25]+J[32], +H[1]+H[2]+H[3]+H[5]+H[7]+J[8]+J[9]+J[11]+J[13]+J[15]+J[17]+J[19]+J[22]+J[24]+J[31], +
H[1]+H[2]+H[3]+H[5]+H[7]+H[8]+J[8]+J[9]+J[11]+J[13]+J[14]+J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+
J[31]+J[32]+J[35], +H[1]+H[2]+H[3]+H[5]+H[6]+J[8]+J[9]+J[11]+J[12]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30], +
H[1]+H[2]+H[3]+H[5]+H[6]+H[8]+J[8]+J[9]+J[11]+J[12]+J[14]+J[15]+J[17]+J[18]+J[20]+J[22]+J[23]+J[25]+
J[30]+J[32]+J[34], +H[1]+H[2]+H[3]+H[5]+H[6]+H[7]+J[8]+J[9]+J[11]+J[12]+J[13]+J[15]+J[17]+J[18]+J[19]+
J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +H[1]+H[2]+H[3]+H[5]+H[6]+H[7]+H[8]+J[8]+J[9]+J[11]+J[12]+J[13]+
J[14]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+
H[2]+H[3]+H[4]+J[8]+J[9]+J[10]+J[15]+J[16]+J[21], +H[1]+H[2]+H[3]+H[4]+H[8]+J[8]+J[9]+J[10]+J[14]+J[15]+
J[16]+J[20]+J[21]+J[25]+J[29], +H[1]+H[2]+H[3]+H[4]+H[7]+J[8]+J[9]+J[10]+J[13]+J[15]+J[16]+J[19]+J[21]+
J[24]+J[28], +H[1]+H[2]+H[3]+H[4]+H[7]+H[8]+J[8]+J[9]+J[10]+J[13]+J[14]+J[15]+J[16]+J[19]+J[20]+J[21]+
J[24]+J[25]+J[28]+J[29]+J[35], +H[1]+H[2]+H[3]+H[4]+H[6]+J[8]+J[9]+J[10]+J[12]+J[15]+J[16]+J[18]+J[21]+
J[23]+J[27], +H[1]+H[2]+H[3]+H[4]+H[6]+H[8]+J[8]+J[9]+J[10]+J[12]+J[14]+J[15]+J[16]+J[18]+J[20]+J[21]+
J[23]+J[25]+J[27]+J[29]+J[34], +H[1]+H[2]+H[3]+H[4]+H[6]+H[7]+J[8]+J[9]+J[10]+J[12]+J[13]+J[15]+J[16]+
J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[1]+H[2]+H[3]+H[4]+H[6]+H[7]+H[8]+J[8]+J[9]+J[10]+
J[12]+J[13]+J[14]+J[15]+J[16]+J[18]+J[19]+J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+
J[35], +H[1]+H[2]+H[3]+H[4]+H[5]+J[8]+J[9]+J[10]+J[11]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26], +H[1]+H[2]+
H[3]+H[4]+H[5]+H[8]+J[8]+J[9]+J[10]+J[11]+J[14]+J[15]+J[16]+J[17]+J[20]+J[21]+J[22]+J[25]+J[26]+J[29]+
J[32], +H[1]+H[2]+H[3]+H[4]+H[5]+H[7]+J[8]+J[9]+J[10]+J[11]+J[13]+J[15]+J[16]+J[17]+J[19]+J[21]+J[22]+
J[24]+J[26]+J[28]+J[31], +H[1]+H[2]+H[3]+H[4]+H[5]+H[7]+H[8]+J[8]+J[9]+J[10]+J[11]+J[13]+J[14]+J[15]+
J[16]+J[17]+J[19]+J[20]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[1]+H[2]+H[3]+
H[4]+H[5]+H[6]+J[8]+J[9]+J[10]+J[11]+J[12]+J[15]+J[16]+J[17]+J[18]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +
H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[8]+J[9]+J[10]+J[11]+J[12]+J[14]+J[15]+J[16]+J[17]+J[18]+J[20]+J[21]+
J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[8]+J[9]+
J[10]+J[11]+J[12]+J[13]+J[15]+J[16]+J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+
J[31]+J[33], +H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[8]+J[9]+J[10]+J[11]+J[12]+J[13]+J[14]+J[15]+
J[16]+J[17]+J[18]+J[19]+J[20]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+
J[33]+J[34]+J[35], +H[0]+0, +H[0]+H[8]+J[7], +H[0]+H[7]+J[6], +H[0]+H[7]+H[8]+J[6]+J[7]+J[35], +H[0]+
H[6]+J[5], +H[0]+H[6]+H[8]+J[5]+J[7]+J[34], +H[0]+H[6]+H[7]+J[5]+J[6]+J[33], +H[0]+H[6]+H[7]+H[8]+J[5]+
J[6]+J[7]+J[33]+J[34]+J[35], +H[0]+H[5]+J[4], +H[0]+H[5]+H[8]+J[4]+J[7]+J[32], +H[0]+H[5]+H[7]+J[4]+
J[6]+J[31], +H[0]+H[5]+H[7]+H[8]+J[4]+J[6]+J[7]+J[31]+J[32]+J[35], +H[0]+H[5]+H[6]+J[4]+J[5]+J[30], +
H[0]+H[5]+H[6]+H[8]+J[4]+J[5]+J[7]+J[30]+J[32]+J[34], +H[0]+H[5]+H[6]+H[7]+J[4]+J[5]+J[6]+J[30]+J[31]+
J[33], +H[0]+H[5]+H[6]+H[7]+H[8]+J[4]+J[5]+J[6]+J[7]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[4]+
J[3], +H[0]+H[4]+H[8]+J[3]+J[7]+J[29], +H[0]+H[4]+H[7]+J[3]+J[6]+J[28], +H[0]+H[4]+H[7]+H[8]+J[3]+J[6]+
J[7]+J[28]+J[29]+J[35], +H[0]+H[4]+H[6]+J[3]+J[5]+J[27], +H[0]+H[4]+H[6]+H[8]+J[3]+J[5]+J[7]+J[27]+J[29]+
J[34], +H[0]+H[4]+H[6]+H[7]+J[3]+J[5]+J[6]+J[27]+J[28]+J[33], +H[0]+H[4]+H[6]+H[7]+H[8]+J[3]+J[5]+J[6]+
J[7]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[4]+H[5]+J[3]+J[4]+J[26], +H[0]+H[4]+H[5]+H[8]+J[3]+
J[4]+J[7]+J[26]+J[29]+J[32], +H[0]+H[4]+H[5]+H[7]+J[3]+J[4]+J[6]+J[26]+J[28]+J[31], +H[0]+H[4]+H[5]+
H[7]+H[8]+J[3]+J[4]+J[6]+J[7]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[4]+H[5]+H[6]+J[3]+J[4]+J[5]+
J[26]+J[27]+J[30], +H[0]+H[4]+H[5]+H[6]+H[8]+J[3]+J[4]+J[5]+J[7]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +
H[0]+H[4]+H[5]+H[6]+H[7]+J[3]+J[4]+J[5]+J[6]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[4]+H[5]+H[6]+
H[7]+H[8]+J[3]+J[4]+J[5]+J[6]+J[7]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+
H[3]+J[2], +H[0]+H[3]+H[8]+J[2]+J[7]+J[25], +H[0]+H[3]+H[7]+J[2]+J[6]+J[24], +H[0]+H[3]+H[7]+H[8]+J[2]+
J[6]+J[7]+J[24]+J[25]+J[35], +H[0]+H[3]+H[6]+J[2]+J[5]+J[23], +H[0]+H[3]+H[6]+H[8]+J[2]+J[5]+J[7]+J[23]+
J[25]+J[34], +H[0]+H[3]+H[6]+H[7]+J[2]+J[5]+J[6]+J[23]+J[24]+J[33], +H[0]+H[3]+H[6]+H[7]+H[8]+J[2]+J[5]+
J[6]+J[7]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[0]+H[3]+H[5]+J[2]+J[4]+J[22], +H[0]+H[3]+H[5]+H[8]+
J[2]+J[4]+J[7]+J[22]+J[25]+J[32], +H[0]+H[3]+H[5]+H[7]+J[2]+J[4]+J[6]+J[22]+J[24]+J[31], +H[0]+H[3]+
H[5]+H[7]+H[8]+J[2]+J[4]+J[6]+J[7]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[0]+H[3]+H[5]+H[6]+J[2]+J[4]+
J[5]+J[22]+J[23]+J[30], +H[0]+H[3]+H[5]+H[6]+H[8]+J[2]+J[4]+J[5]+J[7]+J[22]+J[23]+J[25]+J[30]+J[32]+
J[34], +H[0]+H[3]+H[5]+H[6]+H[7]+J[2]+J[4]+J[5]+J[6]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +H[0]+H[3]+
H[5]+H[6]+H[7]+H[8]+J[2]+J[4]+J[5]+J[6]+J[7]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +
H[0]+H[3]+H[4]+J[2]+J[3]+J[21], +H[0]+H[3]+H[4]+H[8]+J[2]+J[3]+J[7]+J[21]+J[25]+J[29], +H[0]+H[3]+H[4]+
H[7]+J[2]+J[3]+J[6]+J[21]+J[24]+J[28], +H[0]+H[3]+H[4]+H[7]+H[8]+J[2]+J[3]+J[6]+J[7]+J[21]+J[24]+J[25]+
J[28]+J[29]+J[35], +H[0]+H[3]+H[4]+H[6]+J[2]+J[3]+J[5]+J[21]+J[23]+J[27], +H[0]+H[3]+H[4]+H[6]+H[8]+
J[2]+J[3]+J[5]+J[7]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[0]+H[3]+H[4]+H[6]+H[7]+J[2]+J[3]+J[5]+J[6]+
J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[0]+H[3]+H[4]+H[6]+H[7]+H[8]+J[2]+J[3]+J[5]+J[6]+J[7]+J[21]+J[23]+
J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[3]+H[4]+H[5]+J[2]+J[3]+J[4]+J[21]+J[22]+J[26], +
H[0]+H[3]+H[4]+H[5]+H[8]+J[2]+J[3]+J[4]+J[7]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32], +H[0]+H[3]+H[4]+H[5]+
H[7]+J[2]+J[3]+J[4]+J[6]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[0]+H[3]+H[4]+H[5]+H[7]+H[8]+J[2]+J[3]+
J[4]+J[6]+J[7]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[3]+H[4]+H[5]+H[6]+
J[2]+J[3]+J[4]+J[5]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[0]+H[3]+H[4]+H[5]+H[6]+H[8]+J[2]+J[3]+J[4]+
J[5]+J[7]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[3]+H[4]+H[5]+H[6]+H[7]+
J[2]+J[3]+J[4]+J[5]+J[6]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[3]+H[4]+
H[5]+H[6]+H[7]+H[8]+J[2]+J[3]+J[4]+J[5]+J[6]+J[7]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+
J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[2]+J[1], +H[0]+H[2]+H[8]+J[1]+J[7]+J[20], +H[0]+H[2]+H[7]+
J[1]+J[6]+J[19], +H[0]+H[2]+H[7]+H[8]+J[1]+J[6]+J[7]+J[19]+J[20]+J[35], +H[0]+H[2]+H[6]+J[1]+J[5]+J[18], +
H[0]+H[2]+H[6]+H[8]+J[1]+J[5]+J[7]+J[18]+J[20]+J[34], +H[0]+H[2]+H[6]+H[7]+J[1]+J[5]+J[6]+J[18]+J[19]+
J[33], +H[0]+H[2]+H[6]+H[7]+H[8]+J[1]+J[5]+J[6]+J[7]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35], +H[0]+H[2]+
H[5]+J[1]+J[4]+J[17], +H[0]+H[2]+H[5]+H[8]+J[1]+J[4]+J[7]+J[17]+J[20]+J[32], +H[0]+H[2]+H[5]+H[7]+J[1]+
J[4]+J[6]+J[17]+J[19]+J[31], +H[0]+H[2]+H[5]+H[7]+H[8]+J[1]+J[4]+J[6]+J[7]+J[17]+J[19]+J[20]+J[31]+J[32]+
J[35], +H[0]+H[2]+H[5]+H[6]+J[1]+J[4]+J[5]+J[17]+J[18]+J[30], +H[0]+H[2]+H[5]+H[6]+H[8]+J[1]+J[4]+J[5]+
J[7]+J[17]+J[18]+J[20]+J[30]+J[32]+J[34], +H[0]+H[2]+H[5]+H[6]+H[7]+J[1]+J[4]+J[5]+J[6]+J[17]+J[18]+
J[19]+J[30]+J[31]+J[33], +H[0]+H[2]+H[5]+H[6]+H[7]+H[8]+J[1]+J[4]+J[5]+J[6]+J[7]+J[17]+J[18]+J[19]+J[20]+
J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[2]+H[4]+J[1]+J[3]+J[16], +H[0]+H[2]+H[4]+H[8]+J[1]+J[3]+
J[7]+J[16]+J[20]+J[29], +H[0]+H[2]+H[4]+H[7]+J[1]+J[3]+J[6]+J[16]+J[19]+J[28], +H[0]+H[2]+H[4]+H[7]+
H[8]+J[1]+J[3]+J[6]+J[7]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35], +H[0]+H[2]+H[4]+H[6]+J[1]+J[3]+J[5]+J[16]+
J[18]+J[27], +H[0]+H[2]+H[4]+H[6]+H[8]+J[1]+J[3]+J[5]+J[7]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34], +H[0]+
H[2]+H[4]+H[6]+H[7]+J[1]+J[3]+J[5]+J[6]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33], +H[0]+H[2]+H[4]+H[6]+H[7]+
H[8]+J[1]+J[3]+J[5]+J[6]+J[7]+J[16]+J[18]+J[19]+J[20]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[2]+
H[4]+H[5]+J[1]+J[3]+J[4]+J[16]+J[17]+J[26], +H[0]+H[2]+H[4]+H[5]+H[8]+J[1]+J[3]+J[4]+J[7]+J[16]+J[17]+
J[20]+J[26]+J[29]+J[32], +H[0]+H[2]+H[4]+H[5]+H[7]+J[1]+J[3]+J[4]+J[6]+J[16]+J[17]+J[19]+J[26]+J[28]+
J[31], +H[0]+H[2]+H[4]+H[5]+H[7]+H[8]+J[1]+J[3]+J[4]+J[6]+J[7]+J[16]+J[17]+J[19]+J[20]+J[26]+J[28]+J[29]+
J[31]+J[32]+J[35], +H[0]+H[2]+H[4]+H[5]+H[6]+J[1]+J[3]+J[4]+J[5]+J[16]+J[17]+J[18]+J[26]+J[27]+J[30], +
H[0]+H[2]+H[4]+H[5]+H[6]+H[8]+J[1]+J[3]+J[4]+J[5]+J[7]+J[16]+J[17]+J[18]+J[20]+J[26]+J[27]+J[29]+J[30]+
J[32]+J[34], +H[0]+H[2]+H[4]+H[5]+H[6]+H[7]+J[1]+J[3]+J[4]+J[5]+J[6]+J[16]+J[17]+J[18]+J[19]+J[26]+J[27]+
J[28]+J[30]+J[31]+J[33], +H[0]+H[2]+H[4]+H[5]+H[6]+H[7]+H[8]+J[1]+J[3]+J[4]+J[5]+J[6]+J[7]+J[16]+J[17]+
J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[2]+H[3]+J[1]+
J[2]+J[15], +H[0]+H[2]+H[3]+H[8]+J[1]+J[2]+J[7]+J[15]+J[20]+J[25], +H[0]+H[2]+H[3]+H[7]+J[1]+J[2]+J[6]+
J[15]+J[19]+J[24], +H[0]+H[2]+H[3]+H[7]+H[8]+J[1]+J[2]+J[6]+J[7]+J[15]+J[19]+J[20]+J[24]+J[25]+J[35], +
H[0]+H[2]+H[3]+H[6]+J[1]+J[2]+J[5]+J[15]+J[18]+J[23], +H[0]+H[2]+H[3]+H[6]+H[8]+J[1]+J[2]+J[5]+J[7]+
J[15]+J[18]+J[20]+J[23]+J[25]+J[34], +H[0]+H[2]+H[3]+H[6]+H[7]+J[1]+J[2]+J[5]+J[6]+J[15]+J[18]+J[19]+
J[23]+J[24]+J[33], +H[0]+H[2]+H[3]+H[6]+H[7]+H[8]+J[1]+J[2]+J[5]+J[6]+J[7]+J[15]+J[18]+J[19]+J[20]+J[23]+
J[24]+J[25]+J[33]+J[34]+J[35], +H[0]+H[2]+H[3]+H[5]+J[1]+J[2]+J[4]+J[15]+J[17]+J[22], +H[0]+H[2]+H[3]+
H[5]+H[8]+J[1]+J[2]+J[4]+J[7]+J[15]+J[17]+J[20]+J[22]+J[25]+J[32], +H[0]+H[2]+H[3]+H[5]+H[7]+J[1]+J[2]+
J[4]+J[6]+J[15]+J[17]+J[19]+J[22]+J[24]+J[31], +H[0]+H[2]+H[3]+H[5]+H[7]+H[8]+J[1]+J[2]+J[4]+J[6]+J[7]+
J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[0]+H[2]+H[3]+H[5]+H[6]+J[1]+J[2]+J[4]+
J[5]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30], +H[0]+H[2]+H[3]+H[5]+H[6]+H[8]+J[1]+J[2]+J[4]+J[5]+J[7]+J[15]+
J[17]+J[18]+J[20]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34], +H[0]+H[2]+H[3]+H[5]+H[6]+H[7]+J[1]+J[2]+J[4]+
J[5]+J[6]+J[15]+J[17]+J[18]+J[19]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +H[0]+H[2]+H[3]+H[5]+H[6]+H[7]+
H[8]+J[1]+J[2]+J[4]+J[5]+J[6]+J[7]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+
J[32]+J[33]+J[34]+J[35], +H[0]+H[2]+H[3]+H[4]+J[1]+J[2]+J[3]+J[15]+J[16]+J[21], +H[0]+H[2]+H[3]+H[4]+
H[8]+J[1]+J[2]+J[3]+J[7]+J[15]+J[16]+J[20]+J[21]+J[25]+J[29], +H[0]+H[2]+H[3]+H[4]+H[7]+J[1]+J[2]+J[3]+
J[6]+J[15]+J[16]+J[19]+J[21]+J[24]+J[28], +H[0]+H[2]+H[3]+H[4]+H[7]+H[8]+J[1]+J[2]+J[3]+J[6]+J[7]+J[15]+
J[16]+J[19]+J[20]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[0]+H[2]+H[3]+H[4]+H[6]+J[1]+J[2]+J[3]+J[5]+
J[15]+J[16]+J[18]+J[21]+J[23]+J[27], +H[0]+H[2]+H[3]+H[4]+H[6]+H[8]+J[1]+J[2]+J[3]+J[5]+J[7]+J[15]+J[16]+
J[18]+J[20]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[0]+H[2]+H[3]+H[4]+H[6]+H[7]+J[1]+J[2]+J[3]+J[5]+
J[6]+J[15]+J[16]+J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[0]+H[2]+H[3]+H[4]+H[6]+H[7]+H[8]+
J[1]+J[2]+J[3]+J[5]+J[6]+J[7]+J[15]+J[16]+J[18]+J[19]+J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+
J[33]+J[34]+J[35], +H[0]+H[2]+H[3]+H[4]+H[5]+J[1]+J[2]+J[3]+J[4]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26], +
H[0]+H[2]+H[3]+H[4]+H[5]+H[8]+J[1]+J[2]+J[3]+J[4]+J[7]+J[15]+J[16]+J[17]+J[20]+J[21]+J[22]+J[25]+J[26]+
J[29]+J[32], +H[0]+H[2]+H[3]+H[4]+H[5]+H[7]+J[1]+J[2]+J[3]+J[4]+J[6]+J[15]+J[16]+J[17]+J[19]+J[21]+J[22]+
J[24]+J[26]+J[28]+J[31], +H[0]+H[2]+H[3]+H[4]+H[5]+H[7]+H[8]+J[1]+J[2]+J[3]+J[4]+J[6]+J[7]+J[15]+J[16]+
J[17]+J[19]+J[20]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[2]+H[3]+H[4]+
H[5]+H[6]+J[1]+J[2]+J[3]+J[4]+J[5]+J[15]+J[16]+J[17]+J[18]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[0]+
H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[1]+J[2]+J[3]+J[4]+J[5]+J[7]+J[15]+J[16]+J[17]+J[18]+J[20]+J[21]+J[22]+
J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[1]+J[2]+J[3]+
J[4]+J[5]+J[6]+J[15]+J[16]+J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +
H[0]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[1]+J[2]+J[3]+J[4]+J[5]+J[6]+J[7]+J[15]+J[16]+J[17]+J[18]+J[19]+
J[20]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+
H[1]+J[0], +H[0]+H[1]+H[8]+J[0]+J[7]+J[14], +H[0]+H[1]+H[7]+J[0]+J[6]+J[13], +H[0]+H[1]+H[7]+H[8]+J[0]+
J[6]+J[7]+J[13]+J[14]+J[35], +H[0]+H[1]+H[6]+J[0]+J[5]+J[12], +H[0]+H[1]+H[6]+H[8]+J[0]+J[5]+J[7]+J[12]+
J[14]+J[34], +H[0]+H[1]+H[6]+H[7]+J[0]+J[5]+J[6]+J[12]+J[13]+J[33], +H[0]+H[1]+H[6]+H[7]+H[8]+J[0]+J[5]+
J[6]+J[7]+J[12]+J[13]+J[14]+J[33]+J[34]+J[35], +H[0]+H[1]+H[5]+J[0]+J[4]+J[11], +H[0]+H[1]+H[5]+H[8]+
J[0]+J[4]+J[7]+J[11]+J[14]+J[32], +H[0]+H[1]+H[5]+H[7]+J[0]+J[4]+J[6]+J[11]+J[13]+J[31], +H[0]+H[1]+
H[5]+H[7]+H[8]+J[0]+J[4]+J[6]+J[7]+J[11]+J[13]+J[14]+J[31]+J[32]+J[35], +H[0]+H[1]+H[5]+H[6]+J[0]+J[4]+
J[5]+J[11]+J[12]+J[30], +H[0]+H[1]+H[5]+H[6]+H[8]+J[0]+J[4]+J[5]+J[7]+J[11]+J[12]+J[14]+J[30]+J[32]+
J[34], +H[0]+H[1]+H[5]+H[6]+H[7]+J[0]+J[4]+J[5]+J[6]+J[11]+J[12]+J[13]+J[30]+J[31]+J[33], +H[0]+H[1]+
H[5]+H[6]+H[7]+H[8]+J[0]+J[4]+J[5]+J[6]+J[7]+J[11]+J[12]+J[13]+J[14]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +
H[0]+H[1]+H[4]+J[0]+J[3]+J[10], +H[0]+H[1]+H[4]+H[8]+J[0]+J[3]+J[7]+J[10]+J[14]+J[29], +H[0]+H[1]+H[4]+
H[7]+J[0]+J[3]+J[6]+J[10]+J[13]+J[28], +H[0]+H[1]+H[4]+H[7]+H[8]+J[0]+J[3]+J[6]+J[7]+J[10]+J[13]+J[14]+
J[28]+J[29]+J[35], +H[0]+H[1]+H[4]+H[6]+J[0]+J[3]+J[5]+J[10]+J[12]+J[27], +H[0]+H[1]+H[4]+H[6]+H[8]+
J[0]+J[3]+J[5]+J[7]+J[10]+J[12]+J[14]+J[27]+J[29]+J[34], +H[0]+H[1]+H[4]+H[6]+H[7]+J[0]+J[3]+J[5]+J[6]+
J[10]+J[12]+J[13]+J[27]+J[28]+J[33], +H[0]+H[1]+H[4]+H[6]+H[7]+H[8]+J[0]+J[3]+J[5]+J[6]+J[7]+J[10]+J[12]+
J[13]+J[14]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[1]+H[4]+H[5]+J[0]+J[3]+J[4]+J[10]+J[11]+J[26], +
H[0]+H[1]+H[4]+H[5]+H[8]+J[0]+J[3]+J[4]+J[7]+J[10]+J[11]+J[14]+J[26]+J[29]+J[32], +H[0]+H[1]+H[4]+H[5]+
H[7]+J[0]+J[3]+J[4]+J[6]+J[10]+J[11]+J[13]+J[26]+J[28]+J[31], +H[0]+H[1]+H[4]+H[5]+H[7]+H[8]+J[0]+J[3]+
J[4]+J[6]+J[7]+J[10]+J[11]+J[13]+J[14]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[1]+H[4]+H[5]+H[6]+
J[0]+J[3]+J[4]+J[5]+J[10]+J[11]+J[12]+J[26]+J[27]+J[30], +H[0]+H[1]+H[4]+H[5]+H[6]+H[8]+J[0]+J[3]+J[4]+
J[5]+J[7]+J[10]+J[11]+J[12]+J[14]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[1]+H[4]+H[5]+H[6]+H[7]+
J[0]+J[3]+J[4]+J[5]+J[6]+J[10]+J[11]+J[12]+J[13]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[1]+H[4]+
H[5]+H[6]+H[7]+H[8]+J[0]+J[3]+J[4]+J[5]+J[6]+J[7]+J[10]+J[11]+J[12]+J[13]+J[14]+J[26]+J[27]+J[28]+J[29]+
J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[3]+J[0]+J[2]+J[9], +H[0]+H[1]+H[3]+H[8]+J[0]+J[2]+
J[7]+J[9]+J[14]+J[25], +H[0]+H[1]+H[3]+H[7]+J[0]+J[2]+J[6]+J[9]+J[13]+J[24], +H[0]+H[1]+H[3]+H[7]+H[8]+
J[0]+J[2]+J[6]+J[7]+J[9]+J[13]+J[14]+J[24]+J[25]+J[35], +H[0]+H[1]+H[3]+H[6]+J[0]+J[2]+J[5]+J[9]+J[12]+
J[23], +H[0]+H[1]+H[3]+H[6]+H[8]+J[0]+J[2]+J[5]+J[7]+J[9]+J[12]+J[14]+J[23]+J[25]+J[34], +H[0]+H[1]+
H[3]+H[6]+H[7]+J[0]+J[2]+J[5]+J[6]+J[9]+J[12]+J[13]+J[23]+J[24]+J[33], +H[0]+H[1]+H[3]+H[6]+H[7]+H[8]+
J[0]+J[2]+J[5]+J[6]+J[7]+J[9]+J[12]+J[13]+J[14]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[0]+H[1]+H[3]+
H[5]+J[0]+J[2]+J[4]+J[9]+J[11]+J[22], +H[0]+H[1]+H[3]+H[5]+H[8]+J[0]+J[2]+J[4]+J[7]+J[9]+J[11]+J[14]+
J[22]+J[25]+J[32], +H[0]+H[1]+H[3]+H[5]+H[7]+J[0]+J[2]+J[4]+J[6]+J[9]+J[11]+J[13]+J[22]+J[24]+J[31], +
H[0]+H[1]+H[3]+H[5]+H[7]+H[8]+J[0]+J[2]+J[4]+J[6]+J[7]+J[9]+J[11]+J[13]+J[14]+J[22]+J[24]+J[25]+J[31]+
J[32]+J[35], +H[0]+H[1]+H[3]+H[5]+H[6]+J[0]+J[2]+J[4]+J[5]+J[9]+J[11]+J[12]+J[22]+J[23]+J[30], +H[0]+
H[1]+H[3]+H[5]+H[6]+H[8]+J[0]+J[2]+J[4]+J[5]+J[7]+J[9]+J[11]+J[12]+J[14]+J[22]+J[23]+J[25]+J[30]+J[32]+
J[34], +H[0]+H[1]+H[3]+H[5]+H[6]+H[7]+J[0]+J[2]+J[4]+J[5]+J[6]+J[9]+J[11]+J[12]+J[13]+J[22]+J[23]+J[24]+
J[30]+J[31]+J[33], +H[0]+H[1]+H[3]+H[5]+H[6]+H[7]+H[8]+J[0]+J[2]+J[4]+J[5]+J[6]+J[7]+J[9]+J[11]+J[12]+
J[13]+J[14]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[3]+H[4]+J[0]+J[2]+
J[3]+J[9]+J[10]+J[21], +H[0]+H[1]+H[3]+H[4]+H[8]+J[0]+J[2]+J[3]+J[7]+J[9]+J[10]+J[14]+J[21]+J[25]+J[29], +
H[0]+H[1]+H[3]+H[4]+H[7]+J[0]+J[2]+J[3]+J[6]+J[9]+J[10]+J[13]+J[21]+J[24]+J[28], +H[0]+H[1]+H[3]+H[4]+
H[7]+H[8]+J[0]+J[2]+J[3]+J[6]+J[7]+J[9]+J[10]+J[13]+J[14]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[0]+
H[1]+H[3]+H[4]+H[6]+J[0]+J[2]+J[3]+J[5]+J[9]+J[10]+J[12]+J[21]+J[23]+J[27], +H[0]+H[1]+H[3]+H[4]+H[6]+
H[8]+J[0]+J[2]+J[3]+J[5]+J[7]+J[9]+J[10]+J[12]+J[14]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[0]+H[1]+
H[3]+H[4]+H[6]+H[7]+J[0]+J[2]+J[3]+J[5]+J[6]+J[9]+J[10]+J[12]+J[13]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +
H[0]+H[1]+H[3]+H[4]+H[6]+H[7]+H[8]+J[0]+J[2]+J[3]+J[5]+J[6]+J[7]+J[9]+J[10]+J[12]+J[13]+J[14]+J[21]+
J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[1]+H[3]+H[4]+H[5]+J[0]+J[2]+J[3]+J[4]+
J[9]+J[10]+J[11]+J[21]+J[22]+J[26], +H[0]+H[1]+H[3]+H[4]+H[5]+H[8]+J[0]+J[2]+J[3]+J[4]+J[7]+J[9]+J[10]+
J[11]+J[14]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32], +H[0]+H[1]+H[3]+H[4]+H[5]+H[7]+J[0]+J[2]+J[3]+J[4]+
J[6]+J[9]+J[10]+J[11]+J[13]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[0]+H[1]+H[3]+H[4]+H[5]+H[7]+H[8]+
J[0]+J[2]+J[3]+J[4]+J[6]+J[7]+J[9]+J[10]+J[11]+J[13]+J[14]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+
J[31]+J[32]+J[35], +H[0]+H[1]+H[3]+H[4]+H[5]+H[6]+J[0]+J[2]+J[3]+J[4]+J[5]+J[9]+J[10]+J[11]+J[12]+J[21]+
J[22]+J[23]+J[26]+J[27]+J[30], +H[0]+H[1]+H[3]+H[4]+H[5]+H[6]+H[8]+J[0]+J[2]+J[3]+J[4]+J[5]+J[7]+J[9]+
J[10]+J[11]+J[12]+J[14]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[1]+H[3]+
H[4]+H[5]+H[6]+H[7]+J[0]+J[2]+J[3]+J[4]+J[5]+J[6]+J[9]+J[10]+J[11]+J[12]+J[13]+J[21]+J[22]+J[23]+J[24]+
J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[1]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[0]+J[2]+J[3]+J[4]+J[5]+
J[6]+J[7]+J[9]+J[10]+J[11]+J[12]+J[13]+J[14]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+
J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+J[0]+J[1]+J[8], +H[0]+H[1]+H[2]+H[8]+J[0]+J[1]+J[7]+J[8]+
J[14]+J[20], +H[0]+H[1]+H[2]+H[7]+J[0]+J[1]+J[6]+J[8]+J[13]+J[19], +H[0]+H[1]+H[2]+H[7]+H[8]+J[0]+J[1]+
J[6]+J[7]+J[8]+J[13]+J[14]+J[19]+J[20]+J[35], +H[0]+H[1]+H[2]+H[6]+J[0]+J[1]+J[5]+J[8]+J[12]+J[18], +
H[0]+H[1]+H[2]+H[6]+H[8]+J[0]+J[1]+J[5]+J[7]+J[8]+J[12]+J[14]+J[18]+J[20]+J[34], +H[0]+H[1]+H[2]+H[6]+
H[7]+J[0]+J[1]+J[5]+J[6]+J[8]+J[12]+J[13]+J[18]+J[19]+J[33], +H[0]+H[1]+H[2]+H[6]+H[7]+H[8]+J[0]+J[1]+
J[5]+J[6]+J[7]+J[8]+J[12]+J[13]+J[14]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[5]+J[0]+
J[1]+J[4]+J[8]+J[11]+J[17], +H[0]+H[1]+H[2]+H[5]+H[8]+J[0]+J[1]+J[4]+J[7]+J[8]+J[11]+J[14]+J[17]+J[20]+
J[32], +H[0]+H[1]+H[2]+H[5]+H[7]+J[0]+J[1]+J[4]+J[6]+J[8]+J[11]+J[13]+J[17]+J[19]+J[31], +H[0]+H[1]+
H[2]+H[5]+H[7]+H[8]+J[0]+J[1]+J[4]+J[6]+J[7]+J[8]+J[11]+J[13]+J[14]+J[17]+J[19]+J[20]+J[31]+J[32]+J[35], +
H[0]+H[1]+H[2]+H[5]+H[6]+J[0]+J[1]+J[4]+J[5]+J[8]+J[11]+J[12]+J[17]+J[18]+J[30], +H[0]+H[1]+H[2]+H[5]+
H[6]+H[8]+J[0]+J[1]+J[4]+J[5]+J[7]+J[8]+J[11]+J[12]+J[14]+J[17]+J[18]+J[20]+J[30]+J[32]+J[34], +H[0]+
H[1]+H[2]+H[5]+H[6]+H[7]+J[0]+J[1]+J[4]+J[5]+J[6]+J[8]+J[11]+J[12]+J[13]+J[17]+J[18]+J[19]+J[30]+J[31]+
J[33], +H[0]+H[1]+H[2]+H[5]+H[6]+H[7]+H[8]+J[0]+J[1]+J[4]+J[5]+J[6]+J[7]+J[8]+J[11]+J[12]+J[13]+J[14]+
J[17]+J[18]+J[19]+J[20]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[4]+J[0]+J[1]+J[3]+J[8]+
J[10]+J[16], +H[0]+H[1]+H[2]+H[4]+H[8]+J[0]+J[1]+J[3]+J[7]+J[8]+J[10]+J[14]+J[16]+J[20]+J[29], +H[0]+
H[1]+H[2]+H[4]+H[7]+J[0]+J[1]+J[3]+J[6]+J[8]+J[10]+J[13]+J[16]+J[19]+J[28], +H[0]+H[1]+H[2]+H[4]+H[7]+
H[8]+J[0]+J[1]+J[3]+J[6]+J[7]+J[8]+J[10]+J[13]+J[14]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35], +H[0]+H[1]+
H[2]+H[4]+H[6]+J[0]+J[1]+J[3]+J[5]+J[8]+J[10]+J[12]+J[16]+J[18]+J[27], +H[0]+H[1]+H[2]+H[4]+H[6]+H[8]+
J[0]+J[1]+J[3]+J[5]+J[7]+J[8]+J[10]+J[12]+J[14]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34], +H[0]+H[1]+H[2]+
H[4]+H[6]+H[7]+J[0]+J[1]+J[3]+J[5]+J[6]+J[8]+J[10]+J[12]+J[13]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33], +
H[0]+H[1]+H[2]+H[4]+H[6]+H[7]+H[8]+J[0]+J[1]+J[3]+J[5]+J[6]+J[7]+J[8]+J[10]+J[12]+J[13]+J[14]+J[16]+
J[18]+J[19]+J[20]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[4]+H[5]+J[0]+J[1]+J[3]+J[4]+
J[8]+J[10]+J[11]+J[16]+J[17]+J[26], +H[0]+H[1]+H[2]+H[4]+H[5]+H[8]+J[0]+J[1]+J[3]+J[4]+J[7]+J[8]+J[10]+
J[11]+J[14]+J[16]+J[17]+J[20]+J[26]+J[29]+J[32], +H[0]+H[1]+H[2]+H[4]+H[5]+H[7]+J[0]+J[1]+J[3]+J[4]+
J[6]+J[8]+J[10]+J[11]+J[13]+J[16]+J[17]+J[19]+J[26]+J[28]+J[31], +H[0]+H[1]+H[2]+H[4]+H[5]+H[7]+H[8]+
J[0]+J[1]+J[3]+J[4]+J[6]+J[7]+J[8]+J[10]+J[11]+J[13]+J[14]+J[16]+J[17]+J[19]+J[20]+J[26]+J[28]+J[29]+
J[31]+J[32]+J[35], +H[0]+H[1]+H[2]+H[4]+H[5]+H[6]+J[0]+J[1]+J[3]+J[4]+J[5]+J[8]+J[10]+J[11]+J[12]+J[16]+
J[17]+J[18]+J[26]+J[27]+J[30], +H[0]+H[1]+H[2]+H[4]+H[5]+H[6]+H[8]+J[0]+J[1]+J[3]+J[4]+J[5]+J[7]+J[8]+
J[10]+J[11]+J[12]+J[14]+J[16]+J[17]+J[18]+J[20]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[1]+H[2]+
H[4]+H[5]+H[6]+H[7]+J[0]+J[1]+J[3]+J[4]+J[5]+J[6]+J[8]+J[10]+J[11]+J[12]+J[13]+J[16]+J[17]+J[18]+J[19]+
J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[1]+H[2]+H[4]+H[5]+H[6]+H[7]+H[8]+J[0]+J[1]+J[3]+J[4]+J[5]+
J[6]+J[7]+J[8]+J[10]+J[11]+J[12]+J[13]+J[14]+J[16]+J[17]+J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+
J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[3]+J[0]+J[1]+J[2]+J[8]+J[9]+J[15], +H[0]+H[1]+H[2]+
H[3]+H[8]+J[0]+J[1]+J[2]+J[7]+J[8]+J[9]+J[14]+J[15]+J[20]+J[25], +H[0]+H[1]+H[2]+H[3]+H[7]+J[0]+J[1]+
J[2]+J[6]+J[8]+J[9]+J[13]+J[15]+J[19]+J[24], +H[0]+H[1]+H[2]+H[3]+H[7]+H[8]+J[0]+J[1]+J[2]+J[6]+J[7]+
J[8]+J[9]+J[13]+J[14]+J[15]+J[19]+J[20]+J[24]+J[25]+J[35], +H[0]+H[1]+H[2]+H[3]+H[6]+J[0]+J[1]+J[2]+
J[5]+J[8]+J[9]+J[12]+J[15]+J[18]+J[23], +H[0]+H[1]+H[2]+H[3]+H[6]+H[8]+J[0]+J[1]+J[2]+J[5]+J[7]+J[8]+
J[9]+J[12]+J[14]+J[15]+J[18]+J[20]+J[23]+J[25]+J[34], +H[0]+H[1]+H[2]+H[3]+H[6]+H[7]+J[0]+J[1]+J[2]+
J[5]+J[6]+J[8]+J[9]+J[12]+J[13]+J[15]+J[18]+J[19]+J[23]+J[24]+J[33], +H[0]+H[1]+H[2]+H[3]+H[6]+H[7]+
H[8]+J[0]+J[1]+J[2]+J[5]+J[6]+J[7]+J[8]+J[9]+J[12]+J[13]+J[14]+J[15]+J[18]+J[19]+J[20]+J[23]+J[24]+J[25]+
J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[3]+H[5]+J[0]+J[1]+J[2]+J[4]+J[8]+J[9]+J[11]+J[15]+J[17]+J[22], +
H[0]+H[1]+H[2]+H[3]+H[5]+H[8]+J[0]+J[1]+J[2]+J[4]+J[7]+J[8]+J[9]+J[11]+J[14]+J[15]+J[17]+J[20]+J[22]+
J[25]+J[32], +H[0]+H[1]+H[2]+H[3]+H[5]+H[7]+J[0]+J[1]+J[2]+J[4]+J[6]+J[8]+J[9]+J[11]+J[13]+J[15]+J[17]+
J[19]+J[22]+J[24]+J[31], +H[0]+H[1]+H[2]+H[3]+H[5]+H[7]+H[8]+J[0]+J[1]+J[2]+J[4]+J[6]+J[7]+J[8]+J[9]+
J[11]+J[13]+J[14]+J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[0]+H[1]+H[2]+H[3]+
H[5]+H[6]+J[0]+J[1]+J[2]+J[4]+J[5]+J[8]+J[9]+J[11]+J[12]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30], +H[0]+
H[1]+H[2]+H[3]+H[5]+H[6]+H[8]+J[0]+J[1]+J[2]+J[4]+J[5]+J[7]+J[8]+J[9]+J[11]+J[12]+J[14]+J[15]+J[17]+
J[18]+J[20]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34], +H[0]+H[1]+H[2]+H[3]+H[5]+H[6]+H[7]+J[0]+J[1]+J[2]+
J[4]+J[5]+J[6]+J[8]+J[9]+J[11]+J[12]+J[13]+J[15]+J[17]+J[18]+J[19]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +
H[0]+H[1]+H[2]+H[3]+H[5]+H[6]+H[7]+H[8]+J[0]+J[1]+J[2]+J[4]+J[5]+J[6]+J[7]+J[8]+J[9]+J[11]+J[12]+J[13]+
J[14]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+
H[1]+H[2]+H[3]+H[4]+J[0]+J[1]+J[2]+J[3]+J[8]+J[9]+J[10]+J[15]+J[16]+J[21], +H[0]+H[1]+H[2]+H[3]+H[4]+
H[8]+J[0]+J[1]+J[2]+J[3]+J[7]+J[8]+J[9]+J[10]+J[14]+J[15]+J[16]+J[20]+J[21]+J[25]+J[29], +H[0]+H[1]+
H[2]+H[3]+H[4]+H[7]+J[0]+J[1]+J[2]+J[3]+J[6]+J[8]+J[9]+J[10]+J[13]+J[15]+J[16]+J[19]+J[21]+J[24]+J[28], +
H[0]+H[1]+H[2]+H[3]+H[4]+H[7]+H[8]+J[0]+J[1]+J[2]+J[3]+J[6]+J[7]+J[8]+J[9]+J[10]+J[13]+J[14]+J[15]+J[16]+
J[19]+J[20]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[0]+H[1]+H[2]+H[3]+H[4]+H[6]+J[0]+J[1]+J[2]+J[3]+
J[5]+J[8]+J[9]+J[10]+J[12]+J[15]+J[16]+J[18]+J[21]+J[23]+J[27], +H[0]+H[1]+H[2]+H[3]+H[4]+H[6]+H[8]+
J[0]+J[1]+J[2]+J[3]+J[5]+J[7]+J[8]+J[9]+J[10]+J[12]+J[14]+J[15]+J[16]+J[18]+J[20]+J[21]+J[23]+J[25]+
J[27]+J[29]+J[34], +H[0]+H[1]+H[2]+H[3]+H[4]+H[6]+H[7]+J[0]+J[1]+J[2]+J[3]+J[5]+J[6]+J[8]+J[9]+J[10]+
J[12]+J[13]+J[15]+J[16]+J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[0]+H[1]+H[2]+H[3]+H[4]+H[6]+
H[7]+H[8]+J[0]+J[1]+J[2]+J[3]+J[5]+J[6]+J[7]+J[8]+J[9]+J[10]+J[12]+J[13]+J[14]+J[15]+J[16]+J[18]+J[19]+
J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+J[0]+
J[1]+J[2]+J[3]+J[4]+J[8]+J[9]+J[10]+J[11]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26], +H[0]+H[1]+H[2]+H[3]+
H[4]+H[5]+H[8]+J[0]+J[1]+J[2]+J[3]+J[4]+J[7]+J[8]+J[9]+J[10]+J[11]+J[14]+J[15]+J[16]+J[17]+J[20]+J[21]+
J[22]+J[25]+J[26]+J[29]+J[32], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+H[7]+J[0]+J[1]+J[2]+J[3]+J[4]+J[6]+J[8]+
J[9]+J[10]+J[11]+J[13]+J[15]+J[16]+J[17]+J[19]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[0]+H[1]+H[2]+
H[3]+H[4]+H[5]+H[7]+H[8]+J[0]+J[1]+J[2]+J[3]+J[4]+J[6]+J[7]+J[8]+J[9]+J[10]+J[11]+J[13]+J[14]+J[15]+
J[16]+J[17]+J[19]+J[20]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[1]+H[2]+
H[3]+H[4]+H[5]+H[6]+J[0]+J[1]+J[2]+J[3]+J[4]+J[5]+J[8]+J[9]+J[10]+J[11]+J[12]+J[15]+J[16]+J[17]+J[18]+
J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[0]+J[1]+J[2]+J[3]+J[4]+
J[5]+J[7]+J[8]+J[9]+J[10]+J[11]+J[12]+J[14]+J[15]+J[16]+J[17]+J[18]+J[20]+J[21]+J[22]+J[23]+J[25]+J[26]+
J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[0]+J[1]+J[2]+J[3]+J[4]+J[5]+
J[6]+J[8]+J[9]+J[10]+J[11]+J[12]+J[13]+J[15]+J[16]+J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+
J[28]+J[30]+J[31]+J[33], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[0]+J[1]+J[2]+J[3]+J[4]+J[5]+
J[6]+J[7]+J[8]+J[9]+J[10]+J[11]+J[12]+J[13]+J[14]+J[15]+J[16]+J[17]+J[18]+J[19]+J[20]+J[21]+J[22]+J[23]+
J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35],])
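# energyTerms above enumerates, for every spin configuration, the sum of the
# fields H[i] over active spins plus the couplings J[k] over active pairs.
# The log partition function is then computed with a numerically stable
# logsumexp over all 2**9 = 512 configuration energies.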
logZ = fast_logsumexp(energyTerms)[0]
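# Boltzmann probability of each configuration. Pout[k] corresponds to the
# state whose 9-bit binary expansion of k gives the spin values, with spin 0
# as the most significant bit (e.g. Pout[1] has only spin 8 active,
# Pout[256] has only spin 0 active).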
Pout[0] = exp( +0 - logZ )
Pout[1] = exp( +H[8]+0 - logZ )
Pout[2] = exp( +H[7]+0 - logZ )
Pout[3] = exp( +H[7]+H[8]+J[35] - logZ )
Pout[4] = exp( +H[6]+0 - logZ )
Pout[5] = exp( +H[6]+H[8]+J[34] - logZ )
Pout[6] = exp( +H[6]+H[7]+J[33] - logZ )
Pout[7] = exp( +H[6]+H[7]+H[8]+J[33]+J[34]+J[35] - logZ )
Pout[8] = exp( +H[5]+0 - logZ )
Pout[9] = exp( +H[5]+H[8]+J[32] - logZ )
Pout[10] = exp( +H[5]+H[7]+J[31] - logZ )
Pout[11] = exp( +H[5]+H[7]+H[8]+J[31]+J[32]+J[35] - logZ )
Pout[12] = exp( +H[5]+H[6]+J[30] - logZ )
Pout[13] = exp( +H[5]+H[6]+H[8]+J[30]+J[32]+J[34] - logZ )
Pout[14] = exp( +H[5]+H[6]+H[7]+J[30]+J[31]+J[33] - logZ )
Pout[15] = exp( +H[5]+H[6]+H[7]+H[8]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[16] = exp( +H[4]+0 - logZ )
Pout[17] = exp( +H[4]+H[8]+J[29] - logZ )
Pout[18] = exp( +H[4]+H[7]+J[28] - logZ )
Pout[19] = exp( +H[4]+H[7]+H[8]+J[28]+J[29]+J[35] - logZ )
Pout[20] = exp( +H[4]+H[6]+J[27] - logZ )
Pout[21] = exp( +H[4]+H[6]+H[8]+J[27]+J[29]+J[34] - logZ )
Pout[22] = exp( +H[4]+H[6]+H[7]+J[27]+J[28]+J[33] - logZ )
Pout[23] = exp( +H[4]+H[6]+H[7]+H[8]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35] - logZ )
Pout[24] = exp( +H[4]+H[5]+J[26] - logZ )
Pout[25] = exp( +H[4]+H[5]+H[8]+J[26]+J[29]+J[32] - logZ )
Pout[26] = exp( +H[4]+H[5]+H[7]+J[26]+J[28]+J[31] - logZ )
Pout[27] = exp( +H[4]+H[5]+H[7]+H[8]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35] - logZ )
Pout[28] = exp( +H[4]+H[5]+H[6]+J[26]+J[27]+J[30] - logZ )
Pout[29] = exp( +H[4]+H[5]+H[6]+H[8]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34] - logZ )
Pout[30] = exp( +H[4]+H[5]+H[6]+H[7]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33] - logZ )
Pout[31] = exp( +H[4]+H[5]+H[6]+H[7]+H[8]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[32] = exp( +H[3]+0 - logZ )
Pout[33] = exp( +H[3]+H[8]+J[25] - logZ )
Pout[34] = exp( +H[3]+H[7]+J[24] - logZ )
Pout[35] = exp( +H[3]+H[7]+H[8]+J[24]+J[25]+J[35] - logZ )
Pout[36] = exp( +H[3]+H[6]+J[23] - logZ )
Pout[37] = exp( +H[3]+H[6]+H[8]+J[23]+J[25]+J[34] - logZ )
Pout[38] = exp( +H[3]+H[6]+H[7]+J[23]+J[24]+J[33] - logZ )
Pout[39] = exp( +H[3]+H[6]+H[7]+H[8]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35] - logZ )
Pout[40] = exp( +H[3]+H[5]+J[22] - logZ )
Pout[41] = exp( +H[3]+H[5]+H[8]+J[22]+J[25]+J[32] - logZ )
Pout[42] = exp( +H[3]+H[5]+H[7]+J[22]+J[24]+J[31] - logZ )
Pout[43] = exp( +H[3]+H[5]+H[7]+H[8]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35] - logZ )
Pout[44] = exp( +H[3]+H[5]+H[6]+J[22]+J[23]+J[30] - logZ )
Pout[45] = exp( +H[3]+H[5]+H[6]+H[8]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34] - logZ )
Pout[46] = exp( +H[3]+H[5]+H[6]+H[7]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33] - logZ )
Pout[47] = exp( +H[3]+H[5]+H[6]+H[7]+H[8]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[48] = exp( +H[3]+H[4]+J[21] - logZ )
Pout[49] = exp( +H[3]+H[4]+H[8]+J[21]+J[25]+J[29] - logZ )
Pout[50] = exp( +H[3]+H[4]+H[7]+J[21]+J[24]+J[28] - logZ )
Pout[51] = exp( +H[3]+H[4]+H[7]+H[8]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35] - logZ )
Pout[52] = exp( +H[3]+H[4]+H[6]+J[21]+J[23]+J[27] - logZ )
Pout[53] = exp( +H[3]+H[4]+H[6]+H[8]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34] - logZ )
Pout[54] = exp( +H[3]+H[4]+H[6]+H[7]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33] - logZ )
Pout[55] = exp( +H[3]+H[4]+H[6]+H[7]+H[8]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35] - logZ )
Pout[56] = exp( +H[3]+H[4]+H[5]+J[21]+J[22]+J[26] - logZ )
Pout[57] = exp( +H[3]+H[4]+H[5]+H[8]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32] - logZ )
Pout[58] = exp( +H[3]+H[4]+H[5]+H[7]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31] - logZ )
Pout[59] = exp( +H[3]+H[4]+H[5]+H[7]+H[8]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35] - logZ )
Pout[60] = exp( +H[3]+H[4]+H[5]+H[6]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30] - logZ )
Pout[61] = exp( +H[3]+H[4]+H[5]+H[6]+H[8]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34] - logZ )
Pout[62] = exp( +H[3]+H[4]+H[5]+H[6]+H[7]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33] - logZ )
Pout[63] = exp( +H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[64] = exp( +H[2]+0 - logZ )
Pout[65] = exp( +H[2]+H[8]+J[20] - logZ )
Pout[66] = exp( +H[2]+H[7]+J[19] - logZ )
Pout[67] = exp( +H[2]+H[7]+H[8]+J[19]+J[20]+J[35] - logZ )
Pout[68] = exp( +H[2]+H[6]+J[18] - logZ )
Pout[69] = exp( +H[2]+H[6]+H[8]+J[18]+J[20]+J[34] - logZ )
Pout[70] = exp( +H[2]+H[6]+H[7]+J[18]+J[19]+J[33] - logZ )
Pout[71] = exp( +H[2]+H[6]+H[7]+H[8]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35] - logZ )
Pout[72] = exp( +H[2]+H[5]+J[17] - logZ )
Pout[73] = exp( +H[2]+H[5]+H[8]+J[17]+J[20]+J[32] - logZ )
Pout[74] = exp( +H[2]+H[5]+H[7]+J[17]+J[19]+J[31] - logZ )
Pout[75] = exp( +H[2]+H[5]+H[7]+H[8]+J[17]+J[19]+J[20]+J[31]+J[32]+J[35] - logZ )
Pout[76] = exp( +H[2]+H[5]+H[6]+J[17]+J[18]+J[30] - logZ )
Pout[77] = exp( +H[2]+H[5]+H[6]+H[8]+J[17]+J[18]+J[20]+J[30]+J[32]+J[34] - logZ )
Pout[78] = exp( +H[2]+H[5]+H[6]+H[7]+J[17]+J[18]+J[19]+J[30]+J[31]+J[33] - logZ )
Pout[79] = exp( +H[2]+H[5]+H[6]+H[7]+H[8]+J[17]+J[18]+J[19]+J[20]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[80] = exp( +H[2]+H[4]+J[16] - logZ )
Pout[81] = exp( +H[2]+H[4]+H[8]+J[16]+J[20]+J[29] - logZ )
Pout[82] = exp( +H[2]+H[4]+H[7]+J[16]+J[19]+J[28] - logZ )
Pout[83] = exp( +H[2]+H[4]+H[7]+H[8]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35] - logZ )
Pout[84] = exp( +H[2]+H[4]+H[6]+J[16]+J[18]+J[27] - logZ )
Pout[85] = exp( +H[2]+H[4]+H[6]+H[8]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34] - logZ )
Pout[86] = exp( +H[2]+H[4]+H[6]+H[7]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33] - logZ )
Pout[87] = exp( +H[2]+H[4]+H[6]+H[7]+H[8]+J[16]+J[18]+J[19]+J[20]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35] - logZ )
Pout[88] = exp( +H[2]+H[4]+H[5]+J[16]+J[17]+J[26] - logZ )
Pout[89] = exp( +H[2]+H[4]+H[5]+H[8]+J[16]+J[17]+J[20]+J[26]+J[29]+J[32] - logZ )
Pout[90] = exp( +H[2]+H[4]+H[5]+H[7]+J[16]+J[17]+J[19]+J[26]+J[28]+J[31] - logZ )
Pout[91] = exp( +H[2]+H[4]+H[5]+H[7]+H[8]+J[16]+J[17]+J[19]+J[20]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35] - logZ )
Pout[92] = exp( +H[2]+H[4]+H[5]+H[6]+J[16]+J[17]+J[18]+J[26]+J[27]+J[30] - logZ )
Pout[93] = exp( +H[2]+H[4]+H[5]+H[6]+H[8]+J[16]+J[17]+J[18]+J[20]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34] - logZ )
Pout[94] = exp( +H[2]+H[4]+H[5]+H[6]+H[7]+J[16]+J[17]+J[18]+J[19]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33] - logZ )
Pout[95] = exp( +H[2]+H[4]+H[5]+H[6]+H[7]+H[8]+J[16]+J[17]+J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[96] = exp( +H[2]+H[3]+J[15] - logZ )
Pout[97] = exp( +H[2]+H[3]+H[8]+J[15]+J[20]+J[25] - logZ )
Pout[98] = exp( +H[2]+H[3]+H[7]+J[15]+J[19]+J[24] - logZ )
Pout[99] = exp( +H[2]+H[3]+H[7]+H[8]+J[15]+J[19]+J[20]+J[24]+J[25]+J[35] - logZ )
Pout[100] = exp( +H[2]+H[3]+H[6]+J[15]+J[18]+J[23] - logZ )
Pout[101] = exp( +H[2]+H[3]+H[6]+H[8]+J[15]+J[18]+J[20]+J[23]+J[25]+J[34] - logZ )
Pout[102] = exp( +H[2]+H[3]+H[6]+H[7]+J[15]+J[18]+J[19]+J[23]+J[24]+J[33] - logZ )
Pout[103] = exp( +H[2]+H[3]+H[6]+H[7]+H[8]+J[15]+J[18]+J[19]+J[20]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35] - logZ )
Pout[104] = exp( +H[2]+H[3]+H[5]+J[15]+J[17]+J[22] - logZ )
Pout[105] = exp( +H[2]+H[3]+H[5]+H[8]+J[15]+J[17]+J[20]+J[22]+J[25]+J[32] - logZ )
Pout[106] = exp( +H[2]+H[3]+H[5]+H[7]+J[15]+J[17]+J[19]+J[22]+J[24]+J[31] - logZ )
Pout[107] = exp( +H[2]+H[3]+H[5]+H[7]+H[8]+J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35] - logZ )
Pout[108] = exp( +H[2]+H[3]+H[5]+H[6]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30] - logZ )
Pout[109] = exp( +H[2]+H[3]+H[5]+H[6]+H[8]+J[15]+J[17]+J[18]+J[20]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34] - logZ )
Pout[110] = exp( +H[2]+H[3]+H[5]+H[6]+H[7]+J[15]+J[17]+J[18]+J[19]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33] - logZ )
Pout[111] = exp( +H[2]+H[3]+H[5]+H[6]+H[7]+H[8]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[112] = exp( +H[2]+H[3]+H[4]+J[15]+J[16]+J[21] - logZ )
Pout[113] = exp( +H[2]+H[3]+H[4]+H[8]+J[15]+J[16]+J[20]+J[21]+J[25]+J[29] - logZ )
Pout[114] = exp( +H[2]+H[3]+H[4]+H[7]+J[15]+J[16]+J[19]+J[21]+J[24]+J[28] - logZ )
Pout[115] = exp( +H[2]+H[3]+H[4]+H[7]+H[8]+J[15]+J[16]+J[19]+J[20]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35] - logZ )
Pout[116] = exp( +H[2]+H[3]+H[4]+H[6]+J[15]+J[16]+J[18]+J[21]+J[23]+J[27] - logZ )
Pout[117] = exp( +H[2]+H[3]+H[4]+H[6]+H[8]+J[15]+J[16]+J[18]+J[20]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34] - logZ )
Pout[118] = exp( +H[2]+H[3]+H[4]+H[6]+H[7]+J[15]+J[16]+J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33] - logZ )
Pout[119] = exp( +H[2]+H[3]+H[4]+H[6]+H[7]+H[8]+J[15]+J[16]+J[18]+J[19]+J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35] - logZ )
Pout[120] = exp( +H[2]+H[3]+H[4]+H[5]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26] - logZ )
Pout[121] = exp( +H[2]+H[3]+H[4]+H[5]+H[8]+J[15]+J[16]+J[17]+J[20]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32] - logZ )
Pout[122] = exp( +H[2]+H[3]+H[4]+H[5]+H[7]+J[15]+J[16]+J[17]+J[19]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31] - logZ )
Pout[123] = exp( +H[2]+H[3]+H[4]+H[5]+H[7]+H[8]+J[15]+J[16]+J[17]+J[19]+J[20]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35] - logZ )
Pout[124] = exp( +H[2]+H[3]+H[4]+H[5]+H[6]+J[15]+J[16]+J[17]+J[18]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30] - logZ )
Pout[125] = exp( +H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[15]+J[16]+J[17]+J[18]+J[20]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34] - logZ )
Pout[126] = exp( +H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[15]+J[16]+J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33] - logZ )
Pout[127] = exp( +H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[15]+J[16]+J[17]+J[18]+J[19]+J[20]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[128] = exp( +H[1]+0 - logZ )
Pout[129] = exp( +H[1]+H[8]+J[14] - logZ )
Pout[130] = exp( +H[1]+H[7]+J[13] - logZ )
Pout[131] = exp( +H[1]+H[7]+H[8]+J[13]+J[14]+J[35] - logZ )
Pout[132] = exp( +H[1]+H[6]+J[12] - logZ )
Pout[133] = exp( +H[1]+H[6]+H[8]+J[12]+J[14]+J[34] - logZ )
Pout[134] = exp( +H[1]+H[6]+H[7]+J[12]+J[13]+J[33] - logZ )
Pout[135] = exp( +H[1]+H[6]+H[7]+H[8]+J[12]+J[13]+J[14]+J[33]+J[34]+J[35] - logZ )
Pout[136] = exp( +H[1]+H[5]+J[11] - logZ )
Pout[137] = exp( +H[1]+H[5]+H[8]+J[11]+J[14]+J[32] - logZ )
Pout[138] = exp( +H[1]+H[5]+H[7]+J[11]+J[13]+J[31] - logZ )
Pout[139] = exp( +H[1]+H[5]+H[7]+H[8]+J[11]+J[13]+J[14]+J[31]+J[32]+J[35] - logZ )
Pout[140] = exp( +H[1]+H[5]+H[6]+J[11]+J[12]+J[30] - logZ )
Pout[141] = exp( +H[1]+H[5]+H[6]+H[8]+J[11]+J[12]+J[14]+J[30]+J[32]+J[34] - logZ )
Pout[142] = exp( +H[1]+H[5]+H[6]+H[7]+J[11]+J[12]+J[13]+J[30]+J[31]+J[33] - logZ )
Pout[143] = exp( +H[1]+H[5]+H[6]+H[7]+H[8]+J[11]+J[12]+J[13]+J[14]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[144] = exp( +H[1]+H[4]+J[10] - logZ )
Pout[145] = exp( +H[1]+H[4]+H[8]+J[10]+J[14]+J[29] - logZ )
Pout[146] = exp( +H[1]+H[4]+H[7]+J[10]+J[13]+J[28] - logZ )
Pout[147] = exp( +H[1]+H[4]+H[7]+H[8]+J[10]+J[13]+J[14]+J[28]+J[29]+J[35] - logZ )
Pout[148] = exp( +H[1]+H[4]+H[6]+J[10]+J[12]+J[27] - logZ )
Pout[149] = exp( +H[1]+H[4]+H[6]+H[8]+J[10]+J[12]+J[14]+J[27]+J[29]+J[34] - logZ )
Pout[150] = exp( +H[1]+H[4]+H[6]+H[7]+J[10]+J[12]+J[13]+J[27]+J[28]+J[33] - logZ )
Pout[151] = exp( +H[1]+H[4]+H[6]+H[7]+H[8]+J[10]+J[12]+J[13]+J[14]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35] - logZ )
Pout[152] = exp( +H[1]+H[4]+H[5]+J[10]+J[11]+J[26] - logZ )
Pout[153] = exp( +H[1]+H[4]+H[5]+H[8]+J[10]+J[11]+J[14]+J[26]+J[29]+J[32] - logZ )
Pout[154] = exp( +H[1]+H[4]+H[5]+H[7]+J[10]+J[11]+J[13]+J[26]+J[28]+J[31] - logZ )
Pout[155] = exp( +H[1]+H[4]+H[5]+H[7]+H[8]+J[10]+J[11]+J[13]+J[14]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35] - logZ )
Pout[156] = exp( +H[1]+H[4]+H[5]+H[6]+J[10]+J[11]+J[12]+J[26]+J[27]+J[30] - logZ )
Pout[157] = exp( +H[1]+H[4]+H[5]+H[6]+H[8]+J[10]+J[11]+J[12]+J[14]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34] - logZ )
Pout[158] = exp( +H[1]+H[4]+H[5]+H[6]+H[7]+J[10]+J[11]+J[12]+J[13]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33] - logZ )
Pout[159] = exp( +H[1]+H[4]+H[5]+H[6]+H[7]+H[8]+J[10]+J[11]+J[12]+J[13]+J[14]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[160] = exp( +H[1]+H[3]+J[9] - logZ )
Pout[161] = exp( +H[1]+H[3]+H[8]+J[9]+J[14]+J[25] - logZ )
Pout[162] = exp( +H[1]+H[3]+H[7]+J[9]+J[13]+J[24] - logZ )
Pout[163] = exp( +H[1]+H[3]+H[7]+H[8]+J[9]+J[13]+J[14]+J[24]+J[25]+J[35] - logZ )
Pout[164] = exp( +H[1]+H[3]+H[6]+J[9]+J[12]+J[23] - logZ )
Pout[165] = exp( +H[1]+H[3]+H[6]+H[8]+J[9]+J[12]+J[14]+J[23]+J[25]+J[34] - logZ )
Pout[166] = exp( +H[1]+H[3]+H[6]+H[7]+J[9]+J[12]+J[13]+J[23]+J[24]+J[33] - logZ )
Pout[167] = exp( +H[1]+H[3]+H[6]+H[7]+H[8]+J[9]+J[12]+J[13]+J[14]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35] - logZ )
Pout[168] = exp( +H[1]+H[3]+H[5]+J[9]+J[11]+J[22] - logZ )
Pout[169] = exp( +H[1]+H[3]+H[5]+H[8]+J[9]+J[11]+J[14]+J[22]+J[25]+J[32] - logZ )
Pout[170] = exp( +H[1]+H[3]+H[5]+H[7]+J[9]+J[11]+J[13]+J[22]+J[24]+J[31] - logZ )
Pout[171] = exp( +H[1]+H[3]+H[5]+H[7]+H[8]+J[9]+J[11]+J[13]+J[14]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35] - logZ )
Pout[172] = exp( +H[1]+H[3]+H[5]+H[6]+J[9]+J[11]+J[12]+J[22]+J[23]+J[30] - logZ )
Pout[173] = exp( +H[1]+H[3]+H[5]+H[6]+H[8]+J[9]+J[11]+J[12]+J[14]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34] - logZ )
Pout[174] = exp( +H[1]+H[3]+H[5]+H[6]+H[7]+J[9]+J[11]+J[12]+J[13]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33] - logZ )
Pout[175] = exp( +H[1]+H[3]+H[5]+H[6]+H[7]+H[8]+J[9]+J[11]+J[12]+J[13]+J[14]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[176] = exp( +H[1]+H[3]+H[4]+J[9]+J[10]+J[21] - logZ )
Pout[177] = exp( +H[1]+H[3]+H[4]+H[8]+J[9]+J[10]+J[14]+J[21]+J[25]+J[29] - logZ )
Pout[178] = exp( +H[1]+H[3]+H[4]+H[7]+J[9]+J[10]+J[13]+J[21]+J[24]+J[28] - logZ )
Pout[179] = exp( +H[1]+H[3]+H[4]+H[7]+H[8]+J[9]+J[10]+J[13]+J[14]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35] - logZ )
Pout[180] = exp( +H[1]+H[3]+H[4]+H[6]+J[9]+J[10]+J[12]+J[21]+J[23]+J[27] - logZ )
Pout[181] = exp( +H[1]+H[3]+H[4]+H[6]+H[8]+J[9]+J[10]+J[12]+J[14]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34] - logZ )
Pout[182] = exp( +H[1]+H[3]+H[4]+H[6]+H[7]+J[9]+J[10]+J[12]+J[13]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33] - logZ )
Pout[183] = exp( +H[1]+H[3]+H[4]+H[6]+H[7]+H[8]+J[9]+J[10]+J[12]+J[13]+J[14]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35] - logZ )
Pout[184] = exp( +H[1]+H[3]+H[4]+H[5]+J[9]+J[10]+J[11]+J[21]+J[22]+J[26] - logZ )
Pout[185] = exp( +H[1]+H[3]+H[4]+H[5]+H[8]+J[9]+J[10]+J[11]+J[14]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32] - logZ )
Pout[186] = exp( +H[1]+H[3]+H[4]+H[5]+H[7]+J[9]+J[10]+J[11]+J[13]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31] - logZ )
Pout[187] = exp( +H[1]+H[3]+H[4]+H[5]+H[7]+H[8]+J[9]+J[10]+J[11]+J[13]+J[14]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35] - logZ )
Pout[188] = exp( +H[1]+H[3]+H[4]+H[5]+H[6]+J[9]+J[10]+J[11]+J[12]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30] - logZ )
Pout[189] = exp( +H[1]+H[3]+H[4]+H[5]+H[6]+H[8]+J[9]+J[10]+J[11]+J[12]+J[14]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34] - logZ )
Pout[190] = exp( +H[1]+H[3]+H[4]+H[5]+H[6]+H[7]+J[9]+J[10]+J[11]+J[12]+J[13]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33] - logZ )
Pout[191] = exp( +H[1]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[9]+J[10]+J[11]+J[12]+J[13]+J[14]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[192] = exp( +H[1]+H[2]+J[8] - logZ )
Pout[193] = exp( +H[1]+H[2]+H[8]+J[8]+J[14]+J[20] - logZ )
Pout[194] = exp( +H[1]+H[2]+H[7]+J[8]+J[13]+J[19] - logZ )
Pout[195] = exp( +H[1]+H[2]+H[7]+H[8]+J[8]+J[13]+J[14]+J[19]+J[20]+J[35] - logZ )
Pout[196] = exp( +H[1]+H[2]+H[6]+J[8]+J[12]+J[18] - logZ )
Pout[197] = exp( +H[1]+H[2]+H[6]+H[8]+J[8]+J[12]+J[14]+J[18]+J[20]+J[34] - logZ )
Pout[198] = exp( +H[1]+H[2]+H[6]+H[7]+J[8]+J[12]+J[13]+J[18]+J[19]+J[33] - logZ )
Pout[199] = exp( +H[1]+H[2]+H[6]+H[7]+H[8]+J[8]+J[12]+J[13]+J[14]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35] - logZ )
Pout[200] = exp( +H[1]+H[2]+H[5]+J[8]+J[11]+J[17] - logZ )
Pout[201] = exp( +H[1]+H[2]+H[5]+H[8]+J[8]+J[11]+J[14]+J[17]+J[20]+J[32] - logZ )
Pout[202] = exp( +H[1]+H[2]+H[5]+H[7]+J[8]+J[11]+J[13]+J[17]+J[19]+J[31] - logZ )
Pout[203] = exp( +H[1]+H[2]+H[5]+H[7]+H[8]+J[8]+J[11]+J[13]+J[14]+J[17]+J[19]+J[20]+J[31]+J[32]+J[35] - logZ )
Pout[204] = exp( +H[1]+H[2]+H[5]+H[6]+J[8]+J[11]+J[12]+J[17]+J[18]+J[30] - logZ )
Pout[205] = exp( +H[1]+H[2]+H[5]+H[6]+H[8]+J[8]+J[11]+J[12]+J[14]+J[17]+J[18]+J[20]+J[30]+J[32]+J[34] - logZ )
Pout[206] = exp( +H[1]+H[2]+H[5]+H[6]+H[7]+J[8]+J[11]+J[12]+J[13]+J[17]+J[18]+J[19]+J[30]+J[31]+J[33] - logZ )
Pout[207] = exp( +H[1]+H[2]+H[5]+H[6]+H[7]+H[8]+J[8]+J[11]+J[12]+J[13]+J[14]+J[17]+J[18]+J[19]+J[20]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[208] = exp( +H[1]+H[2]+H[4]+J[8]+J[10]+J[16] - logZ )
Pout[209] = exp( +H[1]+H[2]+H[4]+H[8]+J[8]+J[10]+J[14]+J[16]+J[20]+J[29] - logZ )
Pout[210] = exp( +H[1]+H[2]+H[4]+H[7]+J[8]+J[10]+J[13]+J[16]+J[19]+J[28] - logZ )
Pout[211] = exp( +H[1]+H[2]+H[4]+H[7]+H[8]+J[8]+J[10]+J[13]+J[14]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35] - logZ )
Pout[212] = exp( +H[1]+H[2]+H[4]+H[6]+J[8]+J[10]+J[12]+J[16]+J[18]+J[27] - logZ )
Pout[213] = exp( +H[1]+H[2]+H[4]+H[6]+H[8]+J[8]+J[10]+J[12]+J[14]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34] - logZ )
Pout[214] = exp( +H[1]+H[2]+H[4]+H[6]+H[7]+J[8]+J[10]+J[12]+J[13]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33] - logZ )
Pout[215] = exp( +H[1]+H[2]+H[4]+H[6]+H[7]+H[8]+J[8]+J[10]+J[12]+J[13]+J[14]+J[16]+J[18]+J[19]+J[20]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35] - logZ )
Pout[216] = exp( +H[1]+H[2]+H[4]+H[5]+J[8]+J[10]+J[11]+J[16]+J[17]+J[26] - logZ )
Pout[217] = exp( +H[1]+H[2]+H[4]+H[5]+H[8]+J[8]+J[10]+J[11]+J[14]+J[16]+J[17]+J[20]+J[26]+J[29]+J[32] - logZ )
Pout[218] = exp( +H[1]+H[2]+H[4]+H[5]+H[7]+J[8]+J[10]+J[11]+J[13]+J[16]+J[17]+J[19]+J[26]+J[28]+J[31] - logZ )
Pout[219] = exp( +H[1]+H[2]+H[4]+H[5]+H[7]+H[8]+J[8]+J[10]+J[11]+J[13]+J[14]+J[16]+J[17]+J[19]+J[20]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35] - logZ )
Pout[220] = exp( +H[1]+H[2]+H[4]+H[5]+H[6]+J[8]+J[10]+J[11]+J[12]+J[16]+J[17]+J[18]+J[26]+J[27]+J[30] - logZ )
Pout[221] = exp( +H[1]+H[2]+H[4]+H[5]+H[6]+H[8]+J[8]+J[10]+J[11]+J[12]+J[14]+J[16]+J[17]+J[18]+J[20]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34] - logZ )
Pout[222] = exp( +H[1]+H[2]+H[4]+H[5]+H[6]+H[7]+J[8]+J[10]+J[11]+J[12]+J[13]+J[16]+J[17]+J[18]+J[19]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33] - logZ )
Pout[223] = exp( +H[1]+H[2]+H[4]+H[5]+H[6]+H[7]+H[8]+J[8]+J[10]+J[11]+J[12]+J[13]+J[14]+J[16]+J[17]+J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[224] = exp( +H[1]+H[2]+H[3]+J[8]+J[9]+J[15] - logZ )
Pout[225] = exp( +H[1]+H[2]+H[3]+H[8]+J[8]+J[9]+J[14]+J[15]+J[20]+J[25] - logZ )
Pout[226] = exp( +H[1]+H[2]+H[3]+H[7]+J[8]+J[9]+J[13]+J[15]+J[19]+J[24] - logZ )
Pout[227] = exp( +H[1]+H[2]+H[3]+H[7]+H[8]+J[8]+J[9]+J[13]+J[14]+J[15]+J[19]+J[20]+J[24]+J[25]+J[35] - logZ )
Pout[228] = exp( +H[1]+H[2]+H[3]+H[6]+J[8]+J[9]+J[12]+J[15]+J[18]+J[23] - logZ )
Pout[229] = exp( +H[1]+H[2]+H[3]+H[6]+H[8]+J[8]+J[9]+J[12]+J[14]+J[15]+J[18]+J[20]+J[23]+J[25]+J[34] - logZ )
Pout[230] = exp( +H[1]+H[2]+H[3]+H[6]+H[7]+J[8]+J[9]+J[12]+J[13]+J[15]+J[18]+J[19]+J[23]+J[24]+J[33] - logZ )
Pout[231] = exp( +H[1]+H[2]+H[3]+H[6]+H[7]+H[8]+J[8]+J[9]+J[12]+J[13]+J[14]+J[15]+J[18]+J[19]+J[20]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35] - logZ )
Pout[232] = exp( +H[1]+H[2]+H[3]+H[5]+J[8]+J[9]+J[11]+J[15]+J[17]+J[22] - logZ )
Pout[233] = exp( +H[1]+H[2]+H[3]+H[5]+H[8]+J[8]+J[9]+J[11]+J[14]+J[15]+J[17]+J[20]+J[22]+J[25]+J[32] - logZ )
Pout[234] = exp( +H[1]+H[2]+H[3]+H[5]+H[7]+J[8]+J[9]+J[11]+J[13]+J[15]+J[17]+J[19]+J[22]+J[24]+J[31] - logZ )
Pout[235] = exp( +H[1]+H[2]+H[3]+H[5]+H[7]+H[8]+J[8]+J[9]+J[11]+J[13]+J[14]+J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35] - logZ )
Pout[236] = exp( +H[1]+H[2]+H[3]+H[5]+H[6]+J[8]+J[9]+J[11]+J[12]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30] - logZ )
Pout[237] = exp( +H[1]+H[2]+H[3]+H[5]+H[6]+H[8]+J[8]+J[9]+J[11]+J[12]+J[14]+J[15]+J[17]+J[18]+J[20]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34] - logZ )
Pout[238] = exp( +H[1]+H[2]+H[3]+H[5]+H[6]+H[7]+J[8]+J[9]+J[11]+J[12]+J[13]+J[15]+J[17]+J[18]+J[19]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33] - logZ )
Pout[239] = exp( +H[1]+H[2]+H[3]+H[5]+H[6]+H[7]+H[8]+J[8]+J[9]+J[11]+J[12]+J[13]+J[14]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[240] = exp( +H[1]+H[2]+H[3]+H[4]+J[8]+J[9]+J[10]+J[15]+J[16]+J[21] - logZ )
Pout[241] = exp( +H[1]+H[2]+H[3]+H[4]+H[8]+J[8]+J[9]+J[10]+J[14]+J[15]+J[16]+J[20]+J[21]+J[25]+J[29] - logZ )
Pout[242] = exp( +H[1]+H[2]+H[3]+H[4]+H[7]+J[8]+J[9]+J[10]+J[13]+J[15]+J[16]+J[19]+J[21]+J[24]+J[28] - logZ )
Pout[243] = exp( +H[1]+H[2]+H[3]+H[4]+H[7]+H[8]+J[8]+J[9]+J[10]+J[13]+J[14]+J[15]+J[16]+J[19]+J[20]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35] - logZ )
Pout[244] = exp( +H[1]+H[2]+H[3]+H[4]+H[6]+J[8]+J[9]+J[10]+J[12]+J[15]+J[16]+J[18]+J[21]+J[23]+J[27] - logZ )
Pout[245] = exp( +H[1]+H[2]+H[3]+H[4]+H[6]+H[8]+J[8]+J[9]+J[10]+J[12]+J[14]+J[15]+J[16]+J[18]+J[20]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34] - logZ )
Pout[246] = exp( +H[1]+H[2]+H[3]+H[4]+H[6]+H[7]+J[8]+J[9]+J[10]+J[12]+J[13]+J[15]+J[16]+J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33] - logZ )
Pout[247] = exp( +H[1]+H[2]+H[3]+H[4]+H[6]+H[7]+H[8]+J[8]+J[9]+J[10]+J[12]+J[13]+J[14]+J[15]+J[16]+J[18]+J[19]+J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35] - logZ )
Pout[248] = exp( +H[1]+H[2]+H[3]+H[4]+H[5]+J[8]+J[9]+J[10]+J[11]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26] - logZ )
Pout[249] = exp( +H[1]+H[2]+H[3]+H[4]+H[5]+H[8]+J[8]+J[9]+J[10]+J[11]+J[14]+J[15]+J[16]+J[17]+J[20]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32] - logZ )
Pout[250] = exp( +H[1]+H[2]+H[3]+H[4]+H[5]+H[7]+J[8]+J[9]+J[10]+J[11]+J[13]+J[15]+J[16]+J[17]+J[19]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31] - logZ )
Pout[251] = exp( +H[1]+H[2]+H[3]+H[4]+H[5]+H[7]+H[8]+J[8]+J[9]+J[10]+J[11]+J[13]+J[14]+J[15]+J[16]+J[17]+J[19]+J[20]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35] - logZ )
Pout[252] = exp( +H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+J[8]+J[9]+J[10]+J[11]+J[12]+J[15]+J[16]+J[17]+J[18]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30] - logZ )
Pout[253] = exp( +H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[8]+J[9]+J[10]+J[11]+J[12]+J[14]+J[15]+J[16]+J[17]+J[18]+J[20]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34] - logZ )
Pout[254] = exp( +H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[8]+J[9]+J[10]+J[11]+J[12]+J[13]+J[15]+J[16]+J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33] - logZ )
Pout[255] = exp( +H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[8]+J[9]+J[10]+J[11]+J[12]+J[13]+J[14]+J[15]+J[16]+J[17]+J[18]+J[19]+J[20]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[256] = exp( +H[0]+0 - logZ )
Pout[257] = exp( +H[0]+H[8]+J[7] - logZ )
Pout[258] = exp( +H[0]+H[7]+J[6] - logZ )
Pout[259] = exp( +H[0]+H[7]+H[8]+J[6]+J[7]+J[35] - logZ )
Pout[260] = exp( +H[0]+H[6]+J[5] - logZ )
Pout[261] = exp( +H[0]+H[6]+H[8]+J[5]+J[7]+J[34] - logZ )
Pout[262] = exp( +H[0]+H[6]+H[7]+J[5]+J[6]+J[33] - logZ )
Pout[263] = exp( +H[0]+H[6]+H[7]+H[8]+J[5]+J[6]+J[7]+J[33]+J[34]+J[35] - logZ )
Pout[264] = exp( +H[0]+H[5]+J[4] - logZ )
Pout[265] = exp( +H[0]+H[5]+H[8]+J[4]+J[7]+J[32] - logZ )
Pout[266] = exp( +H[0]+H[5]+H[7]+J[4]+J[6]+J[31] - logZ )
Pout[267] = exp( +H[0]+H[5]+H[7]+H[8]+J[4]+J[6]+J[7]+J[31]+J[32]+J[35] - logZ )
Pout[268] = exp( +H[0]+H[5]+H[6]+J[4]+J[5]+J[30] - logZ )
Pout[269] = exp( +H[0]+H[5]+H[6]+H[8]+J[4]+J[5]+J[7]+J[30]+J[32]+J[34] - logZ )
Pout[270] = exp( +H[0]+H[5]+H[6]+H[7]+J[4]+J[5]+J[6]+J[30]+J[31]+J[33] - logZ )
Pout[271] = exp( +H[0]+H[5]+H[6]+H[7]+H[8]+J[4]+J[5]+J[6]+J[7]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[272] = exp( +H[0]+H[4]+J[3] - logZ )
Pout[273] = exp( +H[0]+H[4]+H[8]+J[3]+J[7]+J[29] - logZ )
Pout[274] = exp( +H[0]+H[4]+H[7]+J[3]+J[6]+J[28] - logZ )
Pout[275] = exp( +H[0]+H[4]+H[7]+H[8]+J[3]+J[6]+J[7]+J[28]+J[29]+J[35] - logZ )
Pout[276] = exp( +H[0]+H[4]+H[6]+J[3]+J[5]+J[27] - logZ )
Pout[277] = exp( +H[0]+H[4]+H[6]+H[8]+J[3]+J[5]+J[7]+J[27]+J[29]+J[34] - logZ )
Pout[278] = exp( +H[0]+H[4]+H[6]+H[7]+J[3]+J[5]+J[6]+J[27]+J[28]+J[33] - logZ )
Pout[279] = exp( +H[0]+H[4]+H[6]+H[7]+H[8]+J[3]+J[5]+J[6]+J[7]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35] - logZ )
Pout[280] = exp( +H[0]+H[4]+H[5]+J[3]+J[4]+J[26] - logZ )
Pout[281] = exp( +H[0]+H[4]+H[5]+H[8]+J[3]+J[4]+J[7]+J[26]+J[29]+J[32] - logZ )
Pout[282] = exp( +H[0]+H[4]+H[5]+H[7]+J[3]+J[4]+J[6]+J[26]+J[28]+J[31] - logZ )
Pout[283] = exp( +H[0]+H[4]+H[5]+H[7]+H[8]+J[3]+J[4]+J[6]+J[7]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35] - logZ )
Pout[284] = exp( +H[0]+H[4]+H[5]+H[6]+J[3]+J[4]+J[5]+J[26]+J[27]+J[30] - logZ )
Pout[285] = exp( +H[0]+H[4]+H[5]+H[6]+H[8]+J[3]+J[4]+J[5]+J[7]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34] - logZ )
Pout[286] = exp( +H[0]+H[4]+H[5]+H[6]+H[7]+J[3]+J[4]+J[5]+J[6]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33] - logZ )
Pout[287] = exp( +H[0]+H[4]+H[5]+H[6]+H[7]+H[8]+J[3]+J[4]+J[5]+J[6]+J[7]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[288] = exp( +H[0]+H[3]+J[2] - logZ )
Pout[289] = exp( +H[0]+H[3]+H[8]+J[2]+J[7]+J[25] - logZ )
Pout[290] = exp( +H[0]+H[3]+H[7]+J[2]+J[6]+J[24] - logZ )
Pout[291] = exp( +H[0]+H[3]+H[7]+H[8]+J[2]+J[6]+J[7]+J[24]+J[25]+J[35] - logZ )
Pout[292] = exp( +H[0]+H[3]+H[6]+J[2]+J[5]+J[23] - logZ )
Pout[293] = exp( +H[0]+H[3]+H[6]+H[8]+J[2]+J[5]+J[7]+J[23]+J[25]+J[34] - logZ )
Pout[294] = exp( +H[0]+H[3]+H[6]+H[7]+J[2]+J[5]+J[6]+J[23]+J[24]+J[33] - logZ )
Pout[295] = exp( +H[0]+H[3]+H[6]+H[7]+H[8]+J[2]+J[5]+J[6]+J[7]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35] - logZ )
Pout[296] = exp( +H[0]+H[3]+H[5]+J[2]+J[4]+J[22] - logZ )
Pout[297] = exp( +H[0]+H[3]+H[5]+H[8]+J[2]+J[4]+J[7]+J[22]+J[25]+J[32] - logZ )
Pout[298] = exp( +H[0]+H[3]+H[5]+H[7]+J[2]+J[4]+J[6]+J[22]+J[24]+J[31] - logZ )
Pout[299] = exp( +H[0]+H[3]+H[5]+H[7]+H[8]+J[2]+J[4]+J[6]+J[7]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35] - logZ )
Pout[300] = exp( +H[0]+H[3]+H[5]+H[6]+J[2]+J[4]+J[5]+J[22]+J[23]+J[30] - logZ )
Pout[301] = exp( +H[0]+H[3]+H[5]+H[6]+H[8]+J[2]+J[4]+J[5]+J[7]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34] - logZ )
Pout[302] = exp( +H[0]+H[3]+H[5]+H[6]+H[7]+J[2]+J[4]+J[5]+J[6]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33] - logZ )
Pout[303] = exp( +H[0]+H[3]+H[5]+H[6]+H[7]+H[8]+J[2]+J[4]+J[5]+J[6]+J[7]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[304] = exp( +H[0]+H[3]+H[4]+J[2]+J[3]+J[21] - logZ )
Pout[305] = exp( +H[0]+H[3]+H[4]+H[8]+J[2]+J[3]+J[7]+J[21]+J[25]+J[29] - logZ )
Pout[306] = exp( +H[0]+H[3]+H[4]+H[7]+J[2]+J[3]+J[6]+J[21]+J[24]+J[28] - logZ )
Pout[307] = exp( +H[0]+H[3]+H[4]+H[7]+H[8]+J[2]+J[3]+J[6]+J[7]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35] - logZ )
Pout[308] = exp( +H[0]+H[3]+H[4]+H[6]+J[2]+J[3]+J[5]+J[21]+J[23]+J[27] - logZ )
Pout[309] = exp( +H[0]+H[3]+H[4]+H[6]+H[8]+J[2]+J[3]+J[5]+J[7]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34] - logZ )
Pout[310] = exp( +H[0]+H[3]+H[4]+H[6]+H[7]+J[2]+J[3]+J[5]+J[6]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33] - logZ )
Pout[311] = exp( +H[0]+H[3]+H[4]+H[6]+H[7]+H[8]+J[2]+J[3]+J[5]+J[6]+J[7]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35] - logZ )
Pout[312] = exp( +H[0]+H[3]+H[4]+H[5]+J[2]+J[3]+J[4]+J[21]+J[22]+J[26] - logZ )
Pout[313] = exp( +H[0]+H[3]+H[4]+H[5]+H[8]+J[2]+J[3]+J[4]+J[7]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32] - logZ )
Pout[314] = exp( +H[0]+H[3]+H[4]+H[5]+H[7]+J[2]+J[3]+J[4]+J[6]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31] - logZ )
Pout[315] = exp( +H[0]+H[3]+H[4]+H[5]+H[7]+H[8]+J[2]+J[3]+J[4]+J[6]+J[7]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35] - logZ )
Pout[316] = exp( +H[0]+H[3]+H[4]+H[5]+H[6]+J[2]+J[3]+J[4]+J[5]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30] - logZ )
Pout[317] = exp( +H[0]+H[3]+H[4]+H[5]+H[6]+H[8]+J[2]+J[3]+J[4]+J[5]+J[7]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34] - logZ )
Pout[318] = exp( +H[0]+H[3]+H[4]+H[5]+H[6]+H[7]+J[2]+J[3]+J[4]+J[5]+J[6]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33] - logZ )
Pout[319] = exp( +H[0]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[2]+J[3]+J[4]+J[5]+J[6]+J[7]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[320] = exp( +H[0]+H[2]+J[1] - logZ )
Pout[321] = exp( +H[0]+H[2]+H[8]+J[1]+J[7]+J[20] - logZ )
Pout[322] = exp( +H[0]+H[2]+H[7]+J[1]+J[6]+J[19] - logZ )
Pout[323] = exp( +H[0]+H[2]+H[7]+H[8]+J[1]+J[6]+J[7]+J[19]+J[20]+J[35] - logZ )
Pout[324] = exp( +H[0]+H[2]+H[6]+J[1]+J[5]+J[18] - logZ )
Pout[325] = exp( +H[0]+H[2]+H[6]+H[8]+J[1]+J[5]+J[7]+J[18]+J[20]+J[34] - logZ )
Pout[326] = exp( +H[0]+H[2]+H[6]+H[7]+J[1]+J[5]+J[6]+J[18]+J[19]+J[33] - logZ )
Pout[327] = exp( +H[0]+H[2]+H[6]+H[7]+H[8]+J[1]+J[5]+J[6]+J[7]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35] - logZ )
Pout[328] = exp( +H[0]+H[2]+H[5]+J[1]+J[4]+J[17] - logZ )
Pout[329] = exp( +H[0]+H[2]+H[5]+H[8]+J[1]+J[4]+J[7]+J[17]+J[20]+J[32] - logZ )
Pout[330] = exp( +H[0]+H[2]+H[5]+H[7]+J[1]+J[4]+J[6]+J[17]+J[19]+J[31] - logZ )
Pout[331] = exp( +H[0]+H[2]+H[5]+H[7]+H[8]+J[1]+J[4]+J[6]+J[7]+J[17]+J[19]+J[20]+J[31]+J[32]+J[35] - logZ )
Pout[332] = exp( +H[0]+H[2]+H[5]+H[6]+J[1]+J[4]+J[5]+J[17]+J[18]+J[30] - logZ )
Pout[333] = exp( +H[0]+H[2]+H[5]+H[6]+H[8]+J[1]+J[4]+J[5]+J[7]+J[17]+J[18]+J[20]+J[30]+J[32]+J[34] - logZ )
Pout[334] = exp( +H[0]+H[2]+H[5]+H[6]+H[7]+J[1]+J[4]+J[5]+J[6]+J[17]+J[18]+J[19]+J[30]+J[31]+J[33] - logZ )
Pout[335] = exp( +H[0]+H[2]+H[5]+H[6]+H[7]+H[8]+J[1]+J[4]+J[5]+J[6]+J[7]+J[17]+J[18]+J[19]+J[20]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[336] = exp( +H[0]+H[2]+H[4]+J[1]+J[3]+J[16] - logZ )
Pout[337] = exp( +H[0]+H[2]+H[4]+H[8]+J[1]+J[3]+J[7]+J[16]+J[20]+J[29] - logZ )
Pout[338] = exp( +H[0]+H[2]+H[4]+H[7]+J[1]+J[3]+J[6]+J[16]+J[19]+J[28] - logZ )
Pout[339] = exp( +H[0]+H[2]+H[4]+H[7]+H[8]+J[1]+J[3]+J[6]+J[7]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35] - logZ )
Pout[340] = exp( +H[0]+H[2]+H[4]+H[6]+J[1]+J[3]+J[5]+J[16]+J[18]+J[27] - logZ )
Pout[341] = exp( +H[0]+H[2]+H[4]+H[6]+H[8]+J[1]+J[3]+J[5]+J[7]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34] - logZ )
Pout[342] = exp( +H[0]+H[2]+H[4]+H[6]+H[7]+J[1]+J[3]+J[5]+J[6]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33] - logZ )
Pout[343] = exp( +H[0]+H[2]+H[4]+H[6]+H[7]+H[8]+J[1]+J[3]+J[5]+J[6]+J[7]+J[16]+J[18]+J[19]+J[20]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35] - logZ )
Pout[344] = exp( +H[0]+H[2]+H[4]+H[5]+J[1]+J[3]+J[4]+J[16]+J[17]+J[26] - logZ )
Pout[345] = exp( +H[0]+H[2]+H[4]+H[5]+H[8]+J[1]+J[3]+J[4]+J[7]+J[16]+J[17]+J[20]+J[26]+J[29]+J[32] - logZ )
Pout[346] = exp( +H[0]+H[2]+H[4]+H[5]+H[7]+J[1]+J[3]+J[4]+J[6]+J[16]+J[17]+J[19]+J[26]+J[28]+J[31] - logZ )
Pout[347] = exp( +H[0]+H[2]+H[4]+H[5]+H[7]+H[8]+J[1]+J[3]+J[4]+J[6]+J[7]+J[16]+J[17]+J[19]+J[20]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35] - logZ )
Pout[348] = exp( +H[0]+H[2]+H[4]+H[5]+H[6]+J[1]+J[3]+J[4]+J[5]+J[16]+J[17]+J[18]+J[26]+J[27]+J[30] - logZ )
Pout[349] = exp( +H[0]+H[2]+H[4]+H[5]+H[6]+H[8]+J[1]+J[3]+J[4]+J[5]+J[7]+J[16]+J[17]+J[18]+J[20]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34] - logZ )
Pout[350] = exp( +H[0]+H[2]+H[4]+H[5]+H[6]+H[7]+J[1]+J[3]+J[4]+J[5]+J[6]+J[16]+J[17]+J[18]+J[19]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33] - logZ )
Pout[351] = exp( +H[0]+H[2]+H[4]+H[5]+H[6]+H[7]+H[8]+J[1]+J[3]+J[4]+J[5]+J[6]+J[7]+J[16]+J[17]+J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[352] = exp( +H[0]+H[2]+H[3]+J[1]+J[2]+J[15] - logZ )
Pout[353] = exp( +H[0]+H[2]+H[3]+H[8]+J[1]+J[2]+J[7]+J[15]+J[20]+J[25] - logZ )
Pout[354] = exp( +H[0]+H[2]+H[3]+H[7]+J[1]+J[2]+J[6]+J[15]+J[19]+J[24] - logZ )
Pout[355] = exp( +H[0]+H[2]+H[3]+H[7]+H[8]+J[1]+J[2]+J[6]+J[7]+J[15]+J[19]+J[20]+J[24]+J[25]+J[35] - logZ )
Pout[356] = exp( +H[0]+H[2]+H[3]+H[6]+J[1]+J[2]+J[5]+J[15]+J[18]+J[23] - logZ )
Pout[357] = exp( +H[0]+H[2]+H[3]+H[6]+H[8]+J[1]+J[2]+J[5]+J[7]+J[15]+J[18]+J[20]+J[23]+J[25]+J[34] - logZ )
Pout[358] = exp( +H[0]+H[2]+H[3]+H[6]+H[7]+J[1]+J[2]+J[5]+J[6]+J[15]+J[18]+J[19]+J[23]+J[24]+J[33] - logZ )
Pout[359] = exp( +H[0]+H[2]+H[3]+H[6]+H[7]+H[8]+J[1]+J[2]+J[5]+J[6]+J[7]+J[15]+J[18]+J[19]+J[20]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35] - logZ )
Pout[360] = exp( +H[0]+H[2]+H[3]+H[5]+J[1]+J[2]+J[4]+J[15]+J[17]+J[22] - logZ )
Pout[361] = exp( +H[0]+H[2]+H[3]+H[5]+H[8]+J[1]+J[2]+J[4]+J[7]+J[15]+J[17]+J[20]+J[22]+J[25]+J[32] - logZ )
Pout[362] = exp( +H[0]+H[2]+H[3]+H[5]+H[7]+J[1]+J[2]+J[4]+J[6]+J[15]+J[17]+J[19]+J[22]+J[24]+J[31] - logZ )
Pout[363] = exp( +H[0]+H[2]+H[3]+H[5]+H[7]+H[8]+J[1]+J[2]+J[4]+J[6]+J[7]+J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35] - logZ )
Pout[364] = exp( +H[0]+H[2]+H[3]+H[5]+H[6]+J[1]+J[2]+J[4]+J[5]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30] - logZ )
Pout[365] = exp( +H[0]+H[2]+H[3]+H[5]+H[6]+H[8]+J[1]+J[2]+J[4]+J[5]+J[7]+J[15]+J[17]+J[18]+J[20]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34] - logZ )
Pout[366] = exp( +H[0]+H[2]+H[3]+H[5]+H[6]+H[7]+J[1]+J[2]+J[4]+J[5]+J[6]+J[15]+J[17]+J[18]+J[19]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33] - logZ )
Pout[367] = exp( +H[0]+H[2]+H[3]+H[5]+H[6]+H[7]+H[8]+J[1]+J[2]+J[4]+J[5]+J[6]+J[7]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[368] = exp( +H[0]+H[2]+H[3]+H[4]+J[1]+J[2]+J[3]+J[15]+J[16]+J[21] - logZ )
Pout[369] = exp( +H[0]+H[2]+H[3]+H[4]+H[8]+J[1]+J[2]+J[3]+J[7]+J[15]+J[16]+J[20]+J[21]+J[25]+J[29] - logZ )
Pout[370] = exp( +H[0]+H[2]+H[3]+H[4]+H[7]+J[1]+J[2]+J[3]+J[6]+J[15]+J[16]+J[19]+J[21]+J[24]+J[28] - logZ )
Pout[371] = exp( +H[0]+H[2]+H[3]+H[4]+H[7]+H[8]+J[1]+J[2]+J[3]+J[6]+J[7]+J[15]+J[16]+J[19]+J[20]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35] - logZ )
Pout[372] = exp( +H[0]+H[2]+H[3]+H[4]+H[6]+J[1]+J[2]+J[3]+J[5]+J[15]+J[16]+J[18]+J[21]+J[23]+J[27] - logZ )
Pout[373] = exp( +H[0]+H[2]+H[3]+H[4]+H[6]+H[8]+J[1]+J[2]+J[3]+J[5]+J[7]+J[15]+J[16]+J[18]+J[20]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34] - logZ )
Pout[374] = exp( +H[0]+H[2]+H[3]+H[4]+H[6]+H[7]+J[1]+J[2]+J[3]+J[5]+J[6]+J[15]+J[16]+J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33] - logZ )
Pout[375] = exp( +H[0]+H[2]+H[3]+H[4]+H[6]+H[7]+H[8]+J[1]+J[2]+J[3]+J[5]+J[6]+J[7]+J[15]+J[16]+J[18]+J[19]+J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35] - logZ )
Pout[376] = exp( +H[0]+H[2]+H[3]+H[4]+H[5]+J[1]+J[2]+J[3]+J[4]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26] - logZ )
Pout[377] = exp( +H[0]+H[2]+H[3]+H[4]+H[5]+H[8]+J[1]+J[2]+J[3]+J[4]+J[7]+J[15]+J[16]+J[17]+J[20]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32] - logZ )
Pout[378] = exp( +H[0]+H[2]+H[3]+H[4]+H[5]+H[7]+J[1]+J[2]+J[3]+J[4]+J[6]+J[15]+J[16]+J[17]+J[19]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31] - logZ )
Pout[379] = exp( +H[0]+H[2]+H[3]+H[4]+H[5]+H[7]+H[8]+J[1]+J[2]+J[3]+J[4]+J[6]+J[7]+J[15]+J[16]+J[17]+J[19]+J[20]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35] - logZ )
Pout[380] = exp( +H[0]+H[2]+H[3]+H[4]+H[5]+H[6]+J[1]+J[2]+J[3]+J[4]+J[5]+J[15]+J[16]+J[17]+J[18]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30] - logZ )
Pout[381] = exp( +H[0]+H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[1]+J[2]+J[3]+J[4]+J[5]+J[7]+J[15]+J[16]+J[17]+J[18]+J[20]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34] - logZ )
Pout[382] = exp( +H[0]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[1]+J[2]+J[3]+J[4]+J[5]+J[6]+J[15]+J[16]+J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33] - logZ )
Pout[383] = exp( +H[0]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[1]+J[2]+J[3]+J[4]+J[5]+J[6]+J[7]+J[15]+J[16]+J[17]+J[18]+J[19]+J[20]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[384] = exp( +H[0]+H[1]+J[0] - logZ )
Pout[385] = exp( +H[0]+H[1]+H[8]+J[0]+J[7]+J[14] - logZ )
Pout[386] = exp( +H[0]+H[1]+H[7]+J[0]+J[6]+J[13] - logZ )
Pout[387] = exp( +H[0]+H[1]+H[7]+H[8]+J[0]+J[6]+J[7]+J[13]+J[14]+J[35] - logZ )
Pout[388] = exp( +H[0]+H[1]+H[6]+J[0]+J[5]+J[12] - logZ )
Pout[389] = exp( +H[0]+H[1]+H[6]+H[8]+J[0]+J[5]+J[7]+J[12]+J[14]+J[34] - logZ )
Pout[390] = exp( +H[0]+H[1]+H[6]+H[7]+J[0]+J[5]+J[6]+J[12]+J[13]+J[33] - logZ )
Pout[391] = exp( +H[0]+H[1]+H[6]+H[7]+H[8]+J[0]+J[5]+J[6]+J[7]+J[12]+J[13]+J[14]+J[33]+J[34]+J[35] - logZ )
Pout[392] = exp( +H[0]+H[1]+H[5]+J[0]+J[4]+J[11] - logZ )
Pout[393] = exp( +H[0]+H[1]+H[5]+H[8]+J[0]+J[4]+J[7]+J[11]+J[14]+J[32] - logZ )
Pout[394] = exp( +H[0]+H[1]+H[5]+H[7]+J[0]+J[4]+J[6]+J[11]+J[13]+J[31] - logZ )
Pout[395] = exp( +H[0]+H[1]+H[5]+H[7]+H[8]+J[0]+J[4]+J[6]+J[7]+J[11]+J[13]+J[14]+J[31]+J[32]+J[35] - logZ )
Pout[396] = exp( +H[0]+H[1]+H[5]+H[6]+J[0]+J[4]+J[5]+J[11]+J[12]+J[30] - logZ )
Pout[397] = exp( +H[0]+H[1]+H[5]+H[6]+H[8]+J[0]+J[4]+J[5]+J[7]+J[11]+J[12]+J[14]+J[30]+J[32]+J[34] - logZ )
Pout[398] = exp( +H[0]+H[1]+H[5]+H[6]+H[7]+J[0]+J[4]+J[5]+J[6]+J[11]+J[12]+J[13]+J[30]+J[31]+J[33] - logZ )
Pout[399] = exp( +H[0]+H[1]+H[5]+H[6]+H[7]+H[8]+J[0]+J[4]+J[5]+J[6]+J[7]+J[11]+J[12]+J[13]+J[14]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[400] = exp( +H[0]+H[1]+H[4]+J[0]+J[3]+J[10] - logZ )
Pout[401] = exp( +H[0]+H[1]+H[4]+H[8]+J[0]+J[3]+J[7]+J[10]+J[14]+J[29] - logZ )
Pout[402] = exp( +H[0]+H[1]+H[4]+H[7]+J[0]+J[3]+J[6]+J[10]+J[13]+J[28] - logZ )
Pout[403] = exp( +H[0]+H[1]+H[4]+H[7]+H[8]+J[0]+J[3]+J[6]+J[7]+J[10]+J[13]+J[14]+J[28]+J[29]+J[35] - logZ )
Pout[404] = exp( +H[0]+H[1]+H[4]+H[6]+J[0]+J[3]+J[5]+J[10]+J[12]+J[27] - logZ )
Pout[405] = exp( +H[0]+H[1]+H[4]+H[6]+H[8]+J[0]+J[3]+J[5]+J[7]+J[10]+J[12]+J[14]+J[27]+J[29]+J[34] - logZ )
Pout[406] = exp( +H[0]+H[1]+H[4]+H[6]+H[7]+J[0]+J[3]+J[5]+J[6]+J[10]+J[12]+J[13]+J[27]+J[28]+J[33] - logZ )
Pout[407] = exp( +H[0]+H[1]+H[4]+H[6]+H[7]+H[8]+J[0]+J[3]+J[5]+J[6]+J[7]+J[10]+J[12]+J[13]+J[14]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35] - logZ )
Pout[408] = exp( +H[0]+H[1]+H[4]+H[5]+J[0]+J[3]+J[4]+J[10]+J[11]+J[26] - logZ )
Pout[409] = exp( +H[0]+H[1]+H[4]+H[5]+H[8]+J[0]+J[3]+J[4]+J[7]+J[10]+J[11]+J[14]+J[26]+J[29]+J[32] - logZ )
Pout[410] = exp( +H[0]+H[1]+H[4]+H[5]+H[7]+J[0]+J[3]+J[4]+J[6]+J[10]+J[11]+J[13]+J[26]+J[28]+J[31] - logZ )
Pout[411] = exp( +H[0]+H[1]+H[4]+H[5]+H[7]+H[8]+J[0]+J[3]+J[4]+J[6]+J[7]+J[10]+J[11]+J[13]+J[14]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35] - logZ )
Pout[412] = exp( +H[0]+H[1]+H[4]+H[5]+H[6]+J[0]+J[3]+J[4]+J[5]+J[10]+J[11]+J[12]+J[26]+J[27]+J[30] - logZ )
Pout[413] = exp( +H[0]+H[1]+H[4]+H[5]+H[6]+H[8]+J[0]+J[3]+J[4]+J[5]+J[7]+J[10]+J[11]+J[12]+J[14]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34] - logZ )
Pout[414] = exp( +H[0]+H[1]+H[4]+H[5]+H[6]+H[7]+J[0]+J[3]+J[4]+J[5]+J[6]+J[10]+J[11]+J[12]+J[13]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33] - logZ )
Pout[415] = exp( +H[0]+H[1]+H[4]+H[5]+H[6]+H[7]+H[8]+J[0]+J[3]+J[4]+J[5]+J[6]+J[7]+J[10]+J[11]+J[12]+J[13]+J[14]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35] - logZ )
Pout[416] = exp( +H[0]+H[1]+H[3]+J[0]+J[2]+J[9] - logZ )
Pout[417] = exp( +H[0]+H[1]+H[3]+H[8]+J[0]+J[2]+J[7]+J[9]+J[14]+J[25] - logZ )
Pout[418] = exp( +H[0]+H[1]+H[3]+H[7]+J[0]+J[2]+J[6]+J[9]+J[13]+J[24] - logZ )
Pout[419] = exp( +H[0]+H[1]+H[3]+H[7]+H[8]+J[0]+J[2]+J[6]+J[7]+J[9]+J[13]+J[14]+J[24]+J[25]+J[35] - logZ )
Pout[420] = exp( +H[0]+H[1]+H[3]+H[6]+J[0]+J[2]+J[5]+J[9]+J[12]+J[23] - logZ )
Pout[421] = exp( +H[0]+H[1]+H[3]+H[6]+H[8]+J[0]+J[2]+J[5]+J[7]+J[9]+J[12]+J[14]+J[23]+J[25]+J[34] - logZ )
Pout[422] = exp( +H[0]+H[1]+H[3]+H[6]+H[7]+J[0]+J[2]+J[5]+J[6]+J[9]+J[12]+J[13]+J[23]+J[24]+J[33] - logZ )
Pout[423] = exp( +H[0]+H[1]+H[3]+H[6]+H[7]+H[8]+J[0]+J[2]+J[5]+J[6]+J[7]+J[9]+J[12]+J[13]+J[14]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35] - logZ )
Pout[424] = exp( +H[0]+H[1]+H[3]+H[5]+J[0]+J[2]+J[4]+J[9]+J[11]+J[22] - logZ )
import pandas as pd
import sys
import sqlite3
import urllib
import os
from multiprocessing import Pool
import numpy as np
data = pd.read_csv("../data/complete_smiles.csv")
data["is_np"] = pd.read_csv("../data/coconut_decoy.csv", usecols=["is_np"])
data = data[data.is_np==0]
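# NOTE: get_zincid_from_smile and zinc_fast, used below, are assumed to be
# helpers defined elsewhere in this project (they appear to look up ZINC IDs
# for SMILES strings); they are not defined or imported in this file.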
zinc_id = list()
for i, smiles in enumerate(data.smiles):
print(i)
    try:
        zinc_id.append(
            get_zincid_from_smile(smiles)[0]
        )
    except Exception:
        zinc_id.append(None)
zinc_fast(data.smiles.iloc[0])
a_pool = Pool(30)
result = a_pool.map(zinc_fast,data.smiles.tolist())
result = np.array(result)
np.sum(result==None)
data.loc[data.is_np==0,["ZINC_ID"]] = result
data.to_csv("../data/publish_data.csv", index =False)
check = data.loc[data["is_np"]==0, ["smiles","ZINC_ID"]]
np.sum(result == None)
check[check.ZINC_ID.isna()].iloc[1,0]
a_pool = Pool(30)
result2 = a_pool.map(zinc_fast,check[check.ZINC_ID.isna()].smiles.tolist())
result2 = np.array(result2)
check.loc[check.ZINC_ID.isna(),"ZINC_ID"] =result2
np.sum(result2 == None)
"""
Power Flow Analysis: Support Functions
Created By:
<NAME>
<NAME>
"""
import numpy as np
from numpy.linalg import inv
import pandas as pd
"""
Imports Bus and line data from excel sheets
Takes in an array containing ['File Location', 'Sheet Name']
Returns two panda data frames for the bus and line data
"""
def import_BusAndLineData(BusData_Location, LineData_Location):
BusData = pd.read_excel(BusData_Location[0], sheet_name=BusData_Location[1])
LineData = pd.read_excel(LineData_Location[0], sheet_name=LineData_Location[1])
return BusData, LineData
"""
Builds G and B matrices to be used in Power Flow calculations
Takes in data frame containing all line information, and number of busses in system
Returns G and B arrays
"""
def build_AdmittanceMatrix(LineData, sys_Size):
col = np.array(LineData.columns)
line_From = np.array(LineData[col[0]])
line_To = np.array(LineData[col[1]])
line_R = np.array(LineData[col[2]])
line_X = np.array(LineData[col[3]])
line_Z = np.array(LineData[col[2]]) + 1j*np.array(LineData[col[3]])
line_Y = 1/line_Z
line_B = np.array(LineData[col[4]])
line_Fmax = np.array(LineData[col[5]])
sys_Y = np.array([[0 for j in range(sys_Size)] for i in range(sys_Size)], dtype = complex)
sys_G = np.zeros((sys_Size, sys_Size))
sys_B = np.zeros((sys_Size, sys_Size))
#X_ij
for i in range(sys_Size): #Row
for j in range(sys_Size): #Column
if i==j: # Diagonal, sum of Y(From==i || To==i) + .5B(From==i || To ==i)
sys_Y[i][j] = np.sum(line_Y[np.array(line_From==i+1) + np.array(line_To==i+1)]) \
+.5j*np.sum(line_B[np.array(line_From==i+1) + np.array(line_To==i+1)])
elif i<j: #Non Diagonal, -Y(From==i && To==j)
sys_Y[i][j] = -np.sum(line_Y[np.multiply(np.array(line_From==i+1), np.array(line_To==j+1))])
else: #i>j =[j][i]
sys_Y[i][j] = sys_Y[j][i]
sys_G = sys_Y.real
sys_B = sys_Y.imag
return sys_Y, sys_G, sys_B
"""
Parses initial bus information from data
Takes in Bus Data data frame
Returns sys_:
LoadP - active power consumed at node
LoadQ - reactive power consumed at node
BusType - type of bus<(S)lack, (G)enerator, (D)rain>
PGen - Active Power produced by each generator node
VRef - Reference voltages at PV busses
"""
def init_BusData(BusData):
col = np.array(BusData.columns)
sys_BusNum = np.array(BusData[col[0]])
sys_LoadP = np.array(BusData[col[1]])
sys_LoadQ = np.array(BusData[col[2]])
sys_BusType = np.array(BusData[col[3]])
sys_PGen = np.array(BusData[col[4]])
sys_VRef = np.array(BusData[col[5]])
return sys_BusNum, sys_LoadP, sys_LoadQ, sys_BusType, sys_PGen, sys_VRef
"""
Initializes System Data for processing
Takes in sys_:
LoadP - active power consumed at node
LoadQ - reactive power consumed at node
BusType - type of bus<(S)lack, (G)enerator, (D)rain>
PGen - Active Power produced by each generator node
VRef - Reference voltages at PV busses
Returns a 2D array containing each buses's current information
[i,:] - Bus i's information
[:,0] - Bus #
[:,1] - Voltage (V)
[:,2] - Angle (T)
[:,3] - Active Power (P_inj)
[:,4] - P(T,V)-P_inj (mismatch)
[:,5] - Reactive Power (Q_inj)
[:,6] - Q(T,V)-Q_inj (mismatch)
"""
def init_SysData(sys_BusNum, sys_LoadP, sys_LoadQ, sys_BusType, sys_PGen, sys_VRef, sys_G, sys_B, S_Base):
n= sys_LoadP.size
sys_Data = np.zeros((n,7))
sys_Data[:,0] = sys_BusNum
sys_Data[:,1] = sys_VRef #Sets initial voltages to provided reference
sys_Data[:,2] = np.zeros(n) #Sets initial angles to zero
sys_Data[:,3] = (sys_PGen-sys_LoadP)/S_Base #Sets initial power inject to Bus generation minus load in per unit
sys_Data[sys_BusType=='S',3] = (np.sum(sys_LoadP)-np.sum(sys_PGen))/S_Base #Sets initial guess for active power required from slack bus
sys_Data[:,5] = (-sys_LoadQ)/S_Base #Sets initial power inject to Bus generation minus load in per unit
sys_Data[sys_BusType=='S',5] = (-np.sum(sys_LoadQ))/S_Base #Sets initial guess for reactive power required from slack bus
for i in range(n): #Sets initial mismatch to calculated power from (V,T) minus expected inject
sys_Data[i,4] = -sys_Data[i,3]
sys_Data[i,6] = -sys_Data[i,5]
for j in range(n):
sys_Data[i,4] += sys_Data[i,1]*sys_Data[j,1]*\
(sys_G[i,j]*np.cos(sys_Data[i,2]-sys_Data[j,2])+\
sys_B[i,j]*np.sin(sys_Data[i,2]-sys_Data[j,2]))
sys_Data[i,6] += sys_Data[i,1]*sys_Data[j,1]*\
(sys_G[i,j]*np.sin(sys_Data[i,2]-sys_Data[j,2])-\
sys_B[i,j]*np.cos(sys_Data[i,2]-sys_Data[j,2]))
return sys_Data
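# The mismatch terms above implement the standard AC power-flow equations:
#   P_i(V, T) = sum_j V_i * V_j * (G_ij*cos(T_i - T_j) + B_ij*sin(T_i - T_j))
#   Q_i(V, T) = sum_j V_i * V_j * (G_ij*sin(T_i - T_j) - B_ij*cos(T_i - T_j))
# so columns 4 and 6 hold P(T,V) - P_inj and Q(T,V) - Q_inj respectively.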
"""
Determines Jacobian value for a given J_11 cell (dP/dT)
    Takes in: i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij
Returns Jacobian cell value
"""
def Jacobian_PowerFlow_11(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij):
if(i==j):
J_ij = -Q_i - B_ij*(V_i**2)
else:
J_ij = V_i*V_j*(G_ij*np.sin(T_i-T_j)-B_ij*np.cos(T_i-T_j))
return J_ij
"""
Determines Jacobian value for a given J_12 cell (dP/dV)
    Takes in: i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij
Returns Jacobian cell value
"""
def Jacobian_PowerFlow_12(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij):
if(i==j):
J_ij = (P_i/V_i) + G_ij*V_i
else:
J_ij = V_i*(G_ij*np.cos(T_i-T_j)+B_ij*np.sin(T_i-T_j))
return J_ij
"""
Determines Jacobian value for a given J_21 cell (dQ/dT)
    Takes in: i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij
Returns Jacobian cell value
"""
def Jacobian_PowerFlow_21(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij):
if(i==j):
J_ij = P_i-G_ij*(V_i**2)
else:
J_ij = -V_i*V_j*(G_ij*np.cos(T_i-T_j)+B_ij*np.sin(T_i-T_j))
return J_ij
"""
Determines Jacobian value for a given J_22 cell (dQ/dV)
    Takes in: i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij
Returns Jacobian cell value
"""
def Jacobian_PowerFlow_22(i, j, V_i, V_j, T_i, T_j, P_i, Q_i, G_ij, B_ij):
if(i==j):
J_ij = (Q_i/V_i)-B_ij*V_i
else:
J_ij = V_i*(G_ij*np.sin(T_i-T_j)-B_ij*np.cos(T_i-T_j))
return J_ij
"""
Processes 1 iteration of current system data
Takes in sys_Data, a 2D array containing each node's current information
[0] - Bus #
[1] - Voltage (V)
[2] - Angle (T)
[3] - Active Power (P_inj)
[4] - P(T,V)-P_inj (mismatch)
[5] - Reactive Power (Q_inj)
[6] - Q(T,V)-Q_inj (mismatch)
As well as, the systems G and B matrices, and node types
Returns the updated array
"""
def update_SysData(sys_Data, sys_G, sys_B, sys_BusType):
n = sys_BusType.size
D_index = sys_BusType=='D'
G_index = sys_BusType=='G'
S_index = sys_BusType=='S'
"""Determine Jacobian"""
J = np.zeros((2*n,2*n))
for i in range(n):
for j in range(n): #(i, j, V_i, V_j, T_i, T_j, P_i(T,V), Q_i(T,V), G_ij, B_ij)
J[i,j] = Jacobian_PowerFlow_11(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j])
J[i,j+n] = Jacobian_PowerFlow_12(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j])
J[i+n,j] = Jacobian_PowerFlow_21(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j])
J[i+n,j+n] =Jacobian_PowerFlow_22(i, j, sys_Data[i,1], sys_Data[j,1], sys_Data[i,2], sys_Data[j,2], sys_Data[i,4]+sys_Data[i,3], sys_Data[i,6]+sys_Data[i,5], sys_G[i,j], sys_B[i,j])
"""Remove non-implicit values"""
for i in range(n-1,-1,-1):
if S_index[i]:
J=np.delete(J, i+n, 0)
J=np.delete(J, i+n, 1)
J=np.delete(J, i, 0)
J=np.delete(J, i, 1)
elif G_index[i]:
J=np.delete(J, i+n, 0)
J=np.delete(J, i+n, 1)
"""Determine Inverse"""
    J_inv = inv(J)
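    # --- Assumed completion: the source file is truncated after computing
    # J_inv, so the Newton-Raphson update below is a hedged sketch, not the
    # original author's code. The mismatch vector ordering matches the rows
    # kept in J: P mismatches for every non-slack bus, then Q mismatches for
    # load (PQ) buses.
    mismatch = np.concatenate((sys_Data[~S_index, 4], sys_Data[D_index, 6]))
    delta = -J_inv @ mismatch
    n_theta = int(np.sum(~S_index))
    sys_Data[~S_index, 2] += delta[:n_theta]  # angle corrections
    sys_Data[D_index, 1] += delta[n_theta:]   # PQ-bus voltage corrections
    # Recompute the mismatches at the updated operating point
    for i in range(n):
        sys_Data[i, 4] = -sys_Data[i, 3]
        sys_Data[i, 6] = -sys_Data[i, 5]
        for j in range(n):
            sys_Data[i, 4] += sys_Data[i, 1]*sys_Data[j, 1]*\
                (sys_G[i, j]*np.cos(sys_Data[i, 2]-sys_Data[j, 2]) +
                 sys_B[i, j]*np.sin(sys_Data[i, 2]-sys_Data[j, 2]))
            sys_Data[i, 6] += sys_Data[i, 1]*sys_Data[j, 1]*\
                (sys_G[i, j]*np.sin(sys_Data[i, 2]-sys_Data[j, 2]) -
                 sys_B[i, j]*np.cos(sys_Data[i, 2]-sys_Data[j, 2]))
    return sys_Data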
import os
import time
import datetime
import numpy as np
import tensorflow as tf
import utils.data_utils as utils
from models.quinn import Quinn
tf.logging.set_verbosity(tf.logging.ERROR)
train_file = './data/dumps/train.pckl'
val_file = './data/dumps/val.pckl'
# Model Parameters
max_length = 600
vocab_size = 3193
embedding_dims = 300
hidden_layers = 256
# Training Parameters
l2_lambda = 1e-4
batch_size = 64
num_epochs = 100
num_checkpoints = 3
checkpoint_every = 10
# Prepare and load training and validation data
if not os.path.exists(train_file):
print ("Train dump not found. Preparing data ...")
train_tsv_path = './data/english/All_Train.tsv'
utils.create_dump(train_tsv_path, train_file)
if not os.path.exists(val_file):
print ("Validation dump not found. Preparing data ...")
val_tsv_path = './data/english/All_Dev.tsv'
utils.create_dump(val_tsv_path, val_file)
print ('Loading dataset from ./data/dumps/ ...')
x_train, x_train_map, y_train, y_train_prob = utils.fetch(train_file)
x_val, x_val_map, y_val, y_val_prob = utils.fetch(val_file)
# Load embeddings
embedding_path = './data/dumps/embeddings.npy'
embedding = utils.load_embeddings(embedding_path, vocab_size, dimensions=300)
print ("Embeddings loaded, Vocabulary Size: {:d}.".format(vocab_size))
# Shuffle training data
np.random.seed(10)
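# Assumed continuation (the original script is truncated here); assuming
# utils.fetch returned numpy arrays, shuffle all training arrays with one
# fixed permutation so inputs stay aligned with their targets.
shuffle_indices = np.random.permutation(len(y_train))
x_train = x_train[shuffle_indices]
x_train_map = x_train_map[shuffle_indices]
y_train = y_train[shuffle_indices]
y_train_prob = y_train_prob[shuffle_indices]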
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The Phantom class is instantiated with a ground-truth phantom and corresponding material properties data. The get_projections method simulates data acquisition and returns radiographs for the specified theta values.
"""
import sys
import os
import numpy as np
import pandas as pd
from scipy import misc
import h5py
import time
from scipy.integrate import simps
import matplotlib.pyplot as plt
import cv2
from tomopy import project
from scipy.ndimage.filters import gaussian_filter
from tomo_twin.pg_filter import add_phase_contrast
model_data_path = '../model_data'
class Phantom:
def __init__(self, vol, materials, res, energy_pts, bits = 16, data_path = model_data_path):
'''
Parameters
----------
vol : np.array
labeled (segmented / ground-truth) volume. voxel values are in finite range [0,...n_materials-1].
materials : dict
dict of material names and their respective density g/cc, e.g. {"Fe" : 7.87, "Al": 2.7}
res : float
voxel size in microns
energy_pts : float or np.array
list of energies
bits : int
16 for 16 bit camera
data_path : str
path to exported XOP data
'''
# deal with materials
self.bits = bits
self.res = res
self.data_path = data_path
        self.energy_pts = np.asarray(energy_pts)
import os
import sys
from glob import glob
from tqdm import tqdm
import numpy as np
import pandas as pd
import SimpleITK as sitk
from torch.utils.data import Dataset, DataLoader
import nibabel
from scipy import ndimage
import time
import torch
import torch.nn as nn
import fire
import time
import pydicom
import shutil
def read_config_file(config_file):
'''
config_file: '../data/config/肝穿病人给放射科和合作者.xlsx'
    Header columns: ['编号', '住院号', '姓名', 'series uid', 'Unnamed: 4', '性别1男2女', '年龄', 'MRS脂肪峰面积', '水峰面积', '脂肪含量', 'Fat', 'necrosisfoci', 'ballooning', 'NAS(total)', 'fibrosis', 'NAS大于4', '进展性纤维化', '脂肪肝病理评分']
    Only 'series uid' and 'fibrosis' are used here.
debug cmd: read_config_file('../data/config/肝穿病人给放射科和合作者.xlsx')
'''
df = pd.read_excel(config_file)
series_fib_dict = {}
for index, row in df.iterrows():
series_fib_dict[row['series uid']] = int(row['fibrosis'])
return series_fib_dict
def read_dcm_file(in_dcm_path):
series_reader = sitk.ImageSeriesReader()
dicomfilenames = series_reader.GetGDCMSeriesFileNames(in_dcm_path)
series_reader.SetFileNames(dicomfilenames)
series_reader.MetaDataDictionaryArrayUpdateOn()
series_reader.LoadPrivateTagsOn()
image = series_reader.Execute()
return image
def split_data_to_two_phase_one_case(series_path, out_dir):
'''
debug cmd: split_data_to_two_phase_one_case('../data/images_mr_filtered/1.3.12.2.1107.5.2.30.25245.2015120320185731080640838.0.0.0', '')
'''
    in_files1 = glob(os.path.join(series_path, '*.dcm'))
    in_files2 = glob(os.path.join(series_path, '*.DCM'))
in_files = in_files1 + in_files2
echo_1_files = []
echo_2_files = []
for infile in in_files:
metadata = pydicom.dcmread(infile)
if 1 == metadata.EchoNumbers:
echo_1_files.append(infile)
elif 2 == metadata.EchoNumbers:
echo_2_files.append(infile)
series_uid = os.path.basename(series_path)
out_series_path = os.path.join(out_dir, series_uid)
out_echo_1_path = os.path.join(out_series_path, 'echo_1')
out_echo_2_path = os.path.join(out_series_path, 'echo_2')
os.makedirs(out_series_path, exist_ok=True)
os.makedirs(out_echo_1_path, exist_ok=True)
os.makedirs(out_echo_2_path, exist_ok=True)
assert len(echo_1_files) == len(echo_2_files)
for src_file in echo_1_files:
dst_file = os.path.join(out_echo_1_path, os.path.basename(src_file))
shutil.copyfile(src_file, dst_file)
print('====> copy from {} to {}'.format(src_file, dst_file))
for src_file in echo_2_files:
dst_file = os.path.join(out_echo_2_path, os.path.basename(src_file))
shutil.copyfile(src_file, dst_file)
print('====> copy from {} to {}'.format(src_file, dst_file))
def split_data_to_two_phase_singletask(in_dir, out_dir, config_file):
'''
indir: ../data/images_mr_filtered
outdir: ../data/experiment_0/0.ori
    config_file: '../data/config/肝穿病人给放射科和合作者.xlsx'. The config file determines which series proceed to later steps; this guards against non-series subfolders mixed into the input directory.
debug cmd: split_data_to_two_phase_singletask('../data/images_mr_filtered', '../data/experiment_0/0.ori', '../data/config/肝穿病人给放射科和合作者.xlsx')
invoke cmd: python FattyLiverDatasets.py split_data_to_two_phase_singletask '../data/images_mr_filtered' '../data/experiment_0/0.ori' '../data/config/肝穿病人给放射科和合作者.xlsx'
'''
series_fib_dict = read_config_file(config_file)
series_uids = os.listdir(in_dir)
series_paths = []
for series_uid in series_uids:
if not series_uid in series_fib_dict:
continue
series_path = os.path.join(in_dir, series_uid)
series_paths.append(series_path)
split_data_to_two_phase_one_case(series_path, out_dir)
def resample_sitkImage_by_spacing(sitkImage, newSpacing, vol_default_value='min', interpolator=sitk.sitkNearestNeighbor):
"""
    Resample a SimpleITK image onto a new voxel spacing, keeping the physical extent (voxel counts are rescaled accordingly).
    :param sitkImage: input sitk.Image
    :param newSpacing: target voxel spacing, one value per image dimension
    :param vol_default_value: fill value for voxels outside the input ('min', 'zero', or a number)
    :param interpolator: SimpleITK interpolator to apply
    :return: the resampled sitk.Image, or None for invalid input
"""
if sitkImage == None:
return None
if newSpacing is None:
return None
dim = sitkImage.GetDimension()
if len(newSpacing) != dim:
return None
# determine the default value
vol_value = 0.0
if vol_default_value == 'min':
vol_value = float(np.ndarray.min(sitk.GetArrayFromImage(sitkImage)))
elif vol_default_value == 'zero':
vol_value = 0.0
elif str(vol_default_value).isnumeric():
vol_value = float(vol_default_value)
# calculate new size
np_oldSize = np.array(sitkImage.GetSize())
np_oldSpacing = np.array(sitkImage.GetSpacing())
np_newSpacing = np.array(newSpacing)
np_newSize = np.divide(np.multiply(np_oldSize, np_oldSpacing), np_newSpacing)
newSize = tuple(np_newSize.astype(np.uint).tolist())
# resample sitkImage into new specs
transform = sitk.Transform()
return sitk.Resample(sitkImage, newSize, transform, interpolator, sitkImage.GetOrigin(),
newSpacing, sitkImage.GetDirection(), vol_value, sitkImage.GetPixelID())
def resample_data_one_case(series_path, out_dir, z_mul:int):
'''
series_path: ../data/experiment_0/0.ori/1.3.12.2.1107.5.2.30.25245.2015120320185731080640838.0.0.0/11
resample_data_one_case('../data/experiment_0/0.ori/1.3.12.2.1107.5.2.30.25245.2015120320185731080640838.0.0.0/echo_1', '../data/experiment_0/0.ori/1.3.12.2.1107.5.2.30.25245.2015120320185731080640838.0.0.0', 1)
'''
beg = time.time()
print('====> processing {}'.format(series_path))
image = read_dcm_file(series_path)
basename = os.path.basename(series_path)
    # 1. Save the original-resolution data as nii.gz
out_raw_file = os.path.join(out_dir, '{}.nii.gz'.format(basename))
    sitk.WriteImage(image, out_raw_file)  # the caller reads this file back, so it must actually be written
# 2. resample, base x-spacing
# spc = image.GetSpacing()
# mults = [1,2,4,8]
# for z_mul in mults:
# out_resampled_file = os.path.join(out_dir, '{}_z_mul{}.nii.gz'.format(basename, z_mul))
# new_spc = [spc[0]] + [spc[0]] + [spc[0]*z_mul]
# resampled_img = resample_sitkImage_by_spacing(image, new_spc, interpolator=sitk.sitkLinear)
# sitk.WriteImage(resampled_img, out_resampled_file)
end = time.time()
print('=====> finish {}, time elapsed is {:.3f}s'.format(series_path, end-beg))
return out_raw_file
def resample_data_singletask(series_paths):
'''
indir: ../data/experiment_0/0.ori
debug cmd: resample_data_singletask('../data/experiment_0/0.ori')
invoke cmd: python FattyLiverDatasets.py resample_data_singletask '../data/experiment_0/0.ori'
'''
print(series_paths)
for series_path in tqdm(series_paths):
if not os.path.isdir(series_path):
continue
echo_1_path = os.path.join(series_path, 'echo_1')
echo_2_path = os.path.join(series_path, 'echo_2')
out_dir = series_path
if not os.path.isdir(echo_1_path):
print('{} echo 1 data not exist!'.format(series_path))
continue
if not os.path.isdir(echo_2_path):
print('{} echo 2 data not exist!'.format(series_path))
continue
out_echo_1_file = resample_data_one_case(echo_1_path, out_dir, 1)
out_echo_2_file = resample_data_one_case(echo_2_path, out_dir, 1)
echo_1_image = sitk.ReadImage(out_echo_1_file)
echo_2_image = sitk.ReadImage(out_echo_2_file)
echo_1_arr = sitk.GetArrayFromImage(echo_1_image)
echo_2_arr = sitk.GetArrayFromImage(echo_2_image)
    echo_1_arr = np.array(echo_1_arr, dtype=np.int16)
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Reliability calibration plugins."""
import operator
import warnings
import iris
import numpy as np
import scipy
from improver import BasePlugin, PostProcessingPlugin
from improver.calibration.utilities import (
check_forecast_consistency,
create_unified_frt_coord,
filter_non_matching_cubes,
)
from improver.metadata.probabilistic import (
find_threshold_coordinate,
probability_is_above_or_below,
)
from improver.metadata.utilities import generate_mandatory_attributes
from improver.utilities.cube_manipulation import MergeCubes, collapsed
class ConstructReliabilityCalibrationTables(BasePlugin):
"""A plugin for creating and populating reliability calibration tables."""
def __init__(
self,
n_probability_bins=5,
single_value_lower_limit=False,
single_value_upper_limit=False,
):
"""
Initialise class for creating reliability calibration tables. These
tables include data columns entitled observation_count,
sum_of_forecast_probabilities, and forecast_count, defined below.
n_probability_bins (int):
The total number of probability bins required in the reliability
tables. If single value limits are turned on, these are included in
this total.
single_value_lower_limit (bool):
Mandates that the lowest bin should be single valued,
with a small precision tolerance, defined as 1.0E-6.
The bin is thus 0 to 1.0E-6.
single_value_upper_limit (bool):
Mandates that the highest bin should be single valued,
with a small precision tolerance, defined as 1.0E-6.
The bin is thus (1 - 1.0E-6) to 1.
"""
self.single_value_tolerance = 1.0e-6
self.probability_bins = self._define_probability_bins(
n_probability_bins, single_value_lower_limit, single_value_upper_limit
)
self.table_columns = np.array(
["observation_count", "sum_of_forecast_probabilities", "forecast_count"]
)
self.expected_table_shape = (len(self.table_columns), n_probability_bins)
def __repr__(self):
"""Represent the configured plugin instance as a string."""
bin_values = ", ".join(
["[{:1.2f} --> {:1.2f}]".format(*item) for item in self.probability_bins]
)
result = "<ConstructReliabilityCalibrationTables: " "probability_bins: {}>"
return result.format(bin_values)
def _define_probability_bins(
self, n_probability_bins, single_value_lower_limit, single_value_upper_limit
):
"""
Define equally sized probability bins for use in a reliability table.
The range 0 to 1 is divided into ranges to give n_probability bins.
If single_value_lower_limit and / or single_value_upper_limit are True,
additional bins corresponding to values of 0 and / or 1 will be created,
each with a width defined by self.single_value_tolerance.
Args:
n_probability_bins (int):
The total number of probability bins desired in the
reliability tables. This number includes the extrema bins
(equals 0 and equals 1) if single value limits are turned on,
in which case the minimum number of bins is 3.
single_value_lower_limit (bool):
Mandates that the lowest bin should be single valued,
with a small precision tolerance, defined as 1.0E-6.
The bin is thus 0 to 1.0E-6.
single_value_upper_limit (bool):
Mandates that the highest bin should be single valued,
with a small precision tolerance, defined as 1.0E-6.
The bin is thus (1 - 1.0E-6) to 1.
Returns:
numpy.ndarray:
An array of 2-element arrays that contain the bounds of the
probability bins. These bounds are non-overlapping, with
adjacent bin boundaries spaced at the smallest representable
interval.
Raises:
ValueError: If trying to use both single_value_lower_limit and
single_value_upper_limit with 2 or fewer probability bins.
"""
if single_value_lower_limit and single_value_upper_limit:
if n_probability_bins <= 2:
msg = (
"Cannot use both single_value_lower_limit and "
"single_value_upper_limit with 2 or fewer "
"probability bins."
)
raise ValueError(msg)
n_probability_bins = n_probability_bins - 2
elif single_value_lower_limit or single_value_upper_limit:
n_probability_bins = n_probability_bins - 1
bin_lower = np.linspace(0, 1, n_probability_bins + 1, dtype=np.float32)
bin_upper = np.nextafter(bin_lower, 0, dtype=np.float32)
bin_upper[-1] = 1.0
bins = np.stack([bin_lower[:-1], bin_upper[1:]], 1).astype(np.float32)
if single_value_lower_limit:
bins[0, 0] = np.nextafter(self.single_value_tolerance, 1, dtype=np.float32)
lowest_bin = np.array([0, self.single_value_tolerance], dtype=np.float32)
bins = np.vstack([lowest_bin, bins]).astype(np.float32)
if single_value_upper_limit:
bins[-1, 1] = np.nextafter(
1.0 - self.single_value_tolerance, 0, dtype=np.float32
)
highest_bin = np.array(
[1.0 - self.single_value_tolerance, 1], dtype=np.float32
)
bins = np.vstack([bins, highest_bin]).astype(np.float32)
return bins
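    # Example (illustrative): n_probability_bins=5 with both single-value
    # limits yields bins of roughly [0, 1e-6], [1e-6, 1/3], [1/3, 2/3],
    # [2/3, 1 - 1e-6] and [1 - 1e-6, 1].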
def _create_probability_bins_coord(self):
"""
Construct a dimension coordinate describing the probability bins
of the reliability table.
Returns:
iris.coords.DimCoord:
A dimension coordinate describing probability bins.
"""
values = np.mean(self.probability_bins, axis=1, dtype=np.float32)
probability_bins_coord = iris.coords.DimCoord(
values, long_name="probability_bin", units=1, bounds=self.probability_bins
)
return probability_bins_coord
def _create_reliability_table_coords(self):
"""
Construct coordinates that describe the reliability table rows. These
are observation_count, sum_of_forecast_probabilities, and
forecast_count. The order used here is the order in which the table
data is populated, so these must remain consistent with the
_populate_reliability_bins function.
Returns:
(tuple): tuple containing:
**index_coord** (iris.coords.DimCoord):
A numerical index dimension coordinate.
**name_coord** (iris.coords.AuxCoord):
An auxiliary coordinate that assigns names to the index
coordinates, where these names correspond to the
reliability table rows.
"""
index_coord = iris.coords.DimCoord(
np.arange(len(self.table_columns), dtype=np.int32),
long_name="table_row_index",
units=1,
)
name_coord = iris.coords.AuxCoord(
self.table_columns, long_name="table_row_name", units=1
)
return index_coord, name_coord
@staticmethod
def _define_metadata(forecast_slice):
"""
Define metadata that is specifically required for reliability table
cubes, whilst ensuring any mandatory attributes are also populated.
Args:
forecast_slice (iris.cube.Cube):
The source cube from which to get pre-existing metadata of use.
Returns:
dict:
A dictionary of attributes that are appropriate for the
reliability table cube.
"""
attributes = generate_mandatory_attributes([forecast_slice])
attributes["title"] = "Reliability calibration data table"
return attributes
def _create_reliability_table_cube(self, forecast, threshold_coord):
"""
Construct a reliability table cube and populate it with the provided
data. The returned cube will include a cycle hour coordinate, which
describes the model cycle hour at which the forecast data was produced.
It will further include the forecast period, threshold coordinate,
and spatial coordinates from the forecast cube.
Args:
forecast (iris.cube.Cube):
A cube slice across the spatial dimensions of the forecast
data. This slice provides the time and threshold values that
relate to the reliability_table_data.
threshold_coord (iris.coords.DimCoord):
The threshold coordinate.
Returns:
iris.cube.Cube:
A reliability table cube.
"""
def _get_coords_and_dims(coord_names):
"""Obtain the requested coordinates and their dimension index from
the forecast slice cube."""
coords_and_dims = []
leading_coords = [probability_bins_coord, reliability_index_coord]
for coord_name in coord_names:
crd = forecast_slice.coord(coord_name)
crd_dim = forecast_slice.coord_dims(crd)
crd_dim = crd_dim[0] + len(leading_coords) if crd_dim else ()
coords_and_dims.append((crd, crd_dim))
return coords_and_dims
forecast_slice = next(forecast.slices_over(["time", threshold_coord]))
expected_shape = self.expected_table_shape + forecast_slice.shape
dummy_data = np.zeros((expected_shape))
diagnostic = find_threshold_coordinate(forecast).name()
attributes = self._define_metadata(forecast)
# Define reliability table specific coordinates
probability_bins_coord = self._create_probability_bins_coord()
(
reliability_index_coord,
reliability_name_coord,
) = self._create_reliability_table_coords()
frt_coord = create_unified_frt_coord(forecast.coord("forecast_reference_time"))
# List of required non-spatial coordinates from the forecast
non_spatial_coords = ["forecast_period", diagnostic]
# Construct a list of coordinates in the desired order
dim_coords = [forecast.coord(axis=dim).name() for dim in ["x", "y"]]
dim_coords_and_dims = _get_coords_and_dims(dim_coords)
aux_coords_and_dims = _get_coords_and_dims(non_spatial_coords)
dim_coords_and_dims.append((reliability_index_coord, 0))
aux_coords_and_dims.append((reliability_name_coord, 0))
dim_coords_and_dims.append((probability_bins_coord, 1))
reliability_cube = iris.cube.Cube(
dummy_data,
units=1,
attributes=attributes,
dim_coords_and_dims=dim_coords_and_dims,
aux_coords_and_dims=aux_coords_and_dims,
)
reliability_cube.add_aux_coord(frt_coord)
reliability_cube.rename("reliability_calibration_table")
return reliability_cube
def _populate_reliability_bins(self, forecast, truth):
"""
For an x-y slice at a single validity time and threshold, populate
a reliability table using the provided truth.
Args:
forecast (numpy.ndarray or numpy.ma.MaskedArray):
An array containing data over an xy slice for a single validity
time and threshold.
truth (numpy.ndarray or numpy.ma.MaskedArray):
An array containing a thresholded gridded truth at an
equivalent validity time to the forecast array.
Returns:
numpy.ma.MaskedArray:
An array containing reliability table data for a single time
and threshold. The leading dimension corresponds to the rows
of a calibration table, the second dimension to the number of
probability bins, and the trailing dimensions are the spatial
dimensions of the forecast and truth cubes (which are
equivalent).
"""
observation_counts = []
forecast_probabilities = []
forecast_counts = []
for bin_min, bin_max in self.probability_bins:
observation_mask = (
((forecast >= bin_min) & (forecast <= bin_max)) & (np.isclose(truth, 1))
).astype(int)
forecast_mask = ((forecast >= bin_min) & (forecast <= bin_max)).astype(int)
forecasts_probability_values = forecast * forecast_mask
observation_counts.append(observation_mask)
forecast_probabilities.append(forecasts_probability_values)
forecast_counts.append(forecast_mask)
reliability_table = np.ma.stack(
[
np.ma.stack(observation_counts),
np.ma.stack(forecast_probabilities),
np.ma.stack(forecast_counts),
]
)
return reliability_table.astype(np.float32)
def _populate_masked_reliability_bins(self, forecast, truth):
"""
Support populating the reliability table bins with a masked truth. If a
masked truth is provided, a masked reliability table is returned.
Args:
forecast (numpy.ndarray):
An array containing data over an xy slice for a single validity
time and threshold.
truth (numpy.ma.MaskedArray):
An array containing a thresholded gridded truth at an
equivalent validity time to the forecast array.
Returns:
numpy.ma.MaskedArray:
An array containing reliability table data for a single time
and threshold. The leading dimension corresponds to the rows
of a calibration table, the second dimension to the number of
probability bins, and the trailing dimensions are the spatial
dimensions of the forecast and truth cubes (which are
equivalent).
"""
forecast = np.ma.masked_where(np.ma.getmask(truth), forecast)
table = self._populate_reliability_bins(forecast, truth)
# Zero data underneath mask to support bitwise addition of masks.
table.data[table.mask] = 0
return table
def _add_reliability_tables(self, forecast, truth, threshold_reliability):
"""
Add reliability tables. The presence of a masked truth is handled
separately to ensure support for a mask that changes with validity time.
Args:
forecast (numpy.ndarray):
An array containing data over an xy slice for a single validity
time and threshold.
truth (numpy.ndarray or numpy.ma.MaskedArray):
An array containing a thresholded gridded truth at an
equivalent validity time to the forecast array.
threshold_reliability (numpy.ndarray or numpy.ma.MaskedArray):
The current reliability table that will be added to.
Returns:
numpy.ndarray or numpy.ma.MaskedArray:
An array containing reliability table data for a single time
and threshold. The leading dimension corresponds to the rows
of a calibration table, the second dimension to the number of
probability bins, and the trailing dimensions are the spatial
dimensions of the forecast and truth cubes (which are
equivalent).
"""
if np.ma.is_masked(truth.data):
table = self._populate_masked_reliability_bins(forecast.data, truth.data)
# Bitwise addition of masks. This ensures that only points that are
# masked in both the existing and new reliability tables are kept
# as being masked within the resulting reliability table.
mask = threshold_reliability.mask & table.mask
threshold_reliability = np.ma.array(
threshold_reliability.data + table.data, mask=mask, dtype=np.float32,
)
else:
np.add(
threshold_reliability,
self._populate_reliability_bins(forecast.data, truth.data),
out=threshold_reliability,
dtype=np.float32,
)
return threshold_reliability
def process(self, historic_forecasts, truths):
"""
Slice data over threshold and time coordinates to construct reliability
tables. These are summed over time to give a single table for each
threshold, constructed from all the provided historic forecasts and
truths. If a masked truth is provided, a masked reliability table is
returned. If the mask within the truth varies at different timesteps,
any point that is unmasked for at least one timestep will have
unmasked values within the reliability table. Therefore historic
forecast points will only be used if they have a corresponding valid
truth point for each timestep.
.. See the documentation for an example of the resulting reliability
table cube.
.. include:: extended_documentation/calibration/
reliability_calibration/reliability_calibration_examples.rst
Note that the forecast and truth data used is probabilistic, i.e. has
already been thresholded relative to the thresholds of interest, using
the equality operator required. As such this plugin is agnostic as to
whether the data is thresholded below or above a given diagnostic
threshold.
Args:
historic_forecasts (iris.cube.Cube):
A cube containing the historical forecasts used in calibration.
These are expected to all have a consistent cycle hour, that is
the hour in the forecast reference time.
truths (iris.cube.Cube):
A cube containing the thresholded gridded truths used in
calibration.
Returns:
iris.cube.CubeList:
A cubelist of reliability table cubes, one for each threshold
in the historic forecast cubes.
Raises:
ValueError: If the forecast and truth cubes have differing
threshold coordinates.
"""
historic_forecasts, truths = filter_non_matching_cubes(
historic_forecasts, truths
)
threshold_coord = find_threshold_coordinate(historic_forecasts)
truth_threshold_coord = find_threshold_coordinate(truths)
if not threshold_coord == truth_threshold_coord:
msg = "Threshold coordinates differ between forecasts and truths."
raise ValueError(msg)
time_coord = historic_forecasts.coord("time")
check_forecast_consistency(historic_forecasts)
reliability_cube = self._create_reliability_table_cube(
historic_forecasts, threshold_coord
)
populate_bins_func = self._populate_reliability_bins
        if np.ma.is_masked(truths.data):
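            # Assumed completion: the file is truncated here. Based on the
            # masked-handling described in the docstring above, switch to the
            # masked populate function when the truths carry a mask.
            populate_bins_func = self._populate_masked_reliability_bins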
'''
author: bg
goal:
type: Image Clustering DL learn <-- VGG Auto-encoder (AE) +
how: DCNN clustering - Local Aggregation by Zhuang et al (2019) + SegNet method of AE arch
ref: https://towardsdatascience.com/image-clustering-implementation-with-pytorch-587af1d14123
refactors:
'''
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import models
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import KMeans
from sklearn.preprocessing import normalize
from scipy.spatial.distance import cosine as cosine_distance
from tqdm import tqdm
import preprocess, utilz
from skimage import img_as_uint
### ============= 1. AutoEncoder ============
class EncoderVGG( nn.Module ):
## VGG16 based
channels_in = 3
channels_code = 512
def __init__(self, pretrained=True, n_channelz = 3, code_channels=512):
super(EncoderVGG, self).__init__()
self.channels_in = n_channelz
self.channels_code = code_channels
## setup vgg encoder - chuck classifier and avg pool layers < only keep feature extraction layers
vgg = models.vgg16_bn(pretrained=pretrained)
del vgg.classifier
del vgg.avgpool
self.encoder = self._encodify(vgg )
def _encodify(self, encoder):
## adjust: avail pooling indices from encoder.max_pool to the decoder unpooling layers
## the models.vgg16_bn does not generate the indices --> so reinitialilze them so tha they can do that
modulez = nn.ModuleList()
for mod in encoder.features:
if isinstance(mod, nn.MaxPool2d):
mod_add = nn.MaxPool2d(
kernel_size = mod.kernel_size,
stride = mod.stride,
padding = mod.padding,
return_indices = True
)
modulez.append( mod_add )
else:
modulez.append( mod )
return modulez
def forward(self, x):
## forward pass
pool_indicies = [] ## to be passed to decoder for unpooling
x_current = x
for mod_encode in self.encoder:
outie = mod_encode( x_current )
# pooling layers return two outputs; 2nd is the indices
if isinstance(outie, tuple) and len(outie) == 2:
x_current, idx = outie
pool_indicies.append( idx )
else:
x_current = outie
return x_current, pool_indicies
class DecoderVGG(nn.Module):
## sorta transposed version of the VGG16 network => looks like the encoder in reverse but not strictly so
channels_in = EncoderVGG.channels_code
channels_out = EncoderVGG.channels_in
def __init__(self, encoder):
super(DecoderVGG, self).__init__()
self.decoder = self._invert(encoder)
def _invert(self, encoder):
## decoder as a somewhat mirror of encoder
## BUT/AND: 1, 2D transpose convolution + 2. 2D unpooling
## 1. 2D transpose convolution + batch norm + activation
## convert encoder.conv to decoder.transposed conv
## 2. 2d unpool : conver encoder.pool to decoder.unpool
modulez = []
for mod in reversed( encoder ):
if isinstance(mod, nn.Conv2d):
kwargz = {'in_channels':mod.out_channels,
'out_channels':mod.in_channels,
'kernel_size':mod.kernel_size,
'stride': mod.stride,
'padding':mod.padding }
mod_trans = nn.ConvTranspose2d( **kwargz )
mod_norm = nn.BatchNorm2d( mod.in_channels )
mod_act = nn.ReLU(inplace=True)
modulez += [mod_trans, mod_norm, mod_act ]
elif isinstance(mod, nn.MaxPool2d):
kwargz = {'kernel_size': mod.kernel_size,
'stride':mod.stride,
'padding':mod.padding}
modulez.append( nn.MaxUnpool2d(**kwargz) )
## drop last norm and activation so that final output is from a conv with bias
modulez = modulez[:-2]
return nn.ModuleList( modulez )
def forward(self, x, pool_indices ):
## x is a tensor from encoder and pool_indices is the list from encoder
x_current = x
k_pool = 0
rev_pool_indices = list(reversed(pool_indices))
for mod in self.decoder:
## if @ unpooling make use of the indices for that layer
if isinstance(mod, nn.MaxUnpool2d):
x_current = mod(x_current, indices=rev_pool_indices[k_pool])
k_pool += 1
else:
x_current = mod(x_current)
return x_current
class AutoEncoderVGG(nn.Module): ## now combine the encoder and decoder
channels_in = EncoderVGG.channels_in
channels_out = DecoderVGG.channels_out
channels_code = EncoderVGG.channels_code
def __init__(self, pretrained=True, n_channelz=3,out_size=512):
super(AutoEncoderVGG, self).__init__( )
self.encoder = EncoderVGG(pretrained=pretrained, n_channelz=n_channelz, code_channels=out_size)
self.decoder = DecoderVGG(self.encoder.encoder)
print("Setup AE")
def forward(self, x):
x_, idx = self.encoder(x)
x_ = self.decoder(x_, idx)
return x_
def get_params(self):
return list(self.encoder.parameters() ) + list(self.decoder.parameters() )
''' NOTES:
- Use MSE to quantify diff --> nn.MSELoss as objective fx
'''
def train_autoencoder(model, X_data, n_epoch=3):
loss_fx = nn.MSELoss()
o_k = {'lr':0.001, 'momentum':0.9}
# paramz = model.encoder.parameters() + model.decoder.parameters()
paramz = model.get_params()
optimizer = torch.optim.SGD( paramz, **o_k)
# 1. train model
model.train()
for epoch in tqdm( range(n_epoch) ):
running_loss = 0
n_inst = 0
for x in X_data:
# zero the grads > compute loss > backprop
optimizer.zero_grad()
outie = model(x.float() )
loss = loss_fx(outie, x.float() )
loss.backward()
optimizer.step()
# update aggregates and reporting
running_loss += loss.item()
#running_loss = running_loss/batch_size
print(f"E {epoch}: loss {running_loss}")
def predict_autoencoder(model, x):
model.eval()
outie = model(x.float() )
O_ = []
    ## detach each reconstructed image from the autograd graph, then re-stack into a batch
for img in outie:
img = img.detach()
O_.append( img )
yield torch.stack( O_ )
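# Illustrative usage sketch; the dataset/loader names below are assumptions,
# not part of the original source:
#   model = AutoEncoderVGG(pretrained=True)
#   loader = torch.utils.data.DataLoader(image_dataset, batch_size=8)  # yields 3xHxW tensors
#   train_autoencoder(model, loader, n_epoch=3)
#   batch = next(iter(loader))
#   recon = next(predict_autoencoder(model, batch))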
### ============= 2. Clustering ============
''' Notes on clustering - Local Aggregation Loss method (Zhuang et al, 2019, arXiv:1903.12355v2)
- Images with similar xtics will have small L2 on encoded values, but encoded values are nonlinear --> not large deviations inter-cluster
- AutoEncoder == compress form high D to low D
- Now learn 'fundusness' and also values easily 'clusterable'
Local Aggregation Loss Method
- entropy based cost/objective function for clusters <-- p(cluster membership)
- TODO: review algz again
- implement custom loss function
- Memory Bank - arXiv:1903.12355v2
- a way to deal with fact that the gradient of the LA obj func depends on the gradiens of all codes of the dataset
- efficiently computing gradients during backprop << the tangled gradients of the codes w /r/t decoder params must be computed regardless
    - b/c clustering @ comparing each element to all other elements in the dataset, thus entanglement
- memory bank trick is to treat other codes, other than those in minibatch/current, as constants.
- entanglement with derivatives of other codes thus goes away
- as long as approximated gradients are good enough to guide optimization towards a minimum, it is good
'''
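## NOTE: `marsaglia` is used by MemoryBank below but is not defined in this
## excerpt. A minimal sketch (assumption): sample a standard-normal vector and
## normalize it, which gives a point uniformly distributed on the unit hypersphere.
def marsaglia(sphere_dim):
    norm_vals = np.random.standard_normal(sphere_dim)
    return norm_vals / np.linalg.norm(norm_vals)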
class MemoryBank:
def __init__(self, n_vecs, dim_vecs, memory_mixing_rate):
self.dim_vecs = dim_vecs
self.vecs = np.array([ marsaglia(dim_vecs) for _ in range(n_vecs)])
self.memory_mixing_rate = memory_mixing_rate
self.mask_init = np.array([False]*n_vecs)
def update_memory(self, vectors, index):
if isinstance(index, int):
self.vecs[index] = self._update(vectors, self.vecs[index])
elif isinstance(index, np.ndarray):
for idx, vec in zip(index, vectors):
self.vecs[idx] = self._update(vec, self.vecs[idx] )
def mask(self, inds_int):
outie = []
for r in inds_int:
row_mask = np.full(self.vecs.shape[0], False)
row_mask[ r.astype(int) ] = True
outie.append( row_mask )
return np.array( outie )
def _update(self, vec_new, vec_recall):
return vec_new * self.memory_mixing_rate + vec_recall * (1. - self.memory_mixing_rate)
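## NOTE: `cosine_distance` is the default nn_metric of LocalAggregationLoss
## below but is not defined in this excerpt. A minimal sketch (assumption):
## one minus the cosine similarity of the two vectors.
def cosine_distance(vec1, vec2):
    return 1.0 - np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))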
class LocalAggregationLoss(nn.Module):
def __init__(self, temperature, knns,
clustering_repeats, n_centroids,
memory_bank, kmeans_n_init=1,
nn_metric=cosine_distance, nn_metric_params={} ):
super(LocalAggregationLoss, self).__init__()
self.temperature = temperature
self.memory_bank = memory_bank
## 1. Distance: Efficiently compute nearest neighbors << set B in alg
self.neighbour_finder = NearestNeighbors(n_neighbors=knns+1,
algorithm='ball_tree',
metric=nn_metric,
metric_params=nn_metric_params)
## 2. Clusters: efficiently compute clusters << set C ini alg
self.clusterer = []
for k_clusterer in range(clustering_repeats):
self.clusterer.append(
KMeans(n_clusters=n_centroids,
init='random',
n_init=kmeans_n_init)
)
def forward(self, codes, indices):
assert codes.shape[0] == len(indices)
codes = codes.type( torch.DoubleTensor )
code_data = normalize( codes.detach().numpy(), axis=1)
##constants in the loss function; no gradients@backpass
self.memory_bank.update_memory(code_data, indices)
bg_neighbours = self._nearest_neighbours(code_data, indices)
close_neighbours = self._close_grouper(indices)
neighbour_intersect = self._intersecter(bg_neighbours, close_neighbours)
## compute pdf
v = F.normalize(codes, p=2, dim=1)
d1 = self._prob_density(v, bg_neighbours)
d2 = self._prob_density(v, neighbour_intersect)
return torch.sum(torch.log(d1) - torch.log(d2))/codes.shape[0]
def _nearest_neighbours(self, codes_data, indices):
self.neighbour_finder.fit(self.memory_bank.vecs)
indices_nearest = self.neighbour_finder.kneighbors(codes_data, return_distance=False)
return self.memory_bank.mask( indices_nearest )
def _close_grouper(self, indices):
## ascertain, for each index, which memory-bank entries share a KMeans cluster with it
memberships = [[]]*len(indices)
for clusterer in self.clusterer:
clusterer.fit( self.memory_bank.vecs )
for k_idx, cluster_idx in enumerate(clusterer.labels_[indices]) :
other_members = np.where( clusterer.labels_ == cluster_idx)[0]
other_members_union = np.union1d(memberships[k_idx], other_members)
memberships[k_idx] = other_members_union.astype(int)
return self.memory_bank.mask( np.array(memberships, dtype=object ) )
def _intersecter(self, n1, n2):
return np.array([
[v1 and v2 for v1, v2 in zip(n1_x, n2_x)]
for n1_x, n2_x in zip(n1, n2 ) ])
def _prob_density(self, codes, indices):
## unormalized differentiable probability densities
ragged = len(set([np.count_nonzero(idx) for idx in indices ] )) != 1
# In case the subsets of memory vectors are all of the same size, broadcasting can be used and the
# batch dimension is handled concisely. This will always be true for the k-nearest neighbour density
if not ragged:
vals = torch.tensor([np.compress(ind, self.memory_bank.vecs, axis=0) for ind in indices],
requires_grad=False)
v_dots = torch.matmul(vals, codes.unsqueeze(-1))
exp_values = torch.exp(torch.div(v_dots, self.temperature))
pdensity = torch.sum(exp_values, dim=1).squeeze(-1)
# Broadcasting not possible if the subsets of memory vectors are of different size, so then manually loop
# over the batch dimension and stack results
else:
xx_container = []
for k_item in range(codes.size(0)):
vals = torch.tensor(np.compress(indices[k_item], self.memory_bank.vecs, axis=0),
requires_grad=False)
v_dots_prime = torch.mv(vals, codes[k_item])
exp_values_prime = torch.exp(torch.div(v_dots_prime, self.temperature))
xx_prime = torch.sum(exp_values_prime, dim=0)
xx_container.append(xx_prime)
pdensity = torch.stack(xx_container, dim=0)
return pdensity
''' Combining Encoder and LALoss
'''
def combine_run_LAClustering(X_data, merger_type='mean', n_vecs=5400, knns=500, n_centroids=600,n_epochs=3):
model = EncoderVGGMerged(merger_type=merger_type)
memory_bank = MemoryBank(n_vecs=n_vecs, dim_vecs=model.channels_code,
memory_mixing_rate=0.5)
memory_bank.vecs = normalize( model.eval_codes_for_(X_data), axis=1)
loss_fx = LocalAggregationLoss(memory_bank=memory_bank,
temperature=0.07,
knns = knns,
clustering_repeats=6,
n_centroids=n_centroids)
o_k = {'lr':0.001, 'momentum':0.9}
paramz = model.get_params() ## parameters
optimizer = torch.optim.SGD( paramz, **o_k)
# 1. train model
model.train()
for epoch in tqdm( range(n_epochs) ):
running_loss = 0
n_inst = 0
for idx, x in enumerate(X_data):
# zero the grads > compute loss > backprop
optimizer.zero_grad()
outie = model(x.float() )
## LA loss scores the codes against the memory bank at these dataset indices
loss = loss_fx(outie, np.array([idx]))
loss.backward()
optimizer.step()
# update aggregates and reporting
running_loss += loss.item()
#running_loss = running_loss/batch_size
print(f"E {epoch}: loss {running_loss}")
class EncoderVGGMerged(EncoderVGG):
def __init__(self, merger_type='mean', pretrained=True ):
super(EncoderVGGMerged, self).__init__(pretrained=pretrained)
if merger_type is None:
self.code_post_process = lambda x: x
self.code_post_process_kwargz = {}
elif merger_type == 'mean':
self.code_post_process = torch.mean
self.code_post_process_kwargz = {'dim':(-2, -1)}
elif merger_type == 'flatten':
self.code_post_process = torch.flatten
self.code_post_process_kwargz = {'start_dim':1, 'end_dim':-1}
else:
raise ValueError("Unknown merger type for the encoder {}".format(merger_type) )
def forward(self, x):
x_current, _ = super().forward(x)
x_code = self.code_post_process(x_current, **self.code_post_process_kwargz)
return x_code
## -==========================
if __name__ == "__main__":
print("****** STARTING ******")
img_dim = (1, 3,224, 224)
fetch_image = lambda x: utilz.Image.fetch_and_resize_image(x, img_dim).astype('f')
# cnn = AutoEncoderVGG(pretrained=True)
# print(cnn)
# X_data = [ torch.tensor(np.random.rand( *img_dim ).astype('f') ) for i in range(5) ]
fdir = "/mnt/externz/zRepoz/datasets/fundus/stare/"
filez = [ f'{fdir}/im0012.ppm', f"{fdir}/im0232.ppm"]
_IMAGEZ = [fetch_image(x) for x in filez]
# X_data = [torch.tensor( x ) for x in _IMAGEZ ]
# train_autoencoder(cnn, X_data)
# x_pred = torch.tensor( np.random.rand( *img_dim ).astype('f') )
# pred = list(predict_autoencoder(cnn, x_pred))
# print( len(pred) , type(pred[0]))
# reformat_output_img = lambda x: x.reshape(224,224,3) # img_as_uint( )
# utilz.Image.plot_images_list( [ reformat_output_img(x) for x in X_data], nc=len(X_data) )
# utilz.Image.plot_images_list([ reformat_output_img(x) for x in pred], nc=len(pred) )
# print("****** FIN ******")
print("****** KMEANS *****")
import cv2
import os
from glob import glob
import matplotlib.pyplot as plt
# img_dim = (224, 224, 3)
# img_dim = (32, 32, 3)
# def fetch_image(x):
# # utilz.Image.fetch_and_resize_image(x, img_dim).astype('f')
# o = cv2.imread( x )
# o = cv2.cvtColor(o, cv2.COLOR_BGR2RGB)
# o = np.float32( o.reshape((-1, 3)) )
# return o
# fdir = "/mnt/externz/zRepoz/datasets/fundus/stare/"
# filez = [ f'{fdir}/im0012.ppm', f"{fdir}/im0232.ppm"] ## glob( f"{fdir}/*.ppm")
# #flattent into 3 color values per pixed 2D array
# _IMAGEZ = [fetch_image(x) for x in filez]
np.random.seed(1234)
n_clusters = 4
_N = 10000
_FILEZ = []
i = 0
for f in glob( f"{fdir}/*.ppm"):
_FILEZ.append( f )
i+=1
if i >= _N:
break
fetch_name = lambda x: (os.path.basename(x).split(".")[0] , np.random.randint(0, n_clusters) )
# X_data = np.array( [ fetch_image(x).reshape( (-1, 3) ) for x in _FILEZ ] ) ##_IMAGEZ #
X_data_fnames = np.array( [ ((i%10 + i//33),*fetch_name(x)) for i, x in enumerate(_FILEZ) ] ) ##_IMAGEZ #
## ==== Open CV K-Means ====
# stopping_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)
# _, labels, (centers ) = cv2.kmeans(X_data, n_clusters, None,
# stopping_criteria, 10, cv2.KMEANS_RANDOM_CENTERS )
# centers = np.uint8( centers )
# labels =labels.flatten()
# ## show clusters
# def fetch_cluster(i):
# # dat = np.array([x[0] for x in X_data_fnames] )
# # return dat[ dat == i ]
# o_ = []
# for d in X_data_fnames:
# if int(d[2]) == i:
# o_.append( (d[0], d[2]) )
# # print( f"cluster {i}: {o_}")
# return np.array(o_)
# # clusters = [ X_data_fnames[0][labels == i] for i in range(n_clusters ) ]
# clusters = [fetch_cluster(i) for i in range(n_clusters ) ]
# colorz = ('r', 'b', 'g', 'black')
# numeric = lambda X : [ int(x) for x in X]
# for clust, colr in zip(clusters, colorz[:len(clusters) ] ):
# plt.scatter( numeric(clust[0]), numeric(clust[1]), c=colr)
# plt.show()
# print( centers.shape , labels.shape )
# # print(labels)
# ## show centers
# img_centers = [x.reshape(img_dim) for x in centers]
# utilz.Image.plot_images_list( img_centers, nc=len(img_centers))
# ## segmenting using the centroids
# ## covert all pixels to the color of the centroids
# segmented_imagez = [ c[l].reshape( img_dim ) for c, l in zip(centers, labels)]
# utilz.Image.plot_images_list( segmented_imagez, nc=len(segmented_imagez))
### ==== K-NN clutering
print("****** K-NN <<<< is supervised *****")
# from sklearn.neighbors import KNeighborsClassifier
# X_data = np.array( [ fetch_image(x).flatten() for x in _FILEZ ] )
# import cv2
# def extract_color_hist(img, bins=(8,8,8)):
# hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
# hist = cv2.calcHist([hsv], [0,1,2], None, bins, [0, 180, 0, 256, 0, 256])
# cv2.normalize(hist, hist) ## hist = cv2.normalize(hist)
# return hist.flatten()
# X_histz = [extract_color_hist(x.reshape(img_dim)) for x in X_data]
# model = KNeighborsClassifier(n_neighbors=n_clusters )
# model.fit(X_data)
# # print(">>>>ACCURACY**: ", model.score())
# model = KNeighborsClassifier(n_neighbors=n_clusters )
# model.fit(X_histz)
# X_data = [torch.tensor( x ) for x in _IMAGEZ ]
# train_autoencoder(cnn, X_data)
print("****** K-MEANS on VGG encoded data *****")
reformat_output_img = lambda x: x.reshape(224,224,3) # img_as_uint( )
X_data = [torch.tensor( fetch_image(x) ) for x in _FILEZ ]
print("1. Data Loaded")
cnn = AutoEncoderVGG(pretrained=True)
train_autoencoder(cnn, X_data)
print("2. Encoder trained")
X_encoded = [ list(predict_autoencoder(cnn, x)) for x in X_data]
print( len(X_encoded) , type(X_encoded[0]))
print("3. data encoded")
# print(X_encoded[0])
## ==== Open CV K-Means ====
X_data2 = np.array( [ np.dstack(x) for x in X_encoded ] )  ## assumed completion of a truncated line
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from soma import aims, aimsalgo
import numpy
import os
import six
import sys
from six.moves import range
def mergeLabelsFromTexture(tex, labels_list, new_label):
"""
inputs:
tex: labeled texture ( from FreeSurfer or an other )
labels_list, new_label: you can overwrite numbers ( labels_list ) with your own number ( new_label )
ouput:
otex: labeled texture with merged regions
"""
otex = aims.TimeTexture_S16()
tex_ar = tex[0].arraydata()
otex[0].assign(tex_ar)
otex_ar = otex[0].arraydata()
for i in labels_list:
otex_ar[otex_ar == int(i)] = new_label
return otex
def extractLabelsFromTexture(tex, labels_list, new_label):
"""
inputs:
tex: labeled texture ( from FreeSurfer or an other )
labels_list, new_label: you can overwrite numbers ( labels_list ) with your own number ( new_label )
output:
otex: labeled texture with merged regions only
"""
otex = aims.TimeTexture_S16()
otex[0].reserve(tex[0].nItem())
for i in range(tex[0].nItem()):
otex[0].push_back(0)
tex_ar = tex[0].arraydata()
otex_ar = otex[0].arraydata()
for i in labels_list:
otex_ar[tex_ar == int(i)] = new_label
return otex
def connectedComponents(mesh, tex, areas_mode=0):
"""
Parameters
----------
mesh
tex: aimsTimeTexture_S16
(one time step) labeled between 1 and LabelsNb, background = 0,
ignored_vertex = -1.
areas_mode:
if = 1: computing area measures of the connected components,
if = 0: no measure (by default).
Returns
-------
step_cc: connectedComponentTex: aimsTimeTexture_S16
time step = LabelsNb, for each time step (label in the tex), texture of
the connected components corresponding to this label (background = -1,
and connected components = values between 1 and nb_cc).
areas_measure: python dictionary
areas_measures[label] = [16.5, 6.0]
(numpy array) if label (in tex) has two connected Components 1 and 2
with area = 16.5 and 6.0 respectively, areas are in square mm
"""
# create a numpy array from aims object
dtex = tex[0].arraydata()
# number of vertices
nbvert = len(mesh.vertex())
# test for homogeneity dimension
if len(dtex) != nbvert:
raise ValueError(
'mesh and texture have not the same dimension...')
# list of existing labels
labels = numpy.unique(dtex)
# remove a specific elements
if 0 in labels:
labels = numpy.delete(labels, numpy.where(labels == 0))
if -1 in labels:
labels = numpy.delete(labels, numpy.where(labels == -1))
otex = aims.TimeTexture_S16()
step_cc = aims.TimeTexture_S16()
if areas_mode:
areas_measures = {}
for label in labels:
otex[0].assign((dtex == label))
label_cc = aimsalgo.AimsMeshLabelConnectedComponent(mesh, otex, 0, 0)
# transform aims.TimeTexture_S16 to numpy array
label_cc_np = label_cc[0].arraydata()
step_cc[label-1].assign(label_cc_np)
if areas_mode:
nb_cc = label_cc_np.max()
areas_measures[label] = numpy.zeros(nb_cc)
for c in range(nb_cc):
# extracts a sub-mesh defined by a texture label value (c+1)
mesh_cc = aims.SurfaceManip.meshExtract(mesh, label_cc, c+1)[0]
# surface area of a triangular mesh (in mm2)
area_cc = aims.SurfaceManip.meshArea(mesh_cc)
areas_measures[label][c] = area_cc
if areas_mode:
return step_cc, areas_measures
else:
return step_cc
def remove_non_principal_connected_components(mesh, tex, trash_label):
"""Keep only the largest connected component in each label, for a label
texture.
Parameters
----------
mesh:
tex: label texture (S16, int)
trash_label: value to replace non-principal components
Returns
-------
out_tex: label texture
"""
t0 = tex[0].arraydata()
t0 += 1 # 0 is a real label
conn_comp, areas = connectedComponents(mesh, tex, areas_mode=True)
t0 -= 1
dtype = tex[0].arraydata().dtype
out_tex = aims.TimeTexture(dtype=dtype)
out_tex[0].assign(numpy.zeros(tex[0].size(), dtype=dtype))
out_arr = out_tex[0].arraydata()
out_arr[:] = trash_label
for label in conn_comp.keys():
comps = conn_comp[label]
largest = numpy.argmax(areas[label + 1]) + 1
comp_arr = comps.arraydata()
out_arr[comp_arr==largest] = label
return out_tex
def meshDiceIndex(mesh, texture1, texture2, timestep1=0,
timestep2=0, labels_table1=None, labels_table2=None):
"""DICE index calculation between two sets of regions defined by label textures on a common mesh.
texture1, texture2: aims.TimeTexture instances, should be int (labels).
timestep1, timestep2: timestep to use in texture1 and texture2.
labels_table1, labels_table2: optional labels translation tables (dicts or arrays) to translate values of texture1 and/or texture2.
Returns: tuple (dice, areas1, areas2, inter) of arrays indexed by region label.
"""
tex1 = texture1[timestep1].arraydata()
tex2 = texture2[timestep2].arraydata()
if labels_table1 is not None:
tex1 = numpy.array([labels_table1[x] for x in tex1])
if labels_table2 is not None:
tex2 = numpy.array([labels_table2[x] for x in tex2])
regions = max(numpy.max(tex1), numpy.max(tex2)) + 1
areas1 = numpy.zeros((regions, ))
areas2 = numpy.zeros((regions, ))
inter = numpy.zeros((regions, ))
vertices = mesh.vertex()
polygons = mesh.polygon()
for poly in polygons:
p = vertices[poly[0]]
u1 = vertices[poly[1]] - p
u2 = vertices[poly[2]] - p
area = u1.crossed(u2).norm() / 6 # 1/3 area for each vertex
l1 = tex1[poly[0]]
l2 = tex2[poly[0]]
areas1[l1] += area
areas2[l2] += area
if l1 == l2: # intersection
inter[l1] += area
l1 = tex1[poly[1]]
l2 = tex2[poly[1]]
areas1[l1] += area
areas2[l2] += area
if l1 == l2: # intersection
inter[l1] += area
l1 = tex1[poly[2]]
l2 = tex2[poly[2]]
areas1[l1] += area
areas2[l2] += area
if l1 == l2: # intersection
inter[l1] += area
dice = inter * 2 / (areas1 + areas2)
return dice, areas1, areas2, inter
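## Usage sketch (names are illustrative): per-label DICE between two
## parcellations of the same mesh; dice[l] == 2 * inter[l] / (areas1[l] + areas2[l]).
## dice, areas1, areas2, inter = meshDiceIndex(mesh, tex_a, tex_b)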
def average_texture(output, inputs):
"""
Create average gyri texture from a group of subject.
"""
# read textures
tex = []
for fname in inputs:
tex.append(aims.read(fname))
# make a 2D array from a series of textures
ar = numpy.vstack([t[0].arraydata() for t in tex])
# replace the negative values by a positive integer label
tmp_label = None
if len(ar[ar == -1]) != 0:
tmp_label = numpy.max(ar) + 1
ar[ar == -1] = tmp_label
# count occurrences
N = numpy.max(ar)
def bin_resized(x):
y = numpy.bincount(x)
y.resize(N + 1) # labels: 1 to 72
return y
cnt = numpy.apply_along_axis(bin_resized, 0, ar)
# get max of occurrences in each vertex
maj = numpy.argmax(cnt, axis=0)
# to keep the same labels, replace (max + 1) by -1
if tmp_label is not None:
maj[maj == tmp_label] = -1
# make an aims texture from result (numpy array)
otex = aims.TimeTexture('S16')
otex[0].assign(maj)
otex.header().update(tex[0].header())
aims.write(otex, output)
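## Usage sketch (hypothetical file names):
## average_texture('group_average_gyri.gii',
##                 ['subj01_gyri.gii', 'subj02_gyri.gii', 'subj03_gyri.gii'])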
def nomenclature_to_colormap(hierarchy, labels_list, as_float=True,
default_color=[0.3, 0.6, 1., 1.]):
"""
Make a colormap from labels and colors of a nomenclature (hierarchy),
following a labels_list order.
Parameters
----------
hierarchy: Hierarchy object
nomenclature
labels_list: list of strings
labels with order. The returned colormap will follow this ordering.
as_float: bool (optional, default: True)
if True, colors will be float values in the [0-1] range.
If False, they will be int values in the [0-255] range.
default_color: list (4 floats) (optional)
Color used for labels not found in the nomenclature. It is given as
floats ([0-1] range).
Returns
-------
colormap: numpy array
array of colors (4 float values in [0-1] range)
"""
colors = []
for label in labels_list:
try:
color = hierarchy.find_color(label, default_color=default_color)
color = list(color)
if len(color) < 4:
color.append(1.)
except:
color = default_color
if not as_float:
color = [int(c*255.9) for c in color]
colors.append(list(color))
return numpy.array(colors)
def vertex_texture_to_polygon_texture(mesh, tex, allow_cut=False):
"""Make a "polygon texture" from a vartex-based label texture.
A polygon texture has a value for each polygon.
For a given polygon the value is taken as the majority of values on its
vertices. If an absolute majority cannot be obtained, the mesh polygons may
be cut to avoid losing precision. This is done if allow_cut is True.
When allow_cut is False, the returned value is the polygon texture.
It may work on meshes of any polygon size (triangles, quads, segments...)
When allow_cut is True, the returned value is a tuple:
* polygon texture
* new mesh with possibly split triangles
It only works for meshes of triangles.
"""
dtype = tex[list(tex.keys())[0]].arraydata().dtype
poly_tex = aims.TimeTexture(dtype=dtype)
if allow_cut:
out_mesh = mesh.__class__(mesh)
for t, tex0 in six.iteritems(tex):
tdata = tex0.arraydata()
ptex0 = poly_tex[t]
ptex0.resize(len(mesh.polygon(t)))
poly_labels = ptex0.arraydata()
if allow_cut:
added_vert = {}
vertex = out_mesh.vertex(t)
polygon = out_mesh.polygon(t)
for p, poly in enumerate(mesh.polygon(t)):
D = len(poly)
labels = [tdata[v] for v in poly]
ulabels = numpy.unique(labels)
ilabels = [numpy.where(labels==u)[0] for u in ulabels]
nlabels = [len(u) for u in ilabels]
if not allow_cut:
maj = ulabels[numpy.argmax(nlabels)]
poly_labels[p] = maj
else:
# WARNING this only works for triangles.
if len(ulabels) == 1:
poly_labels[p] = ulabels[0]
elif len(ulabels) == 2:
# cut off one vertex
iother = labels.index(ulabels[numpy.argmin(nlabels)])
ikeep = [i for i in range(D) if i!=iother]
iv0 = poly[iother]
iv1 = poly[(iother-1) % D]
ind = (min((iv0, iv1)), max((iv0, iv1)))
ivn1 = added_vert.get(ind)
if ivn1 is None:
v1 = (vertex[iv0] + vertex[iv1]) * 0.5
ivn1 = len(vertex)
vertex.append(v1)
added_vert[ind] = ivn1
iv2 = poly[(iother+1) % D]
ind = (min((iv0, iv2)), max((iv0, iv2)))
ivn2 = added_vert.get(ind)
if ivn2 is None:
v2 = (vertex[iv0] + vertex[iv2]) * 0.5
ivn2 = len(vertex)
vertex.append(v2)
added_vert[ind] = ivn2
polygon[p][(iother-1) % D] = ivn1
polygon[p][(iother+1) % D] = ivn2
polygon.append(poly.__class__(iv1, ivn1, ivn2))
polygon.append(poly.__class__(ivn2, iv2, iv1))
poly_labels[p] = tdata[iv0]
ptex0.append(tdata[iv1])
ptex0.append(tdata[iv2])
else:
# cut in 3 regions
bs0 = (min(poly[0], poly[1]), max(poly[0], poly[1]))
bs1 = (min(poly[1], poly[2]), max(poly[1], poly[2]))
bs2 = (min(poly[2], poly[0]), max(poly[2], poly[0]))
bi0 = added_vert.get(bs0)
if bi0 is None:
v0 = (vertex[poly[0]] + vertex[poly[1]]) * 0.5
bi0 = len(vertex)
added_vert[bs0] = bi0
vertex.append(v0)
bi1 = added_vert.get(bs1)
if bi1 is None:
v1 = (vertex[poly[1]] + vertex[poly[2]]) * 0.5
bi1 = len(vertex)
added_vert[bs1] = bi1
vertex.append(v1)
bi2 = added_vert.get(bs2)
if bi2 is None:
v2 = (vertex[poly[2]] + vertex[poly[0]]) * 0.5
bi2 = len(vertex)
added_vert[bs2] = bi2
vertex.append(v2)
bi3 = len(vertex)
v3 = (vertex[poly[0]] + vertex[poly[1]]
+ vertex[poly[2]]) / 3.
vertex.append(v3)
polygon[p][1] = bi0
polygon[p][2] = bi3
polygon.append(poly.__class__(bi3, bi2, poly[0]))
polygon.append(poly.__class__(poly[1], bi1, bi3))
polygon.append(poly.__class__(poly[1], bi3, bi0))
polygon.append(poly.__class__(poly[2], bi2, bi3))
polygon.append(poly.__class__(poly[2], bi3, bi1))
poly_labels[p] = labels[0]
ptex0.append(labels[0])
ptex0.append(labels[1])
ptex0.append(labels[1])
ptex0.append(labels[2])
ptex0.append(labels[2])
if allow_cut:
return (poly_tex, out_mesh)
else:
return poly_tex
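## Usage sketch: majority vote only, or allow triangles to be split
## ptex = vertex_texture_to_polygon_texture(mesh, tex)
## ptex, cut_mesh = vertex_texture_to_polygon_texture(mesh, tex, allow_cut=True)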
def mesh_to_polygon_textured_mesh(mesh, poly_tex):
"""
"""
out_mesh = mesh.__class__()
out_tex = poly_tex.__class__()
polygons = mesh.polygon()
dtype = poly_tex[list(poly_tex.keys())[0]].arraydata().dtype
for t, tex0 in six.iteritems(poly_tex):
print('t:', t)
overt = out_mesh.vertex(t)
overt.assign(mesh.vertex())
onorm = out_mesh.normal(t)
onorm.assign(mesh.normal())
opoly = out_mesh.polygon(t)
opoly.assign(mesh.polygon())
otex = out_tex[t]
otex.assign(numpy.zeros(mesh.vertex().size(), dtype=dtype) - 1)
#otex_arr = otex.arraydata()
tex_arr = tex0.arraydata()
added = {}
for p in range(len(mesh.polygon())):
plabel = tex_arr[p]
poly = opoly[p]
for i, v in enumerate(poly):
if otex[v] < 0:
otex[v] = plabel
elif otex[v] != plabel:
old_new = added.get((v, plabel))
if old_new:
# already added, just change triangle
poly[i] = old_new
else:
# add a new vertex, and change triangle
vb = overt.size()
overt.append(overt[v])
poly[i] = vb
otex.data().append(plabel)
added[(v, plabel)] = vb
#otex_arr = otex.arraydata()
out_mesh.updateNormals()
return out_mesh, out_tex
def change_wrong_labels(cc_label, label, gyri_tex, mesh_neighbors_vector,
cc_tex_label):
"""After a study of its neighbors, wrong label is replaced by the
correct number.
Parameters
----------
cc_label: label of connected component in cc_tex_label
label: label of associated vertices in gyri texture
gyri_tex (aims time texture S16): gyri texture
mesh_neighbors_vector : aims.SurfaceManip.surfaceNeighbours(mesh)
cc_tex_label : texture representing connected components of label
Returns
-------
gyri_tex (aims time texture S16): new gyri_tex texture,
without isolated vertex.
winner_label: the correct number.
"""
indexes = numpy.where(cc_tex_label == cc_label)[0]
neighbor_labels = []
print('Nb of wrong indexes: ', indexes.size)
for i in indexes:
for n in mesh_neighbors_vector[i]:
n_label = gyri_tex[0][n]
if n_label != label:
neighbor_labels.append(n_label)
v_labels = numpy.unique(neighbor_labels)
max_count = 0
winner_label = -1
for l in v_labels:
nb_v_labels = neighbor_labels.count(l)
if nb_v_labels > max_count:
print('Number of neighbor labels: ', nb_v_labels, 'for', l)
winner_label = l
max_count = nb_v_labels
for i in indexes:
gyri_tex[0][i] = winner_label
return gyri_tex, winner_label
def find_wrong_labels(mesh, gyriTex):
"""
Parameters
----------
mesh:
gyriTex: gyri texture
Returns
-------
wrong_labels: list of labels whose region splits into more than one
connected component. (Components and their areas are computed
internally via connectedComponents(mesh, gyriTex, areas_mode=1);
see that function's docstring for the cctex / areas_measures formats.)
"""
meshNeighborsVector = aims.SurfaceManip.surfaceNeighbours(mesh)
cctex, areas_measures = connectedComponents(
mesh, gyriTex, areas_mode=1)
wrong_labels = []
for label in areas_measures.keys():
if areas_measures[label].size != 1:
wrong_labels.append(label)
return wrong_labels
def clean_gyri_texture(mesh, gyri_tex):
"""Cleaning a gyri texture by using connected components.
Parameters
----------
mesh (aims time surface):
white mesh associated to gyri_tex
gyri_tex (aims time texture S16):
gyri texture as full FreeSurfer parcellation.
Return
------
gyri_tex (aims time texture S16):
new gyri texture, without isolated vertex.
"""
# get a list of neighbouring nodes from a surface mesh
mesh_neighbors_vector = aims.SurfaceManip.surfaceNeighbours(mesh)
cc_tex, areas_measures = connectedComponents(
mesh, gyri_tex, areas_mode=1)
wrong_labels = []
for label in areas_measures.keys():
if areas_measures[label].size != 1:
wrong_labels.append(label)
for label in wrong_labels:
cc_tex_label = cc_tex[label - 1].arraydata()
areas_measures_cc = areas_measures[label]
cc_nb = areas_measures_cc.size
for l in range(1, cc_nb):
gyri_tex, win = change_wrong_labels(
l + 1, label, gyri_tex, mesh_neighbors_vector, cc_tex_label)
return gyri_tex
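## Usage sketch (hypothetical file names):
## mesh = aims.read('subject_Lwhite.gii')
## gyri_tex = aims.read('subject_Lwhite_gyri.gii')
## gyri_tex = clean_gyri_texture(mesh, gyri_tex)
## aims.write(gyri_tex, 'subject_Lwhite_gyri_clean.gii')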
def set_texture_colormap(texture, colormap, cmap_name='custom',
tex_max=None, tex_min=None, tex_index=0,
col_mapping='all'):
""" Set a colormap in a texture object header.
The texture object may be any kind of textured object: a TimeTexture
instance, or a Volume.
Parameters
----------
texture: TimeTexture, Volume...
The texture object should have a header() method.
colormap: array, Volume, or filename
The colormap may be provided as RGB or RGBA, and as an aims Volume
object, or a numpy array, or as an image filename. It should be a 1D
colormap (for now at least).
cmap_name: str (optional)
name of the colormap to be used in Anatomist.
tex_max: float (optional)
Max texture value to be mapped to the colormap bounds. It is used to
scale the max value of the colormap in Anatomist. If not specified,
the texture or volume max will be looked for in the texture object. Used
only if col_mapping is "one".
tex_min: float (optional)
Min texture value to be mapped to the colormap bounds. It is used to
scale the max value of the colormap in Anatomist. If not specified,
the texture or volume max will be looked for in the texture object. Used
only if col_mapping is "one".
tex_index: int (optional)
Texture index in the textured object
col_mapping: str or None (optional)
"all": map the full texture range to the colormap bounds (default);
"one": one-to-one mapping between colors and values (int values);
"none" or None: don't force any mapping - anatomist will choose to use a
histogram if needed.
"""
header = texture.header()
if isinstance(colormap, str):
# colormap is a filename
colormap = aims.read(colormap)
if hasattr(colormap, 'np'):
cmap = colormap['v']
else:
# assume already a np array nx3 or nx4
cmap = colormap
if cmap.shape[-1] == 4:
mode = 'rgba'
else:
mode = 'rgb'
cols = cmap.flatten().tolist()
nmax = cmap.shape[0]
params = {}
if col_mapping not in (None, "none"):
if col_mapping == "all":
params['min'] = 0.
params['max'] = 1.
elif col_mapping == "one":
if tex_max is None:
if hasattr(texture, 'max'):
# volume ?
tex_max = texture.max()
elif hasattr(texture, 'np'):
# volume also ?
tex_max = numpy.max(texture.np)
import unittest
import numpy as np
from sotodlib import core
class TestAxisManager(unittest.TestCase):
# Basic behavior of each axis type.
def test_100_index(self):
a1 = np.zeros(100)
a1[10] = 1.
aman = core.AxisManager(core.IndexAxis('samps', len(a1)))
aman.wrap('a1', a1, [(0, 'samps')])
aman.restrict('samps', (10, 30))
self.assertNotEqual(aman.a1[0], 0.)
self.assertEqual(len(aman.a1), 20)
def test_110_offset(self):
a1 = np.zeros(100)
import cv2
import numpy as np
## aug functions
def identity_func(img):
return img
def autocontrast_func(img, cutoff=0):
'''
same output as PIL.ImageOps.autocontrast
'''
n_bins = 256
def tune_channel(ch):
n = ch.size
cut = cutoff * n // 100
if cut == 0:
high, low = ch.max(), ch.min()
else:
hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
low = np.argwhere(np.cumsum(hist) > cut)
import os
import numpy as np
import json
from ._base_dataset import _BaseDataset
from ..utils import TrackEvalException
from .. import utils
from .. import _timing
class YouTubeVIS(_BaseDataset):
"""Dataset class for YouTubeVIS tracking"""
@staticmethod
def get_default_dataset_config():
"""Default class config values"""
code_path = utils.get_code_path()
default_config = {
'GT_FOLDER': os.path.join(code_path, 'data/gt/youtube_vis/'), # Location of GT data
'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/youtube_vis/'),
# Trackers location
'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER)
'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder)
'CLASSES_TO_EVAL': None, # Classes to eval (if None, all classes)
'SPLIT_TO_EVAL': 'train_sub_split', # Valid: 'train', 'val', 'train_sub_split'
'PRINT_CONFIG': True, # Whether to print current config
'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
'TRACKER_DISPLAY_NAMES': None, # Names of trackers to display, if None: TRACKERS_TO_EVAL
}
return default_config
def __init__(self, config=None):
"""Initialise dataset, checking that all required files are present"""
super().__init__()
# Fill non-given config values with defaults
self.config = utils.init_config(config, self.get_default_dataset_config(), self.get_name())
self.gt_fol = self.config['GT_FOLDER'] + 'youtube_vis_' + self.config['SPLIT_TO_EVAL']
self.tracker_fol = self.config['TRACKERS_FOLDER'] + 'youtube_vis_' + self.config['SPLIT_TO_EVAL']
self.use_super_categories = False
self.should_classes_combine = True
self.output_fol = self.config['OUTPUT_FOLDER']
if self.output_fol is None:
self.output_fol = self.tracker_fol
self.output_sub_fol = self.config['OUTPUT_SUB_FOLDER']
self.tracker_sub_fol = self.config['TRACKER_SUB_FOLDER']
if not os.path.exists(self.gt_fol):
print("GT folder not found: " + self.gt_fol)
raise TrackEvalException("GT folder not found: " + os.path.basename(self.gt_fol))
gt_dir_files = [file for file in os.listdir(self.gt_fol) if file.endswith('.json')]
if len(gt_dir_files) != 1:
raise TrackEvalException(self.gt_fol + ' does not contain exactly one json file.')
with open(os.path.join(self.gt_fol, gt_dir_files[0])) as f:
self.gt_data = json.load(f)
# Get classes to eval
self.valid_classes = [cls['name'] for cls in self.gt_data['categories']]
cls_name_to_cls_id_map = {cls['name']: cls['id'] for cls in self.gt_data['categories']}
if self.config['CLASSES_TO_EVAL']:
self.class_list = [cls.lower() if cls.lower() in self.valid_classes else None
for cls in self.config['CLASSES_TO_EVAL']]
if not all(self.class_list):
raise TrackEvalException('Attempted to evaluate an invalid class. Only classes ' +
', '.join(self.valid_classes) + ' are valid.')
else:
self.class_list = [cls['name'] for cls in self.gt_data['categories']]
self.class_name_to_class_id = {k: v for k, v in cls_name_to_cls_id_map.items() if k in self.class_list}
# Get sequences to eval and check gt files exist
self.seq_list = [vid['file_names'][0].split('/')[0] for vid in self.gt_data['videos']]
self.seq_name_to_seq_id = {vid['file_names'][0].split('/')[0]: vid['id'] for vid in self.gt_data['videos']}
self.seq_lengths = {vid['id']: len(vid['file_names']) for vid in self.gt_data['videos']}
# encode masks and compute track areas
self._prepare_gt_annotations()
# Get trackers to eval
if self.config['TRACKERS_TO_EVAL'] is None:
self.tracker_list = os.listdir(self.tracker_fol)
else:
self.tracker_list = self.config['TRACKERS_TO_EVAL']
if self.config['TRACKER_DISPLAY_NAMES'] is None:
self.tracker_to_disp = dict(zip(self.tracker_list, self.tracker_list))
elif (self.config['TRACKERS_TO_EVAL'] is not None) and (
len(self.config['TRACKER_DISPLAY_NAMES']) == len(self.tracker_list)):
self.tracker_to_disp = dict(zip(self.tracker_list, self.config['TRACKER_DISPLAY_NAMES']))
else:
raise TrackEvalException('List of tracker files and tracker display names do not match.')
# counter for globally unique track IDs
self.global_tid_counter = 0
self.tracker_data = dict()
for tracker in self.tracker_list:
tracker_dir_path = os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol)
tr_dir_files = [file for file in os.listdir(tracker_dir_path) if file.endswith('.json')]
if len(tr_dir_files) != 1:
raise TrackEvalException(tracker_dir_path + ' does not contain exactly one json file.')
with open(os.path.join(tracker_dir_path, tr_dir_files[0])) as f:
curr_data = json.load(f)
self.tracker_data[tracker] = curr_data
def get_display_name(self, tracker):
return self.tracker_to_disp[tracker]
def _load_raw_file(self, tracker, seq, is_gt):
"""Load a file (gt or tracker) in the YouTubeVIS format
If is_gt, this returns a dict which contains the fields:
[gt_ids, gt_classes] : list (for each timestep) of 1D NDArrays (for each det).
[gt_dets]: list (for each timestep) of lists of detections.
[classes_to_gt_tracks]: dictionary with class values as keys and list of dictionaries (with frame indices as
keys and corresponding segmentations as values) for each track
[classes_to_gt_track_ids, classes_to_gt_track_areas, classes_to_gt_track_iscrowd]: dictionary with class values
as keys and lists (for each track) as values
if not is_gt, this returns a dict which contains the fields:
[tracker_ids, tracker_classes, tracker_confidences] : list (for each timestep) of 1D NDArrays (for each det).
[tracker_dets]: list (for each timestep) of lists of detections.
[classes_to_dt_tracks]: dictionary with class values as keys and list of dictionaries (with frame indices as
keys and corresponding segmentations as values) for each track
[classes_to_dt_track_ids, classes_to_dt_track_areas]: dictionary with class values as keys and lists as values
[classes_to_dt_track_scores]: dictionary with class values as keys and 1D numpy arrays as values
"""
# select sequence tracks
seq_id = self.seq_name_to_seq_id[seq]
if is_gt:
tracks = [ann for ann in self.gt_data['annotations'] if ann['video_id'] == seq_id]
else:
tracks = self._get_tracker_seq_tracks(tracker, seq_id)
# Convert data to required format
num_timesteps = self.seq_lengths[seq_id]
data_keys = ['ids', 'classes', 'dets']
if not is_gt:
data_keys += ['tracker_confidences']
raw_data = {key: [None] * num_timesteps for key in data_keys}
for t in range(num_timesteps):
raw_data['dets'][t] = [track['segmentations'][t] for track in tracks if track['segmentations'][t]]
raw_data['ids'][t] = np.atleast_1d([track['id'] for track in tracks
if track['segmentations'][t]]).astype(int)
raw_data['classes'][t] = np.atleast_1d([track['category_id'] for track in tracks
if track['segmentations'][t]]).astype(int)
if not is_gt:
raw_data['tracker_confidences'][t] = np.atleast_1d([track['score'] for track in tracks
if track['segmentations'][t]]).astype(float)
if is_gt:
key_map = {'ids': 'gt_ids',
'classes': 'gt_classes',
'dets': 'gt_dets'}
else:
key_map = {'ids': 'tracker_ids',
'classes': 'tracker_classes',
'dets': 'tracker_dets'}
for k, v in key_map.items():
raw_data[v] = raw_data.pop(k)
all_cls_ids = {self.class_name_to_class_id[cls] for cls in self.class_list}
classes_to_tracks = {cls: [track for track in tracks if track['category_id'] == cls] for cls in all_cls_ids}
# mapping from classes to track representations and track information
raw_data['classes_to_tracks'] = {cls: [{i: track['segmentations'][i]
for i in range(len(track['segmentations']))} for track in tracks]
for cls, tracks in classes_to_tracks.items()}
raw_data['classes_to_track_ids'] = {cls: [track['id'] for track in tracks]
for cls, tracks in classes_to_tracks.items()}
raw_data['classes_to_track_areas'] = {cls: [track['area'] for track in tracks]
for cls, tracks in classes_to_tracks.items()}
if is_gt:
raw_data['classes_to_gt_track_iscrowd'] = {cls: [track['iscrowd'] for track in tracks]
for cls, tracks in classes_to_tracks.items()}
else:
raw_data['classes_to_dt_track_scores'] = {cls: np.array([track['score'] for track in tracks])
for cls, tracks in classes_to_tracks.items()}
if is_gt:
key_map = {'classes_to_tracks': 'classes_to_gt_tracks',
'classes_to_track_ids': 'classes_to_gt_track_ids',
'classes_to_track_areas': 'classes_to_gt_track_areas'}
else:
key_map = {'classes_to_tracks': 'classes_to_dt_tracks',
'classes_to_track_ids': 'classes_to_dt_track_ids',
'classes_to_track_areas': 'classes_to_dt_track_areas'}
for k, v in key_map.items():
raw_data[v] = raw_data.pop(k)
raw_data['num_timesteps'] = num_timesteps
raw_data['seq'] = seq
return raw_data
@_timing.time
def get_preprocessed_seq_data(self, raw_data, cls):
""" Preprocess data for a single sequence for a single class ready for evaluation.
Inputs:
- raw_data is a dict containing the data for the sequence already read in by get_raw_seq_data().
- cls is the class to be evaluated.
Outputs:
- data is a dict containing all of the information that metrics need to perform evaluation.
It contains the following fields:
[num_timesteps, num_gt_ids, num_tracker_ids, num_gt_dets, num_tracker_dets] : integers.
[gt_ids, tracker_ids, tracker_confidences]: list (for each timestep) of 1D NDArrays (for each det).
[gt_dets, tracker_dets]: list (for each timestep) of lists of detections.
[similarity_scores]: list (for each timestep) of 2D NDArrays.
Notes:
General preprocessing (preproc) occurs in 4 steps. Some datasets may not use all of these steps.
1) Extract only detections relevant for the class to be evaluated (including distractor detections).
2) Match gt dets and tracker dets. Remove tracker dets that are matched to a gt det that is of a
distractor class, or otherwise marked as to be removed.
3) Remove unmatched tracker dets if they fall within a crowd ignore region or don't meet a certain
other criteria (e.g. are too small).
4) Remove gt dets that were only useful for preprocessing and not for actual evaluation.
After the above preprocessing steps, this function also calculates the number of gt and tracker detections
and unique track ids. It also relabels gt and tracker ids to be contiguous and checks that ids are
unique within each timestep.
YouTubeVIS:
In YouTubeVIS, the 4 preproc steps are as follow:
1) There are 40 classes which are evaluated separately.
2) No matched tracker dets are removed.
3) No unmatched tracker dets are removed.
4) No gt dets are removed.
Further, for TrackMAP computation track representations for the given class are accessed from a dictionary
and the tracks from the tracker data are sorted according to the tracker confidence.
"""
cls_id = self.class_name_to_class_id[cls]
data_keys = ['gt_ids', 'tracker_ids', 'gt_dets', 'tracker_dets', 'similarity_scores']
data = {key: [None] * raw_data['num_timesteps'] for key in data_keys}
unique_gt_ids = []
unique_tracker_ids = []
num_gt_dets = 0
num_tracker_dets = 0
for t in range(raw_data['num_timesteps']):
# Only extract relevant dets for this class for eval (cls)
gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id)
gt_class_mask = gt_class_mask.astype(bool)
gt_ids = raw_data['gt_ids'][t][gt_class_mask]
gt_dets = [raw_data['gt_dets'][t][ind] for ind in range(len(gt_class_mask)) if gt_class_mask[ind]]
tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id)
tracker_class_mask = tracker_class_mask.astype(bool)
tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask]
tracker_dets = [raw_data['tracker_dets'][t][ind] for ind in range(len(tracker_class_mask)) if
tracker_class_mask[ind]]
similarity_scores = raw_data['similarity_scores'][t][gt_class_mask, :][:, tracker_class_mask]
data['tracker_ids'][t] = tracker_ids
data['tracker_dets'][t] = tracker_dets
data['gt_ids'][t] = gt_ids
data['gt_dets'][t] = gt_dets
data['similarity_scores'][t] = similarity_scores
unique_gt_ids += list(np.unique(data['gt_ids'][t]))
unique_tracker_ids += list(np.unique(data['tracker_ids'][t]))
"""
Run the test step on all the LAMOST DR2 objects.
"""
import numpy as np
import glob
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, '/home/annaho')
#from lamost import load_spectra
#import dataset
#import model
from TheCannon import dataset
from TheCannon import model
#from astropy.table import Table
from matplotlib.colors import LogNorm
from matplotlib import rc
rc('font', family='serif')
rc('text', usetex=True)
import os
def test_step_iteration(ds, m, starting_guess):
errs, chisq = m.infer_labels(ds, starting_guess)
return ds.test_label_vals, chisq, errs
def test_step(date):
direc = "../xcalib_4labels"
wl = np.load("%s/wl.npz" %direc)['arr_0']
test_ID = np.load("%s/output/%s_ids.npz" %(direc, date))['arr_0']
print(str(len(test_ID)) + " objects")
test_flux = np.load("%s/output/%s_norm.npz" %(direc,date))['arr_0']
test_ivar = np.load("%s/output/%s_norm.npz" %(direc,date))['arr_1']  # assumed: ivar stored as the archive's second array
import pkg_resources
import re
import requests
import numpy as np
import scipy as sp
import scipy.sparse
import scipy.sparse.linalg
from . import index
__all__ = ['PowerNetwork', 'load_case']
class PowerNetwork:
def __init__(self, basemva, bus=None, gen=None, gencost=None, branch=None, perunit=True):
if type(basemva) is dict:
data = basemva
self.baseMVA = data['baseMVA']
self.branch = dict()
for i, col in enumerate(index.branch):
self.branch[col] = data['branch'][:, i]
self.bus = dict()
for i, col in enumerate(index.bus):
self.bus[col] = data['bus'][:, i]
self.gen = dict()
for i, col in enumerate(index.gen):
self.gen[col] = data['gen'][:, i]
self.gencost = dict()
for i, col in enumerate(index.cost):
if col == 'COST':
self.gencost[col] = data['gencost'][:, i:]
break
self.gencost[col] = data['gencost'][:, i]
elif self.bus is not None and self.gen is not None and self.gencost is not None and self.branch is not None:
self.baseMVA = basemva
self.bus = dict()
for i, col in enumerate(index.bus):
self.bus[col] = bus[:, i]
self.gen = dict()
for i, col in enumerate(index.gen):
self.gen[col] = gen[:, i]
self.gencost = dict()
for i, col in enumerate(index.cost):
if col == 'COST':
self.gencost[col] = gencost[:, i:]
break
self.gencost[col] = gencost[:, i]
self.branch = dict()
for i, col in enumerate(index.branch):
self.branch[col] = branch[:, i]
else:
raise TypeError('Invalid input to power network. Must be either dict or all arguments must be filled.')
self.n_l = int(np.sum(self.branch['BR_STATUS']))
self.n_b = len(self.bus['BUS_I'])
self.n_g = len(self.gen['GEN_BUS'])
# Per-unit transformations
if perunit:
self.bus['PD'] /= self.baseMVA
self.bus['QD'] /= self.baseMVA
self.gen['PG'] /= self.baseMVA
self.gen['QG'] /= self.baseMVA
self.gen['QMAX'] /= self.baseMVA
self.gen['QMIN'] /= self.baseMVA
self.gen['VG'] /= self.baseMVA
self.gen['PMAX'] /= self.baseMVA
self.gen['PMIN'] /= self.baseMVA
self.gen['PC1'] /= self.baseMVA
self.gen['PC2'] /= self.baseMVA
self.gen['QC1MIN'] /= self.baseMVA
self.gen['QC1MAX'] /= self.baseMVA
self.gen['QC2MIN'] /= self.baseMVA
self.gen['QC2MAX'] /= self.baseMVA
self.gen['RAMP_AGC'] /= self.baseMVA
self.gen['RAMP_10'] /= self.baseMVA
self.gen['RAMP_30'] /= self.baseMVA
self.gen['RAMP_Q'] /= self.baseMVA
self.gencost['COST'] /= self.baseMVA
self.branch['RATE_A'] /= self.baseMVA
self.branch['RATE_B'] /= self.baseMVA
self.branch['RATE_C'] /= self.baseMVA
# Add nodal value of lost load
self.voll = 70 * np.max(self.gencost['COST'][:, -2]) * np.ones((self.n_b,))
self.Bbus = None
self.Bf = None
self.Pbusinj = None
self.Pfinj = None
self.ISF = None
self.lineOutages = None
def setContingencyLimits(self, force=False):
# Artificially enforce DA and SE limits if unenforced
if force or np.all(self.branch['RATE_A'] >= self.branch['RATE_B']):
self.branch['RATE_B'] *= 1.1
if force or np.all(self.branch['RATE_A'] >= self.branch['RATE_C']):
self.branch['RATE_C'] *= 1.7
# Artificially set ramp capacity if unset
if np.all(self.gen['RAMP_AGC'] == 0):
import atexit
from abc import ABCMeta
import numpy as np
import tensorflow as tf
from sklearn.base import ClassifierMixin, RegressorMixin
from ..models import AbstractSupervisedDBN as BaseAbstractSupervisedDBN
from ..models import BaseModel
from ..models import BinaryRBM as BaseBinaryRBM
from ..models import UnsupervisedDBN as BaseUnsupervisedDBN
from ..utils import batch_generator, to_categorical
def close_session():
sess.close()
sess = tf.Session()
atexit.register(close_session)
def weight_variable(func, shape, stddev, dtype=tf.float32):
initial = func(shape, stddev=stddev, dtype=dtype)
return tf.Variable(initial)
def bias_variable(value, shape, dtype=tf.float32):
initial = tf.constant(value, shape=shape, dtype=dtype)
return tf.Variable(initial)
class BaseTensorFlowModel(BaseModel):
def save(self, save_path):
import pickle
with open(save_path, 'wb') as fp:
pickle.dump(self.to_dict(), fp)
@classmethod
def load(cls, load_path):
import pickle
with open(load_path, 'rb') as fp:
dct_to_load = pickle.load(fp)
return cls.from_dict(dct_to_load)
def to_dict(self):
dct_to_save = {name: self.__getattribute__(
name) for name in self._get_param_names()}
dct_to_save.update(
{name: self.__getattribute__(name).eval(sess) for name in self._get_weight_variables_names()})
return dct_to_save
@classmethod
def from_dict(cls, dct_to_load):
pass
def _build_model(self, weights=None):
pass
def _initialize_weights(self, weights):
pass
@classmethod
def _get_weight_variables_names(cls):
pass
@classmethod
def _get_param_names(cls):
pass
class BinaryRBM(BaseBinaryRBM, BaseTensorFlowModel):
"""
This class implements a Binary Restricted Boltzmann machine based on TensorFlow.
"""
def fit(self, X):
"""
Fit a model given data.
:param X: array-like, shape = (n_samples, n_features)
:return:
"""
self.n_visible_units = X.shape[1]
# Initialize RBM parameters
self._build_model()
sess.run(tf.variables_initializer([self.W, self.c, self.b]))
if self.optimization_algorithm == 'sgd':
self._stochastic_gradient_descent(X)
else:
raise ValueError("Invalid optimization algorithm.")
return
@classmethod
def _get_weight_variables_names(cls):
return ['W', 'c', 'b']
@classmethod
def _get_param_names(cls):
return ['n_hidden_units',
'n_visible_units',
'activation_function',
'optimization_algorithm',
'learning_rate',
'n_epochs',
'contrastive_divergence_iter',
'batch_size',
'verbose',
'_activation_function_class']
def _initialize_weights(self, weights):
if weights:
for attr_name, value in weights.items():
self.__setattr__(attr_name, tf.Variable(value))
else:
if self.activation_function == 'sigmoid':
stddev = 1.0 / np.sqrt(self.n_visible_units)
self.W = weight_variable(
tf.random_normal, [self.n_hidden_units, self.n_visible_units], stddev)
self.c = weight_variable(
tf.random_normal, [self.n_hidden_units], stddev)
self.b = weight_variable(
tf.random_normal, [self.n_visible_units], stddev)
self._activation_function_class = tf.nn.sigmoid
elif self.activation_function == 'relu':
stddev = 0.1 / np.sqrt(self.n_visible_units)
self.W = weight_variable(tf.truncated_normal, [
self.n_hidden_units, self.n_visible_units], stddev)
self.c = bias_variable(stddev, [self.n_hidden_units])
self.b = bias_variable(stddev, [self.n_visible_units])
self._activation_function_class = tf.nn.relu
else:
raise ValueError("Invalid activation function.")
def _build_model(self, weights=None):
"""
Builds TensorFlow model.
:return:
"""
# initialize weights and biases
self._initialize_weights(weights)
# TensorFlow operations
self.visible_units_placeholder = tf.placeholder(
tf.float32, shape=[None, self.n_visible_units])
self.compute_hidden_units_op = self._activation_function_class(
tf.transpose(tf.matmul(self.W, tf.transpose(self.visible_units_placeholder))) + self.c)
self.hidden_units_placeholder = tf.placeholder(
tf.float32, shape=[None, self.n_hidden_units])
self.compute_visible_units_op = self._activation_function_class(
tf.matmul(self.hidden_units_placeholder, self.W) + self.b)
self.random_uniform_values = tf.Variable(
tf.random_uniform([self.batch_size, self.n_hidden_units]))
sample_hidden_units_op = tf.to_float(
self.random_uniform_values < self.compute_hidden_units_op)
self.random_variables = [self.random_uniform_values]
# Positive gradient
# Outer product. N is the batch size length.
# From http://stackoverflow.com/questions/35213787/tensorflow-batch-outer-product
positive_gradient_op = tf.matmul(tf.expand_dims(sample_hidden_units_op, 2), # [N, U, 1]
tf.expand_dims(self.visible_units_placeholder, 1)) # [N, 1, V]
# Negative gradient
# Gibbs sampling
sample_hidden_units_gibbs_step_op = sample_hidden_units_op
for t in range(self.contrastive_divergence_iter):
compute_visible_units_op = self._activation_function_class(
tf.matmul(sample_hidden_units_gibbs_step_op, self.W) + self.b)
compute_hidden_units_gibbs_step_op = self._activation_function_class(
tf.transpose(tf.matmul(self.W, tf.transpose(compute_visible_units_op))) + self.c)
random_uniform_values = tf.Variable(
tf.random_uniform([self.batch_size, self.n_hidden_units]))
sample_hidden_units_gibbs_step_op = tf.to_float(
random_uniform_values < compute_hidden_units_gibbs_step_op)
self.random_variables.append(random_uniform_values)
negative_gradient_op = tf.matmul(tf.expand_dims(sample_hidden_units_gibbs_step_op, 2), # [N, U, 1]
tf.expand_dims(compute_visible_units_op, 1)) # [N, 1, V]
compute_delta_W = tf.reduce_mean(
positive_gradient_op - negative_gradient_op, 0)
compute_delta_b = tf.reduce_mean(
self.visible_units_placeholder - compute_visible_units_op, 0)
compute_delta_c = tf.reduce_mean(
sample_hidden_units_op - sample_hidden_units_gibbs_step_op, 0)
self.update_W = tf.assign_add(
self.W, self.learning_rate * compute_delta_W)
self.update_b = tf.assign_add(
self.b, self.learning_rate * compute_delta_b)
self.update_c = tf.assign_add(
self.c, self.learning_rate * compute_delta_c)
@classmethod
def from_dict(cls, dct_to_load):
weights = {var_name: dct_to_load.pop(
var_name) for var_name in cls._get_weight_variables_names()}
_activation_function_class = dct_to_load.pop(
'_activation_function_class')
n_visible_units = dct_to_load.pop('n_visible_units')
instance = cls(**dct_to_load)
setattr(instance, '_activation_function_class',
_activation_function_class)
setattr(instance, 'n_visible_units', n_visible_units)
# Initialize RBM parameters
instance._build_model(weights)
sess.run(tf.variables_initializer(
[getattr(instance, name) for name in cls._get_weight_variables_names()]))
return instance
def _stochastic_gradient_descent(self, _data):
"""
Performs stochastic gradient descend optimization algorithm.
:param _data: array-like, shape = (n_samples, n_features)
:return:
"""
for iteration in range(1, self.n_epochs + 1):
idx = np.random.permutation(len(_data))
data = _data[idx]
for batch in batch_generator(self.batch_size, data):
if len(batch) < self.batch_size:
# Pad with zeros
pad = np.zeros(
(self.batch_size - batch.shape[0], batch.shape[1]), dtype=batch.dtype)
batch = np.vstack((batch, pad))
# Need to re-sample from uniform distribution
sess.run(tf.variables_initializer(self.random_variables))
sess.run([self.update_W, self.update_b, self.update_c],
feed_dict={self.visible_units_placeholder: batch})
if self.verbose:
error = self._compute_reconstruction_error(data)
print(">> Epoch %d finished \tRBM Reconstruction error %f" %
(iteration, error))
def _compute_hidden_units_matrix(self, matrix_visible_units):
"""
Computes hidden unit outputs.
:param matrix_visible_units: array-like, shape = (n_samples, n_features)
:return:
"""
return sess.run(self.compute_hidden_units_op,
feed_dict={self.visible_units_placeholder: matrix_visible_units})
def _compute_visible_units_matrix(self, matrix_hidden_units):
"""
Computes visible (or input) unit outputs.
:param matrix_hidden_units: array-like, shape = (n_samples, n_features)
:return:
"""
return sess.run(self.compute_visible_units_op,
feed_dict={self.hidden_units_placeholder: matrix_hidden_units})
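## Usage sketch for BinaryRBM (parameter values are illustrative; `transform`
## is assumed to come from the base RBM class). X is float32, scaled to [0, 1].
## rbm = BinaryRBM(n_hidden_units=256, activation_function='sigmoid',
##                 optimization_algorithm='sgd', learning_rate=1e-3,
##                 n_epochs=10, contrastive_divergence_iter=1,
##                 batch_size=32, verbose=True)
## rbm.fit(X)
## H = rbm.transform(X)  # hidden-unit activations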
class UnsupervisedDBN(BaseUnsupervisedDBN, BaseTensorFlowModel):
"""
This class implements a unsupervised Deep Belief Network in TensorFlow
"""
def __init__(self, **kwargs):
super(UnsupervisedDBN, self).__init__(**kwargs)
self.rbm_class = BinaryRBM
@classmethod
def _get_param_names(cls):
return ['hidden_layers_structure',
'activation_function',
'optimization_algorithm',
'learning_rate_rbm',
'n_epochs_rbm',
'contrastive_divergence_iter',
'batch_size',
'verbose']
@classmethod
def _get_weight_variables_names(cls):
return []
def to_dict(self):
dct_to_save = super(UnsupervisedDBN, self).to_dict()
dct_to_save['rbm_layers'] = [rbm.to_dict() for rbm in self.rbm_layers]
return dct_to_save
@classmethod
def from_dict(cls, dct_to_load):
rbm_layers = dct_to_load.pop('rbm_layers')
instance = cls(**dct_to_load)
setattr(instance, 'rbm_layers', [
instance.rbm_class.from_dict(rbm) for rbm in rbm_layers])
return instance
class TensorFlowAbstractSupervisedDBN(BaseAbstractSupervisedDBN, BaseTensorFlowModel):
__metaclass__ = ABCMeta
def __init__(self, **kwargs):
super(TensorFlowAbstractSupervisedDBN, self).__init__(
UnsupervisedDBN, **kwargs)
@classmethod
def _get_param_names(cls):
return ['n_iter_backprop',
'l2_regularization',
'learning_rate',
'batch_size',
'dropout_p',
'verbose']
@classmethod
def _get_weight_variables_names(cls):
return ['W', 'b']
def _initialize_weights(self, weights):
if weights:
for attr_name, value in weights.items():
self.__setattr__(attr_name, tf.Variable(value))
else:
if self.unsupervised_dbn.activation_function == 'sigmoid':
stddev = 1.0 / np.sqrt(self.input_units)
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """
        :param hkl: a set of Miller indices
        :type hkl: Scientific.N.array_type
        :return: a tuple (miller_indices, phase_factor) of two arrays
                 of length equal to the number of space group
                 transformations. miller_indices contains the Miller
                 indices of each reflection equivalent by symmetry to the
                 reflection hkl (including hkl itself as the first
                 element). phase_factor contains the phase factors that
                 must be applied to the structure factor of reflection
                 hkl to obtain the structure factor of the symmetry
                 equivalent reflection.
        :rtype: tuple
        """
        hkls = N.dot(self.transposed_rotations, hkl)
        p = N.multiply.reduce(self.phase_factors**hkl, -1)
        return hkls, p
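
# A minimal usage sketch (assumptions: this file is importable as a module
# named `space_groups`, and numpy is available; the dictionary populated
# below maps both numbers and Hermann-Mauguin symbols to the same objects):
#
#     import numpy as N
#     from space_groups import space_groups
#
#     sg = space_groups['P 21 21 21']     # same object as space_groups[19]
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#     assert hkls.shape == (len(sg), 3) and phases.shape == (len(sg),)
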
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
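# From space group 63 (C m c m) through 68, the groups are C-centred: each
# primitive operator above is listed a second time with the centring
# translation (1/2, 1/2, 0) added to its translation part, which is why these
# blocks contain 16 operators instead of 8.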
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
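# Centred lattices follow the same duplication scheme: the F-centred group 69
# (F m m m) lists each primitive operator four times, once per centring vector
# (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0), while body-centred (I)
# groups such as 71 (I m m m) list each operator twice, adding (1/2,1/2,1/2).
# Translations are deliberately left unreduced, so a component set such as
# (1, 1/2, 1/2) is equivalent to (0, 1/2, 1/2) modulo lattice translations.
# A sketch of reducing them into [0, 1) (illustrative, not part of the data):
#
#     def _reduced_translation(trans_num, trans_den):
#         return (trans_num * 1.0 / trans_den) % 1.0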
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
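# Space group 75 (P 4) opens the tetragonal series: the generators now include
# the four-fold rotation [0,-1,0, 1,0,0, 0,0,1], which maps (x, y, z) to
# (-y, x, z). As a sanity check (assuming numpy semantics for ``N``), its
# fourth power is the identity:
#
#     r = N.array([0,-1,0,1,0,0,0,0,1]); r.shape = (3, 3)
#     r4 = N.dot(N.dot(r, r), N.dot(r, r))
#     # r4 == N.identity(3) holds elementwise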
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
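# Note: the ':2' suffix in names such as 'P 4/n :2' denotes origin choice 2
# of the International Tables (origin on the inversion centre), as opposed
# to the origin-choice-1 setting of the same group.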
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
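# --- Illustrative note (not part of the generated tables) -----------------
# For body-centred (I) lattices such as I 4/m above, the generator emits
# every operation twice: once as listed for the primitive setting and once
# with the centring vector (1/2, 1/2, 1/2) added to the translation. A
# sketch of deriving the centred copies from the primitive half is given
# below (hypothetical helper, unused here; note the generated tables may
# store an equivalent but unreduced numerator/denominator pair):
def _add_body_centring(ops):
    centred = list(ops)
    for rot, num, den in ops:
        # num/den + 1/2 == (2*num + den) / (2*den), elementwise
        centred.append((rot, 2 * num + den, 2 * den))
    return centred
# ---------------------------------------------------------------------------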
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
from . import Image
import matplotlib.pyplot as plt
import numpy as np
import re
from astropy.time import Time
from astropy import units as u
from astropy.coordinates import SkyCoord
from .fluxes import ApertureFluxes
from . import viz
from astropy.io import fits
from .telescope import Telescope
from . import utils
from astroquery.mast import Catalogs
from astropy.wcs import WCS, utils as wcsutils
import pandas as pd
from scipy.stats import binned_statistic
from .blocks.psf import Gaussian2D, Moffat2D,cutouts
from .console_utils import INFO_LABEL
from astropy.stats import sigma_clipped_stats
from astropy.io.fits.verify import VerifyWarning
from datetime import datetime
import warnings
from .blocks.registration import distances
import requests
import shutil
from pathlib import Path
from . import twirl
import io
from .utils import fast_binning, z_scale
from .console_utils import info
warnings.simplefilter('ignore', category=VerifyWarning)
class Observation(ApertureFluxes):
"""
Class to load and analyze photometry products
Parameters
----------
    photfile : str
        path of the `.phot` file to load
    ignore_time : bool, optional
        whether to silence the time-conversion messages printed on load, by default False
    """
def __init__(self, photfile, ignore_time=False):
super().__init__(photfile)
utils.remove_sip(self.xarray.attrs)
self.phot = photfile
self.telescope = Telescope.from_name(self.telescope)
self.gaia_data = None
self.tic_data = None
self.wcs = WCS(utils.remove_arrays(self.xarray.attrs))
self._meridian_flip = None
has_bjd = hasattr(self.xarray, "bjd_tdb")
if has_bjd:
has_bjd = ~np.all(self.xarray.bjd_tdb.isnull().values)
if not has_bjd:
try:
self.compute_bjd()
if not ignore_time:
print(f"{INFO_LABEL} Time converted to BJD TDB")
except:
if not ignore_time:
print(f"{INFO_LABEL} Could not convert time to BJD TDB")
def _check_stack(self):
        assert "stack" in self.xarray, "No stack found"
# Loaders and savers (files and data)
# ------------------------------------
def __copy__(self):
copied = Observation(self.xarray.copy(), ignore_time=True)
copied.phot = self.phot
copied.telescope = self.telescope
copied.gaia_data = self.gaia_data
copied.tic_data = self.tic_data
copied.wcs = self.wcs
return copied
def copy(self):
return self.__copy__()
def to_csv(self, destination, sep=" "):
"""Export a typical csv of the observation's data
Parameters
----------
destination : str
Path of the csv file to save
        sep : str, optional
            separator string used in the csv, by default " "
"""
df = pd.DataFrame(
{
"BJD-TDB" if self.time_format == "bjd_tdb" else "JD-UTC": self.time,
"DIFF_FLUX": self.diff_flux,
"ERROR": self.diff_error,
"dx_MOVE": self.dx,
"dy_MOVE": self.dy,
"FWHM": self.fwhm,
"FWHMx": self.fwhm,
"FWHMy": self.fwhm,
"SKYLEVEL": self.sky,
"AIRMASS": self.airmass,
"EXPOSURE": self.exptime,
}
)
df.to_csv(destination, sep=sep, index=False)
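    # Illustrative usage sketch (file names hypothetical):
    #
    #     obs = Observation("Io_20200401.phot")
    #     obs.to_csv("Io_20200401_measurements.txt", sep=" ")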
def save(self, destination=None):
"""Save current observation
Parameters
----------
destination : str, optional
path to phot file, by default None
"""
self.xarray.to_netcdf(self.phot if destination is None else destination)
info(f"saved {self.phot}")
def export_stack(self, destination, **kwargs):
"""Export stack to FITS file
Parameters
----------
destination : str
path of FITS to export
"""
header = {name: value for name, value in self.xarray.attrs.items() if name.isupper()}
data = self.stack
hdul = fits.HDUList([fits.PrimaryHDU(data=data, header=fits.Header(header))])
hdul.writeto(destination, **kwargs)
def import_stack(self, fitsfile):
"""Import FITS as stack to current obs (including WCS) - do not forget to save to keep it
Parameters
----------
fitsfile : str
path of FITS stack to import
"""
data = fits.getdata(fitsfile)
header = fits.getheader(fitsfile)
self.wcs = WCS(header)
self.xarray.attrs.update(utils.header_to_cdf4_dict(header))
self.xarray["stack"] = (('w', 'h'), data)
# Convenience
# -----------
@property
def skycoord(self):
"""astropy SkyCoord object for the target
"""
return SkyCoord(self.RA, self.DEC, frame='icrs', unit=(self.telescope.ra_unit, self.telescope.dec_unit))
@property
def simbad_url(self):
"""
[notebook feature] clickable simbad query url for specified target
"""
        from IPython.display import display, HTML
display(HTML('<a href="{}">{}</a>'.format(self.simbad, self.simbad)))
@property
def simbad(self):
"""
simbad query url for specified target
"""
return f"http://simbad.u-strasbg.fr/simbad/sim-coo?Coord={self.RA}+{self.DEC}&CooFrame=FK5&CooEpoch=2000&CooEqui=" \
"2000&CooDefinedFrames=none&Radius=2&Radius.unit=arcmin&submit=submit+query&CoordList="
@property
def denominator(self):
"""A conveniant name for the observation: {telescope}_{date}_{name}_{filter}
Returns
-------
[type]
[description]
"""
return f"{self.telescope.name}_{self.date}_{self.name}_{self.filter}"
@property
def meridian_flip(self):
"""Meridian flip time. Supposing EAST and WEST encode orientation
"""
if self._meridian_flip is not None:
return self._meridian_flip
else:
has_flip = hasattr(self.xarray, "flip")
if has_flip:
                try:
                    # if the flip values are numeric (e.g. all NaN), no orientation was recorded
                    np.all(np.isnan(self.flip))
                    return None
                except TypeError:
                    # the flip values are strings ("EAST"/"WEST"): proceed with detection
                    pass
if has_flip:
if "WEST" in self.flip:
flip = (self.flip.copy() == "WEST").astype(int)
diffs = np.abs(np.diff(flip))
if np.any(diffs):
self._meridian_flip = self.time[np.argmax(diffs).flatten()]
else:
self._meridian_flip = None
return self._meridian_flip
else:
return None
else:
return None
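    # Sketch of the detection logic above on hypothetical flip values:
    #
    #     flip = (np.array(["EAST", "EAST", "WEST", "WEST"]) == "WEST").astype(int)
    #     np.abs(np.diff(flip))             # -> [0, 1, 0]
    #     np.argmax(np.abs(np.diff(flip)))  # -> 1, index of the last pre-flip exposure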
# TESS specific methods
# --------------------
@property
def tic_id(self):
"""TIC id from digits found in target name
"""
try:
            nb = re.findall(r'\d*\.?\d+', self.name)
df = pd.read_csv("https://exofop.ipac.caltech.edu/tess/download_toi?toi=%s&output=csv" % nb[0])
tic = df["TIC ID"][0]
return f"{tic}"
except KeyError:
print('TIC ID not found')
return None
@property
def gaia_from_toi(self):
"""Gaia id from TOI id if TOI is in target name
"""
if self.tic_id is not None:
tic_id = ("TIC " + self.tic_id)
catalog_data = Catalogs.query_object(tic_id, radius=.001, catalog="TIC")
return f"{catalog_data['GAIA'][0]}"
else:
return None
@property
def tfop_prefix(self):
return f"TIC{self.tic_id}_{self.date}_{self.telescope.name}_{self.filter}"
# Methods
# -------
def compute_bjd(self, version="prose"):
"""Compute BJD_tdb based on current time
Once this is done self.time is BJD tdb and time format can be checked in self.time_format. Note that half the
exposure time is added to the JD times before conversion. The precision of the returned time is not
guaranteed, especially with "prose" method (~30ms). "eastman" option accuracy is 20ms. See
http://astroutils.astronomy.ohio-state.edu/time/utc2bjd.html for more details.
Parameters
----------
version : str, optional
- "prose": uses an astropy method
- "eastman": uses the web applet http://astroutils.astronomy.ohio-state.edu (Eastman et al. 2010) [requires
an internet connection]
by default "prose"
"""
assert self.telescope is not None
assert self.skycoord is not None
exposure_days = self.xarray.exposure.values/60/60/24
# For backward compatibility
# --------------------------
if "time_format" not in self.xarray.attrs:
self.xarray.attrs["time_format"] = "jd_utc"
self.xarray["jd_utc"] = ("time", self.time)
if "jd_utc" not in self:
self.xarray["jd_utc"] = ("time", self.jd)
self.xarray.drop("jd")
# -------------------------
if version == "prose":
time = Time(self.jd_utc + exposure_days/2, format="jd", scale="utc", location=self.telescope.earth_location).tdb
light_travel_tdb = time.light_travel_time(self.skycoord, location=self.telescope.earth_location)
bjd_time = (time + light_travel_tdb).value
elif version == "eastman":
bjd_time = utils.jd_to_bjd(self.jd_utc + exposure_days/2, self.skycoord.ra.deg, self.skycoord.dec.deg)
self.xarray = self.xarray.assign_coords(time=bjd_time)
self.xarray["bjd_tdb"] = ("time", bjd_time)
self.xarray.attrs["time_format"] = "bjd_tdb"
# Catalog queries
# ---------------
def query_gaia(self, limit=-1, cone_radius=None):
"""Query gaia catalog for stars in the field
"""
from astroquery.gaia import Gaia
Gaia.ROW_LIMIT = limit
header = self.xarray.attrs
shape = self.stack.shape
if cone_radius is None:
cone_radius = np.sqrt(2) * np.max(shape) * self.telescope.pixel_scale / 120
coord = self.skycoord
radius = u.Quantity(cone_radius, u.arcminute)
gaia_query = Gaia.cone_search_async(coord, radius, verbose=False, )
self.gaia_data = gaia_query.get_results()
self.gaia_data.sort("phot_g_mean_flux", reverse=True)
delta_years = (utils.datetime_to_years(datetime.strptime(self.date, "%Y%m%d")) - \
self.gaia_data["ref_epoch"].data.data) * u.year
dra = delta_years * self.gaia_data["pmra"].to(u.deg / u.year)
ddec = delta_years * self.gaia_data["pmdec"].to(u.deg / u.year)
skycoords = SkyCoord(
ra=self.gaia_data['ra'].quantity + dra,
dec=self.gaia_data['dec'].quantity + ddec,
pm_ra_cosdec=self.gaia_data['pmra'],
pm_dec=self.gaia_data['pmdec'],
radial_velocity=self.gaia_data['radial_velocity'],
obstime=Time(2015.0, format='decimalyear'))
gaias = np.array(wcsutils.skycoord_to_pixel(skycoords, self.wcs)).T
gaias[np.any(np.isnan(gaias), 1), :] = [0, 0]
self.gaia_data["x"], self.gaia_data["y"] = gaias.T
inside = np.all((np.array([0, 0]) < gaias) & (gaias < np.array(self.stack.shape)), 1)
self.gaia_data = self.gaia_data[np.argwhere(inside).squeeze()]
w, h = self.stack.shape
if np.abs(np.mean(self.gaia_data["x"])) > w or np.abs(np.mean(self.gaia_data["y"])) > h:
warnings.warn("Catalog stars seem out of the field. Check that your stack is solved and that telescope "
"'ra_unit' and 'dec_unit' are well set")
def query_tic(self,cone_radius=None):
"""Query TIC catalog (through MAST) for stars in the field
"""
from astroquery.mast import Catalogs
header = self.xarray.attrs
shape = self.stack.shape
if cone_radius is None:
cone_radius = np.sqrt(2) * np.max(shape) * self.telescope.pixel_scale / 120
coord = self.skycoord
radius = u.Quantity(cone_radius, u.arcminute)
self.tic_data = Catalogs.query_region(coord, radius, "TIC", verbose=False)
self.tic_data.sort("Jmag")
skycoords = SkyCoord(
ra=self.tic_data['ra'],
dec=self.tic_data['dec'], unit="deg")
self.tic_data["x"], self.tic_data["y"] = np.array(wcsutils.skycoord_to_pixel(skycoords, self.wcs))
w, h = self.stack.shape
if np.abs(np.mean(self.tic_data["x"])) > w or np.abs(np.mean(self.tic_data["y"])) > h:
warnings.warn("Catalog stars seem out of the field. Check that your stack is solved and that telescope "
"'ra_unit' and 'dec_unit' are well set")
@property
def gaia_target(self):
return None
@gaia_target.setter
def gaia_target(self, gaia_id):
"""Set target with a gaia id
Parameters
----------
gaia_id : int
gaia id
"""
if self.gaia_data is None:
self.query_gaia()
_ = self.gaia_data.to_pandas()[["source_id", "x", "y"]].to_numpy()
ids = _[:, 0]
positions = _[:, 1:3]
gaia_i = np.argmin(np.abs(gaia_id - ids))
self.target = np.argmin(np.power(positions[gaia_i, :] - self.stars[:, ::-1], 2).sum(1))
# Plot
# ----
def show(self, size=10, flip=False, zoom=False, contrast=0.05, wcs=False, cmap="Greys_r", sigclip=None,vmin=None,vmax=None):
"""Show stack image
Parameters
----------
size : int, optional
size of the square figure, by default 10
flip : bool, optional
whether to flip the image, by default False
zoom : bool, optional
whether to include a zoom inlay in the image, by default False
contrast : float, optional
contrast for the Zscale of image, by default 0.05
wcs : bool, optional
whether to show grid and axes in world coordinates, by default False
"""
if self.target == -1:
zoom = False
self._check_stack()
fig = plt.figure(figsize=(size, size))
fig.patch.set_facecolor('white')
image = self.stack.copy()
if flip:
image = image[::-1, ::-1]
if sigclip is not None:
mean, median, std = sigma_clipped_stats(image)
image[image - median < 2 * std] = median
if wcs:
ax = plt.subplot(projection=self.wcs, label='overlays')
else:
ax = fig.add_subplot(111)
if vmin is None or vmax is None:
_ = ax.imshow(utils.z_scale(image,c=contrast), cmap=cmap, origin="lower")
else:
_ = ax.imshow(image, cmap=cmap, origin="lower",vmin=vmin,vmax=vmax)
if wcs:
ax.coords.grid(True, color='white', ls='solid', alpha=0.3)
ax.coords[0].set_axislabel('Galactic Longitude')
ax.coords[1].set_axislabel('Galactic Latitude')
overlay = ax.get_coords_overlay('fk5')
overlay.grid(color='white', ls='--', alpha=0.3)
overlay[0].set_axislabel('Right Ascension (J2000)')
overlay[1].set_axislabel('Declination (J2000)')
def _check_show(self, **kwargs):
axes = plt.gcf().axes
if len(axes) == 0:
self.show(**kwargs)
def show_stars(self, size=10, view=None, n=None, flip=False,
comp_color="yellow", color=[0.51, 0.86, 1.], stars=None, legend=True, **kwargs):
"""Show detected stars over stack image
Parameters
----------
size : int, optional
size of the square figure, by default 10
flip : bool, optional
whether to flip image, by default False
view : str, optional
"all" to see all stars OR "reference" to have target and comparison stars hilighted, by default None
n : int, optional
max number of stars to show, by default None,
Raises
------
AssertionError
if `n` is given together with view="reference"
"""
self._check_show(flip=flip, size=size, **kwargs)
if stars is None:
stars = self.stars
if n is not None:
if view == "reference":
raise AssertionError("'n_stars' kwargs is incompatible with 'reference' view that will display all stars")
else:
n = len(stars)
stars = stars[0:n]
if view is None:
view = "reference" if 'comps' in self else "all"
image_size = np.array(np.shape(self.stack))[::-1]
if flip:
stars = np.array(image_size) - stars
if view == "all":
viz.plot_marks(*stars.T, np.arange(len(stars)), color=color)
if "stars" in self.xarray:
others = np.arange(n, len(self.stars))
others = np.setdiff1d(others, self.target)
viz.plot_marks(*self.stars[others].T, alpha=0.4, color=color)
elif view == "reference":
x = self.xarray.isel(apertures=self.aperture)
assert 'comps' in self, "No differential photometry"
comps = x.comps.values
others = np.setdiff1d(np.arange(len(stars)), x.comps.values)
others = np.setdiff1d(others, self.target)
_ = viz.plot_marks(*stars[self.target], self.target, color=color)
_ = viz.plot_marks(*stars[comps].T, comps, color=comp_color)
_ = viz.plot_marks(*stars[others].T, alpha=0.4, color=color)
if legend:
colors = [comp_color, color]
texts = ["Comparison stars", "Target"]
viz.circles_legend(colors, texts)
def show_gaia(self, color="yellow", alpha=1, n=None, idxs=True, limit=-1, fontsize=8, align=False):
"""Overlay Gaia objects on stack image
Parameters
----------
color : str, optional
color of marks and font, by default "yellow"
alpha : int, optional
opacity of marks and font, by default 1
n : int, optional
max number of stars to show, by default None (all stars)
idxs : bool, optional
whether to show Gaia ids, by default True
"""
self._check_show()
if self.gaia_data is None:
self.query_gaia(limit=limit)
gaias = np.vstack([self.gaia_data["x"].data, self.gaia_data["y"].data]).T
defined = ~np.any(np.isnan(gaias), 1)
gaias = gaias[defined]
labels = self.gaia_data["source_id"].data.astype(str)[defined]
if align:
X = twirl.find_transform(gaias[0:30], self.stars, n=15)
gaias = twirl.affine_transform(X)(gaias)
labels = [f"{_id[0:len(_id) // 2]}\n{_id[len(_id) // 2::]}" for _id in labels]
_ = viz.plot_marks(*gaias.T, labels if idxs else None, color=color, alpha=alpha, n=n, position="top",
fontsize=fontsize)
def show_tic(self, color="white", alpha=1, n=None, idxs=True, align=True):
"""Overlay TIC objects on stack image
Parameters
----------
color : str, optional
color of marks and font, by default "white"
alpha : int, optional
opacity of marks and font, by default 1
n : int, optional
max number of stars to show, by default None (all stars)
idxs : bool, optional
whether to show TIC ids, by default True
"""
self._check_show()
if self.tic_data is None:
self.query_tic()
x = self.tic_data["x"].data
y = self.tic_data["y"].data
tics = np.vstack([x, y]).T
ID = self.tic_data["ID"].data
if align:
X = twirl.find_transform(tics[0:30], self.stars, n=15)
tics = twirl.affine_transform(X)(tics)
_ = viz.plot_marks(*tics.T, ID if idxs else None, color=color, alpha=alpha, n=n, position="top", fontsize=9, offset=10)
def show_cutout(self, star=None, size=200, marks=True,**kwargs):
"""
Show a zoomed cutout around a detected star or coordinates
Parameters
----------
star : [type], optional
detected star id or (x, y) coordinate, by default None
size : int, optional
side size of square cutout in pixel, by default 200
"""
if star is None:
x, y = self.stars[self.target]
elif isinstance(star, int):
x, y = self.stars[star]
elif isinstance(star, (tuple, list, np.ndarray)):
x, y = star
else:
raise ValueError("star type not understood")
self.show(**kwargs)
plt.xlim(np.array([-size / 2, size / 2]) + x)
plt.ylim(np.array([-size / 2, size / 2]) + y)
if marks:
idxs = np.argwhere(np.max(np.abs(self.stars - [x, y]), axis=1) < size).squeeze()
viz.plot_marks(*self.stars[idxs].T, label=idxs)
def plot_comps_lcs(self, n=15, ylim=(0.98, 1.02)):
"""Plot comparison stars light curves along target star light curve
Parameters
----------
n : int, optional
maximum number of comparison stars to show, by default 15
ylim : tuple, optional
ylim of the plot, by default (0.98, 1.02)
"""
idxs = [self.target, *self.xarray.comps.isel(apertures=self.aperture).values[0:n]]
lcs = [self.xarray.diff_fluxes.isel(star=i, apertures=self.aperture).values for i in idxs]
if ylim is None:
ylim = (self.diff_flux.min() * 0.99, self.diff_flux.max() * 1.01)
offset = ylim[1] - ylim[0]
if len(plt.gcf().axes) == 0:
plt.figure(figsize=(5, 10))
for i, lc in enumerate(lcs):
color = "grey" if i != 0 else "black"
viz.plot(self.time, lc - i * offset, bincolor=color)
plt.annotate(idxs[i], (self.time.min() + 0.005, 1 - i * offset + offset / 3))
plt.ylim(1 - (i + 0.5) * offset, ylim[1])
plt.title("Comparison stars", loc="left")
plt.grid(color="whitesmoke")
plt.tight_layout()
def plot_psf_fit(self, size=21, cmap="inferno", c="blueviolet", model=Gaussian2D):
"""Plot a 2D gaussian fit of the global psf (extracted from stack fits)
Parameters
----------
size : int, optional
square size of extracted PSF, by default 21
cmap : str, optional
color map of psf image, by default "inferno"
c : str, optional
color of model plot line, by default "blueviolet"
model : prose.blocks, optional
a PsfFit block, by default Gaussian2D
Returns
-------
dict
PSF fit info (theta, std_x, std_y, fwhm_x, fwhm_y)
"""
psf_fit = model()
image = Image(data=self.stack, stars_coords=self.stars, header=self.xarray.attrs)
psf_fit.run(image)
if len(plt.gcf().get_axes()) == 0:
plt.figure(figsize=(12, 4))
viz.plot_marginal_model(psf_fit.epsf, psf_fit.optimized_model, cmap=cmap, c=c)
return {"theta": image.theta,
"std_x": image.psf_sigma_x,
"std_y": image.psf_sigma_y,
"fwhm_x": image.fwhmx,
"fwhm_y": image.fwhmy }
def plot_star_psf(self,star=None,cutout_size=21,print_values=True,plot=True):
if star is None:
star = self.target
cutout = cutouts(self.stack, [self.stars[star]], size=cutout_size)
psf_fit = Moffat2D(cutout_size=cutout_size)
params = ['fwhmx =', 'fwhmy =', 'theta =']
fit_values = psf_fit(cutout.data[0])  # fit once instead of re-fitting for every parameter
values = []
for i in range(len(params)):
if print_values is True:
print(params[i], fit_values[i])
values.append(fit_values[i])
if plot is True:
viz.plot_marginal_model(psf_fit.epsf, psf_fit.optimized_model)
return values
def plot_rms(self, bins=0.005):
"""Plot binned rms of lightcurves vs the CCD equation
Parameters
----------
bins : float, optional
bin size used to compute error, by default 0.005 (in days)
"""
self._check_diff()
viz.plot_rms(
self.diff_fluxes,
self.lcs,
bins=bins,
target=self.target["id"],
highlights=self.comparison_stars)
def plot_systematics(self, fields=None, ylim=(0.98, 1.02)):
"""Plot systematics measurements along target light curve
Parameters
----------
fields : list of str, optional
list of systematic to include (must be in self), by default None
ylim : tuple, optional
plot ylim, by default (0.98, 1.02)
"""
if fields is None:
fields = ["dx", "dy", "fwhm", "airmass", "sky"]
flux = self.diff_flux.copy()
flux /= np.nanmean(flux)
if ylim is None:
ylim = (np.nanmin(flux) * 0.99, np.nanmax(flux) * 1.01)  # ndarray has no .nanmin() method
offset = ylim[1] - ylim[0]
if len(plt.gcf().axes) == 0:
plt.figure(figsize=(5 ,10))
viz.plot(self.time, flux, bincolor="black")
for i, field in enumerate(fields):
if field in self:
scaled_data = self.xarray[field].values.copy()
scaled_data = np.nan_to_num(scaled_data, nan=-1)  # the second positional argument of nan_to_num is 'copy', not the fill value
scaled_data[scaled_data - np.nanmean(scaled_data) > 5*np.nanstd(scaled_data)] = -1
scaled_data = scaled_data - np.median(scaled_data)
scaled_data = scaled_data / np.std(scaled_data)
scaled_data *= np.std(flux)
scaled_data += 1 - (i + 1) * offset
viz.plot(self.time, scaled_data, bincolor="grey")
plt.annotate(field, (self.time.min() + 0.005, 1 - (i + 1) * offset + offset / 3))
else:
i -= 1
plt.ylim(1 - (i + 1.5) * offset, ylim[1])
plt.title("Systematics", loc="left")
plt.grid(color="whitesmoke")
plt.tight_layout()
def plot_raw_diff(self):
"""Plot raw target flux and differantial flux
"""
plt.subplot(211)
plt.title("Differential lightcurve", loc="left")
self.plot()
plt.grid(color="whitesmoke")
plt.subplot(212)
plt.title("Normalized flux", loc="left")
flux = self.xarray.raw_fluxes.isel(star=self.target, apertures=self.aperture).values
plt.plot(self.time, flux, ".", ms=3, label="target", c="C0")
if 'alc' in self:
plt.plot(self.time, self.xarray.alc.isel(apertures=self.aperture).values*np.median(flux), ".", ms=3, c="k", label="artificial star")
plt.legend()
plt.grid(color="whitesmoke")
plt.xlim([np.min(self.time), np.max(self.time)])
plt.tight_layout()
def plot_precision(self, bins=0.005, aperture=None):
"""Plot observation precision estimate against theorethical error (background noise, photon noise and CCD equation)
Parameters
----------
bins : float, optional
bin size used to estimate error, by default 0.005 (in days)
aperture : int, optional
chosen aperture, by default None
"""
n_bin = int(bins / (np.mean(self.exptime) / (60 * 60 * 24)))
assert len(self.time) > n_bin, "Your 'bins' size is less than the total exposure"
x = self.xarray.isel(apertures=self.aperture if aperture is None else aperture).copy()
fluxes = x.raw_fluxes.values
errors = x.raw_errors.values
mean_fluxes = np.mean(fluxes, axis=1)
mean_errors = np.mean(errors, axis=1)
error_estimate = [np.median(binned_statistic(self.time, f, statistic='std', bins=n_bin)[0]) for f in fluxes]
area = x.apertures_area[0].values
# ccd_equation = phot_prose.telescope.error(
# prose_fluxes, tp_area, np.mean(self.sky), np.mean(self.exptime), np.mean(self.airmass))
ccd_equation = (mean_errors / mean_fluxes)
inv_snr_estimate = error_estimate / mean_fluxes
positive_est = inv_snr_estimate > 0
mean_fluxes = mean_fluxes[positive_est]
inv_snr_estimate = inv_snr_estimate[positive_est]
ccd_equation = ccd_equation[positive_est]
sorted_fluxes_idxs = np.argsort(mean_fluxes)
plt.plot(np.log(mean_fluxes), inv_snr_estimate, ".", alpha=0.5, ms=2, c="k",
label=f"flux rms ({0.005 * (60 * 24):.1f} min bins)")
plt.plot(np.log(mean_fluxes)[sorted_fluxes_idxs], (np.sqrt(mean_fluxes) / mean_fluxes)[sorted_fluxes_idxs],
"--", c="k", label="photon noise", alpha=0.5)
plt.plot(np.log(mean_fluxes)[sorted_fluxes_idxs],
(np.sqrt(np.mean(self.sky) * area) / mean_fluxes)[sorted_fluxes_idxs], c="k", label="background noise",
alpha=0.5)
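# Photon noise scales as sqrt(F)/F and background noise as
# sqrt(mean_sky * aperture_area)/F; both are lower bounds on the
# binned-flux rms plotted above.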
# plt.plot(np.log(prose_fluxes)[s], (prose_e/prose_fluxes)[s], label="CCD equation")
plt.plot(np.log(mean_fluxes)[sorted_fluxes_idxs], ccd_equation[sorted_fluxes_idxs], label="CCD equation")
plt.legend()
plt.ylim(
0.5 * np.percentile(inv_snr_estimate, 2),
1.5 * np.percentile(inv_snr_estimate, 98))
plt.xlim(np.min(np.log(mean_fluxes)), np.max(np.log(mean_fluxes)))
plt.yscale("log")
plt.xlabel("log(ADU)")
plt.ylabel("$SNR^{-1}$")
plt.title("Photometric precision (raw fluxes)", loc="left")
def plot_meridian_flip(self):
"""Plot meridian flip line over existing axe
"""
if self.meridian_flip is not None:
plt.axvline(self.meridian_flip, c="k", alpha=0.15)
_, ylim = plt.ylim()
plt.text(self.meridian_flip, ylim, "meridian flip ", ha="right", rotation="vertical", va="top", color="0.7")
def plot(self, star=None, meridian_flip=True, bins=0.005, color="k", std=True):
"""Plot observation light curve
Parameters
----------
star : int, optional
index of the star to plot, by default None (the target star)
meridian_flip : bool, optional
whether to show meridian flip, by default True
bins : float, optional
bin size in same unit as Observation.time, by default 0.005
color : str, optional
binned points color, by default "k"
std : bool, optional
whether to show the standard deviation of bins as error bars, by default True; otherwise the theoretical error bar is shown
"""
super().plot(star=star, bins=bins, color=color, std=std)
if meridian_flip:
self.plot_meridian_flip()
def plot_psf(self, star=None, n=40, zscale=False, aperture=None, rin=None, rout=None):
"""Plot star cutout overalid with aperture and radial flux.
Parameters
----------
star : int or list like, optional
if int: star to plot cutout on, if list like (tuple, np.ndarray) of size 2: coords of cutout, by default None
n : int, optional
cutout width and height, by default 40
zscale : bool, optional
whether to apply a zscale to cutout image, by default False
aperture : float, optional
radius of aperture to display, by default None corresponds to best target aperture
rin : [type], optional
radius of inner annulus to display, by default None corresponds to inner radius saved
rout : [type], optional
radius of outer annulus to display, by default None corresponds to outer radius saved
"""
n /= np.sqrt(2)
if isinstance(star, (tuple, list, np.ndarray)):
x, y = star
else:
if star is None:
star = 0
assert isinstance(star, int), "star must be star coordinates or integer index"
x, y = self.stars[star]
Y, X = np.indices(self.stack.shape)
cutout_mask = (np.abs(X - x + 0.5) < n) & (np.abs(Y - y + 0.5) < n)
inside = np.argwhere((cutout_mask).flatten()).flatten()
radii = (np.sqrt((X - x) ** 2 + (Y - y) ** 2)).flatten()[inside]
idxs = np.argsort(radii)
radii = radii[idxs]
pixels = self.stack.flatten()[inside]
pixels = pixels[idxs]
binned_radii, binned_pixels, _ = fast_binning(radii, pixels, bins=1)
fig = plt.figure(figsize=(9.5, 4))
fig.patch.set_facecolor('xkcd:white')
_ = plt.subplot(1, 5, (1, 3))
plt.plot(radii, pixels, "o", fillstyle='none', c="0.7", ms=4)
plt.plot(binned_radii, binned_pixels, c="k")
plt.xlabel("distance from center (pixels)")
plt.ylabel("ADUs")
_, ylim = plt.ylim()
if "apertures_radii" in self and self.aperture != -1:
apertures = self.apertures_radii[:, 0]
aperture = apertures[self.aperture]
if "annulus_rin" in self:
if rin is None:
rin = self.annulus_rin.mean()
if rout is None:
rout = self.annulus_rout.mean()
if aperture is not None:
plt.xlim(0)
plt.text(aperture, ylim, "APERTURE", ha="right", rotation="vertical", va="top")
plt.axvline(aperture, c="k", alpha=0.1)
plt.axvspan(0, aperture, color="0.9", alpha=0.1)
if rin is not None:
plt.axvline(rin, color="k", alpha=0.2)
if rout is not None:
plt.axvline(rout, color="k", alpha=0.2)
if rin is not None:
plt.axvspan(rin, rout, color="0.9", alpha=0.2)
_ = plt.text(rout, ylim, "ANNULUS", ha="right", rotation="vertical", va="top")
n = np.max([np.max(radii), rout +2 if rout else 0])
plt.xlim(0, n)
ax2 = plt.subplot(1, 5, (4, 5))
im = self.stack[int(y - n):int(y + n), int(x - n):int(x + n)]
if zscale:
im = z_scale(im)
plt.imshow(im, cmap="Greys_r", aspect="auto", origin="lower")
plt.axis("off")
if aperture is not None:
ax2.add_patch(plt.Circle((n, n), aperture, ec='grey', fill=False, lw=2))
if rin is not None:
ax2.add_patch(plt.Circle((n, n), rin, ec='grey', fill=False, lw=2))
if rout is not None:
ax2.add_patch(plt.Circle((n, n), rout, ec='grey', fill=False, lw=2))
ax2.text(0.05, 0.05, f"{star}", fontsize=12, color="white", transform=ax2.transAxes)
plt.tight_layout()
def plot_systematics_signal(self, systematics, signal=None, ylim=None, offset=None, figsize=(6, 7)):
"""Plot a systematics and signal model over diff_flux. systeamtics + signal is plotted on top, signal alone on detrended
data on bottom
Parameters
----------
systematics : np.ndarray
signal : np.ndarray
ylim : tuple, optional
ylim of the plot, by default None, using the dispersion of y
offset : tuple, optional
offset between the systematics+signal plot and the detrended plot, by default None
figsize : tuple, optional
figure size as in in plt.figure, by default (6, 7)
"""
viz.plot_systematics_signal(self.time, self.diff_flux, systematics, signal, ylim=ylim, offset=offset,
figsize=figsize)
self.plot_meridian_flip()
plt.legend()
self.xlabel()
plt.ylabel("diff. flux")
plt.tight_layout()
viz.paper_style()
def xlabel(self):
"""Plot xlabel (time) according to its units
"""
plt.xlabel(self.time_format.upper().replace("_", "-"))
def where(self, condition):
"""return filtered observation given a boolean mask of time
Parameters
----------
condition : np.ndarray
boolean mask over the time axis
Returns
-------
Observation
a copy of the observation restricted to times where condition is True
"""
new_obs = self.copy()
new_obs.xarray = new_obs.xarray.sel(time=self.time[condition])
return new_obs
def keep_good_stars(self, lower_threshold=3., upper_threshold=35000., trim=10, keep=None, inplace=True):
"""Keep only stars with a median flux higher than `threshold`*sky.
This action will reorganize stars indexes (target id will be recomputed) and reset the differential fluxes to raw.
Parameters
----------
lower_threshold : float
stars whose median peak over the median sky is above this are kept, default is 3
upper_threshold : float
stars whose median peak is above this (e.g. close to saturation) are discarded, default is 35000
trim : float
margin in pixels from the image edges within which stars are discarded, default is 10
keep : int or list
stars to keep regardless of the thresholds: indices 0 to `keep` if int, or an explicit list of indices
inplace: bool
whether to replace current object or return a new one
"""
good_stars = np.argwhere((np.median(self.peaks, 1)/np.median(self.sky) > lower_threshold) & (np.median(self.peaks, 1) < upper_threshold)).squeeze()
mask = np.any(np.abs(self.stars[good_stars] - max(self.stack.shape) / 2) > (max(self.stack.shape) - 2 * trim) / 2, axis=1)
bad_stars = np.argwhere(mask).flatten()
final_stars = np.delete(good_stars, bad_stars)
if isinstance(keep,int):
final_stars = np.concatenate([final_stars,np.arange(0,keep+1)],axis=0)
final_stars = np.unique(final_stars)
if isinstance(keep,list):
final_stars = np.concatenate([final_stars,keep ], axis=0)
final_stars = np.unique(final_stars)
if inplace:
new_self = self
else:
new_self = self.copy()
new_self.xarray = new_self.xarray.isel(star=final_stars)
if self.target != -1:
new_self.target = np.argwhere(final_stars == new_self.target).flatten()[0]
if not inplace:
return new_self
def plot_flip(self):
plt.axvline(self.meridian_flip, ls="--", c="k", alpha=0.5)
def flip_correction(self, inplace=True):
"""Align all differential fluxes using a step model of the meridian flip
Parameters
----------
inplace : bool, optional
whether to replace the current Observation or return a new one, by default True
"""
if inplace:
new_self = self
else:
new_self = self.copy()
new_diff_fluxes = np.zeros_like(self.diff_fluxes)
"""
Plot comparisons between WACCM4 sea ice experiments. These are
sea ice thickness and concentration perturbation experiments. This script is
for DAILY data for all variables.
Notes
-----
Author : <NAME>
Date : 6 September 2017
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
import nclcmaps as ncm
import datetime
import read_DailyOutput as DO
import calc_Utilities as UT
import cmocean
### Define directories
directorydata = '/surtsey/zlabe/simu/'
directoryfigure = '/home/zlabe/Desktop/Daily/'
#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'
### Define time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Plotting Daily Vertical Profiles - %s----' % titletime)
#### Alott time series
year1 = 1900
year2 = 2000
years = np.arange(year1,year2+1,1)
### Add parameters
varnames = ['GEOP','TEMP','U','V']
runnames = [r'HIT',r'FIT',r'CIT',r'FIC',r'FICT']
### Call functions for variable profile data for polar cap
for v in range(len(varnames)):
lat,lon,time,lev,varhit = DO.readMeanExperi(directorydata,
'%s' % varnames[v],
'HIT','profile')
lat,lon,time,lev,varfit = DO.readMeanExperi(directorydata,
'%s' % varnames[v],
'FIT','profile')
lat,lon,time,lev,varcit = DO.readMeanExperi(directorydata,
'%s' % varnames[v],
'CIT','profile')
lat,lon,time,lev,varfic = DO.readMeanExperi(directorydata,
'%s' % varnames[v],
'FIC','profile')
lat,lon,time,lev,varfict = DO.readMeanExperi(directorydata,
'%s' % varnames[v],
'FICT','profile')
### Create 2d array of latitude and longitude
lon2,lat2 = np.meshgrid(lon,lat)
### Compare experiments
experiments = [r'\textbf{$\Delta$SIT}',r'\textbf{$\Delta$SIC}',
r'\textbf{$\Delta$NET}']
runs = [varhit,varfit,varcit,varfic,varfict]
### Compute comparisons for experiments - take ensemble average
diff_FITHIT = np.nanmean(varfit - varhit,axis=0)
diff_FICCIT = np.nanmean(varfic - varcit,axis=0)
diff_FICTHIT = np.nanmean(varfict - varhit,axis=0)
diffruns = np.asarray([diff_FITHIT,diff_FICCIT,diff_FICTHIT])
import torch
import torch.utils.data as data
from PIL import Image
import os
import math
import functools
import json
import copy
import cv2
import numpy as np
import pickle
from utils import load_value_file
import tarfile
import threading
import multiprocessing
import zipfile
from io import BytesIO
import io
import time
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, blockinfo=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.position = 0
self.name = getattr(fileobj, "name", None)
self.closed = False
if blockinfo is None:
blockinfo = [(0, size)]
# Construct a map with data and zero blocks.
self.map_index = 0
self.map = []
lastpos = 0
realpos = self.offset
for offset, size in blockinfo:
if offset > lastpos:
self.map.append((False, lastpos, offset, None))
self.map.append((True, offset, offset + size, realpos))
realpos += size
lastpos = offset + size
if lastpos < self.size:
self.map.append((False, lastpos, self.size, None))
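# Each map entry is (is_data, logical_start, logical_stop, real_offset):
# data blocks are read from the underlying file object at real_offset,
# while hole blocks are synthesized as NUL bytes in read().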
def flush(self):
pass
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self.fileobj.seekable()
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position, whence=io.SEEK_SET):
"""Seek to a position in the file.
"""
if whence == io.SEEK_SET:
self.position = min(max(position, 0), self.size)
elif whence == io.SEEK_CUR:
if position < 0:
self.position = max(self.position + position, 0)
else:
self.position = min(self.position + position, self.size)
elif whence == io.SEEK_END:
self.position = max(min(self.size + position, self.size), 0)
else:
raise ValueError("Invalid argument")
return self.position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
b = self.fileobj.read(length)
if len(b) != length:
raise tarfile.ReadError("unexpected end of data")
buf += b
else:
buf += b"\0" * length  # NUL-fill the sparse hole
size -= length
self.position += length
return buf
def readinto(self, b):
buf = self.read(len(b))
b[:len(buf)] = buf
return len(buf)
def close(self):
self.closed = True
#class _FileInFile
class ExFileObject(io.BufferedReader):
def __init__(self, fileobj, tarinfo):
fileobj = _FileInFile(fileobj, tarinfo.offset_data,
tarinfo.size, tarinfo.sparse)
super().__init__(fileobj)
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
with Image.open(f) as img:
return img.convert('RGB')
def accimage_loader(path):
try:
import accimage
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def get_default_image_loader():
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader
else:
return pil_loader
def video_loader(video_dir_path, frame_indices, image_loader):
video = []
for i in frame_indices:
image_path = os.path.join(video_dir_path, 'image_{:05d}.jpg'.format(i))
if os.path.exists(image_path):
video.append(image_loader(image_path))
else:
return video
return video
def get_default_video_loader():
image_loader = get_default_image_loader()
return functools.partial(video_loader, image_loader=image_loader)
def load_annotation_data(data_file_path):
with open(data_file_path, 'r') as data_file:
return json.load(data_file)
def get_class_labels(data):
class_labels_map = {}
index = 0
for class_label in data['labels']:
class_labels_map[class_label] = index
index += 1
return class_labels_map
def get_video_names_and_annotations(data, subset):
video_names = []
annotations = []
for key, value in data['database'].items():
this_subset = value['subset']
if this_subset == subset:
if subset == 'testing':
video_names.append('test/{}'.format(key))
else:
label = value['annotations']['label']
video_names.append('{}/{}'.format(label, key))
annotations.append(value['annotations'])
return video_names, annotations
def make_dataset_tar(root_path, subset, n_samples_for_each_video,
sample_duration, video_paths, audio_paths):
video_names = list(video_paths.keys())
dataset = []
for i in range(len(video_names)):
if i % 10000 == 0:
print('dataset loading [{}/{}]'.format(i, len(video_names)))
video_path = video_names[i]
audio_path = video_path.replace('.jpg', '.npy')
if not audio_path in audio_paths:
continue
#n_frames = video_paths[video_path]
n_frames = 31
begin_t = 1
end_t = n_frames
sample = {
'video': video_path,
'audio': audio_path,
'segment': [begin_t, end_t],
'n_frames': n_frames,
#'video_id': video_names[i][:-14].split('/')[1]
}
sample['label'] = video_names[i][2]
if n_samples_for_each_video == 1:
sample['frame_indices'] = list(range(1, n_frames + 1))
dataset.append(sample)
else:
if n_samples_for_each_video > 1:
step = max(1,
math.ceil((n_frames - 1 - sample_duration) /
(n_samples_for_each_video - 1)))
else:
step = sample_duration
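# Clips start every `step` frames: spread n_samples_for_each_video clips
# across the video when a count is requested, otherwise tile it in
# non-overlapping windows of sample_duration frames.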
for j in range(1, n_frames, step):
sample_j = copy.deepcopy(sample)
sample_j['frame_indices'] = list(
range(j, min(n_frames + 1, j + sample_duration)))
dataset.append(sample_j)
return dataset
class TarReader(object):
def __init__(self):
super(TarReader, self).__init__()
self.id_context = dict()
self.name2member = dict()
def read(self, tar_file, image_name):
if tar_file in self.id_context:
im = self.id_context[tar_file].extractfile(self.name2member[image_name])
return im.read()
else:
file_handle = tarfile.open(tar_file)
self.id_context[tar_file] = file_handle
im = self.id_context[tar_file].extractfile(self.name2member[image_name])
return im.read()
def getnames(self, tar_file):
if tar_file in self.id_context:
im = self.id_context[tar_file].getnames()
else:
file_handle = tarfile.open(tar_file)
pkl_file = tar_file.replace('.tar', '.pkl')
if not os.path.exists(pkl_file):
members = file_handle.getmembers()
pickle.dump(members, open(pkl_file, 'wb'))
else:
members = pickle.load(open(pkl_file, 'rb'))
file_handle.members = members
file_handle._loaded = True
for m in members:
self.name2member[m.name] = m
self.id_context[tar_file] = file_handle
#self.lock[tar_file] = threading.RLock()
im = self.id_context[tar_file].getnames()
return im
class ZipReader(object):
def __init__(self):
super(ZipReader, self).__init__()
self.id_context = dict()
self.lock = multiprocessing.Lock()
def read(self, zip_file, image_name):
if zip_file in self.id_context:
with self.lock:
im = self.id_context[zip_file].open(image_name)
res = im.read()
return res
else:
file_handle = zipfile.ZipFile(zip_file)
self.id_context[zip_file] = file_handle
im = self.id_context[zip_file].open(image_name)
return im.read()
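# Example (hypothetical archive and member names):
#   reader = ZipReader()
#   raw = reader.read("frames.zip", "lrw/ABOUT/train/ABOUT_00001.jpg")
#   img = Image.open(BytesIO(raw))
# Open handles are cached per archive; the lock serializes cached reads
# because ZipFile objects are not safe for concurrent extraction.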
def getnames(self, zip_file):
if zip_file in self.id_context:
im = self.id_context[zip_file].namelist()
else:
file_handle = zipfile.ZipFile(zip_file)
self.id_context[zip_file] = file_handle
im = self.id_context[zip_file].namelist()
return im
class Lrw_va_tar(data.Dataset):
def __init__(self,
root_path,
subset,
n_samples_for_each_video=1,
spatial_transform=None,
temporal_transform=None,
target_transform=None,
sample_duration=16,
get_loader=get_default_video_loader):
ziplist = []
a = os.walk(root_path)
for b, c, d in a:
for item in d:
if item.endswith('.zip'):
ziplist.append(b + '/' + item)
self.zipreader = ZipReader()
self.video_name_map = {}
self.video_zip_map = {}
self.video_paths = {}
for i, zipname in enumerate(ziplist):
ss = time.time()
namelist = self.zipreader.getnames(zipname)
ee = time.time()
readtime = ee - ss
ss = time.time()
for n in namelist:
#if ('/' + subset + '/') in n:
if n.endswith('.jpg'):
tmp = n.split('/')
newn = '/'.join(tmp[1:])
video_name = newn
self.video_name_map[newn] = n
self.video_zip_map[n] = zipname
if not video_name in self.video_paths:
self.video_paths[video_name] = 0
self.video_paths[video_name] += 1
ee = time.time()
buildtime = ee - ss
print('loading ', i, zipname, readtime, buildtime)
self.audio_zipname = root_path.replace('/video', '') + '/audio/audio.zip'
ss = time.time()
namelist = self.zipreader.getnames(self.audio_zipname)
ee = time.time()
print('loading audio', ee - ss)
self.audio_name_map = {}
self.audio_paths = set()
for n in namelist:
if ('/' + subset + '/') in n:
if n.endswith('.npy'):
tmp = n.split('/')
newn = '/'.join(tmp[1:])
audio_name = '/'.join(tmp[1:])
self.audio_name_map[newn] = n
self.audio_paths.add(audio_name)
self.data = make_dataset_tar(
root_path, subset, n_samples_for_each_video,
sample_duration, self.video_paths, self.audio_paths)
self.spatial_transform = spatial_transform
self.temporal_transform = temporal_transform
self.target_transform = target_transform
def loader(self, path, frame_indices):
#for i in frame_indices:
#image_path = path + '/image_{:05d}.jpg'.format(i)
#if image_path in self.video_name_map:
# video_name = self.video_name_map[image_path]
# tar_name = self.video_tar_map[video_name]
# im = self.tarreader.read(tar_name, video_name)
# im = Image.open(BytesIO(im))
# video.append(im)
image_path = path
video_name = self.video_name_map[image_path]
zip_name = self.video_zip_map[video_name]
im = self.zipreader.read(zip_name, video_name)
im = Image.open(BytesIO(im))
im = np.asarray(im)
video = [Image.fromarray(im[:, i*240:(i+1)*240, :]) for i in frame_indices]
return video
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
path = self.data[index]['video']
frame_indices = self.data[index]['frame_indices']
video_tot = len(frame_indices)
if self.temporal_transform is not None:
frame_indices = self.temporal_transform(frame_indices)
clip = self.loader(path, frame_indices)
audio_path = self.data[index]['audio']
audio = self.zipreader.read(self.audio_zipname, self.audio_name_map[audio_path])
audio = np.load(BytesIO(audio), allow_pickle=True)
audio_tot = audio.shape[0]
tmp_indices = np.array(frame_indices)
import unittest
import numpy as np
from pysplines.bsplines import Bspline
_TOLERANCE = 5.0e-7
class TestBspline(unittest.TestCase):
def setUp(self):
self.cv = [[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [2.0, 1.0], [2.0, 2.0]]
self.bspline = Bspline(self.cv, n=120)
def test_edge_points(self):
rvals = np.array(self.bspline.rvals)
control_points = self.bspline.cv
self.assertTrue(np.linalg.norm(rvals[0] - control_points[0]) < _TOLERANCE)
# ----------------------------------------------------------------------
#
# <NAME>, U.S. Geological Survey
# <NAME>, GNS Science
# <NAME>, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file tests/fullscale/poroelasticity/cryer/cryer_soln.py
#
# @brief Analytical solution to Cryer's problem.
# Owing to the symmetry of the problem, we only need consider the quarter
# domain case.
#
# -F
# ----------
# | |
# Ux=0 | | P=0
# | |
# | |
# ----------
# Uy=0
#
# Dirichlet boundary conditions
# Ux(0,y) = 0
# Uy(x,0) = 0
# Neumann boundary conditions
# \tau_normal(x,ymax) = -1*Pa
import numpy
# Physical properties
G = 3.0
rho_s = 2500
rho_f = 1000
K_fl = 8.0
K_sg = 10.0
K_d = 4.0
alpha = 0.6
phi = 0.1
k = 1.5
mu_f = 1.0
P_0 = 1.0
R_0 = 1.0
ndim = 3
M = 1.0 / ( phi / K_fl + (alpha - phi) /K_sg)
kappa = k/mu_f
K_u = K_d + alpha*alpha*M
S = (3*K_u + 4*G) / (M*(3*K_d + 4*G)) #(1/M) + ( (3*alpha*alpha) / (3*K_d + 4*G) )#
c = kappa / S
nu = (3*K_d - 2*G) / (2*(3*K_d + G))
nu_u = (3*K_u - 2*G) / (2*(3*K_u + G))
U_R_inf = -1.*(P_0*R_0*(1.-2.*nu))/(2.*G*(1.+nu))
eta = (alpha*(1-2*nu))/(2*(1-nu))
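# Derived constants: M is the Biot modulus, S the storage coefficient,
# c = kappa/S the consolidation (diffusivity) coefficient, nu/nu_u the
# drained/undrained Poisson ratios, and U_R_inf the drained radial
# displacement of the sphere surface at t -> infinity.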
xmin = 0.0 # m
xmax = 1.0 # m
ymin = 0.0 # m
ymax = 1.0 # m
zmin = 0.0 # m
zmax = 1.0 # m
# Time steps
ts = 0.0028666667 # sec
nts = 2
tsteps = numpy.arange(0.0, ts * nts, ts) + ts # sec
# ----------------------------------------------------------------------
class AnalyticalSoln(object):
"""
Analytical solution to Cryer's problem
"""
SPACE_DIM = 3
TENSOR_SIZE = 4
ITERATIONS = 50
EPS = 1e-25
def __init__(self):
self.fields = {
"displacement": self.displacement,
"pressure": self.pressure,
#"trace_strain": self.trace_strain,
"porosity": self.porosity,
"solid_density": self.solid_density,
"fluid_density": self.fluid_density,
"fluid_viscosity": self.fluid_viscosity,
"shear_modulus": self.shear_modulus,
"undrained_bulk_modulus": self.undrained_bulk_modulus,
"drained_bulk_modulus": self.drained_bulk_modulus,
"biot_coefficient": self.biot_coefficient,
"biot_modulus": self.biot_modulus,
"isotropic_permeability": self.isotropic_permeability,
"initial_amplitude": {
"x_neg": self.zero_vector,
"y_neg": self.zero_vector,
"z_neg": self.zero_vector,
"surface_traction": self.surface_traction,
"surface_pressure": self.zero_scalar
}
}
return
def getField(self, name, mesh_entity, pts):
if name in "initial_amplitude":
field = self.fields[name][mesh_entity](pts)
else:
field = self.fields[name](pts)
return field
def zero_scalar(self, locs):
(npts, dim) = locs.shape
return numpy.zeros((1, npts, 1), dtype=numpy.float64)
def zero_vector(self, locs):
(npts, dim) = locs.shape
return numpy.zeros((1, npts, self.SPACE_DIM), dtype=numpy.float64)
def solid_density(self, locs):
"""
Compute solid_density field at locations.
"""
(npts, dim) = locs.shape
solid_density = rho_s * numpy.ones((1, npts, 1), dtype=numpy.float64)
return solid_density
def fluid_density(self, locs):
"""
Compute fluid density field at locations.
"""
(npts, dim) = locs.shape
fluid_density = rho_f * numpy.ones((1, npts, 1), dtype=numpy.float64)
return fluid_density
def porosity(self, locs):
"""
Compute solid_density field at locations.
"""
(npts, dim) = locs.shape
porosity = phi * numpy.ones((1, npts, 1), dtype=numpy.float64)
return porosity
def shear_modulus(self, locs):
"""
Compute shear modulus field at locations.
"""
(npts, dim) = locs.shape
shear_modulus = G * numpy.ones((1, npts, 1), dtype=numpy.float64)
return shear_modulus
def fluid_viscosity(self, locs):
"""
Compute fluid_viscosity field at locations.
"""
(npts, dim) = locs.shape
fluid_viscosity = mu_f * numpy.ones((1, npts, 1), dtype=numpy.float64)
return fluid_viscosity
def undrained_bulk_modulus(self, locs):
"""
Compute undrained bulk modulus field at locations.
"""
(npts, dim) = locs.shape
undrained_bulk_modulus = K_u * numpy.ones((1, npts, 1), dtype=numpy.float64)
return undrained_bulk_modulus
def drained_bulk_modulus(self, locs):
"""
Compute undrained bulk modulus field at locations.
"""
(npts, dim) = locs.shape
drained_bulk_modulus = K_d * numpy.ones((1, npts, 1), dtype=numpy.float64)
return drained_bulk_modulus
def biot_coefficient(self, locs):
"""
Compute biot coefficient field at locations.
"""
(npts, dim) = locs.shape
biot_coefficient = alpha * numpy.ones((1, npts, 1), dtype=numpy.float64)
return biot_coefficient
def biot_modulus(self, locs):
"""
Compute biot modulus field at locations.
"""
(npts, dim) = locs.shape
biot_modulus = M * numpy.ones((1, npts, 1), dtype=numpy.float64)
return biot_modulus
def isotropic_permeability(self, locs):
"""
Compute isotropic permeability field at locations.
"""
(npts, dim) = locs.shape
isotropic_permeability = k * numpy.ones((1, npts, 1), dtype=numpy.float64)
return isotropic_permeability
def displacement(self, locs):
"""
Compute displacement field at locations.
"""
(npts, dim) = locs.shape
ntpts = tsteps.shape[0]
displacement = numpy.zeros((ntpts, npts, dim), dtype=numpy.float64)
x_n = self.cryerZeros()
center = numpy.where(~locs.any(axis=1))[0]
R = numpy.sqrt(locs[:,0]*locs[:,0] + locs[:,1]*locs[:,1] + locs[:,2]*locs[:,2])
theta = numpy.nan_to_num( numpy.arctan( numpy.nan_to_num( numpy.sqrt(locs[:,0]**2 + locs[:,1]**2) / locs[:,2] ) ) )
phi = numpy.nan_to_num( numpy.arctan( numpy.nan_to_num( locs[:,1] / locs[:,0] ) ) )
R_star = R.reshape([R.size,1]) / R_0
x_n = x_n.reshape([1, x_n.size])  # assign the reshape; a bare reshape call is a no-op
E = numpy.square(1-nu)*numpy.square(1+nu_u)*x_n - 18*(1+nu)*(nu_u-nu)*(1-nu_u)
t_track = 0
for t in tsteps:
t_star = (c*t)/(R_0**2)
r_exact_N = R_star.ravel() - numpy.nan_to_num(numpy.sum(((12*(1 + nu)*(nu_u - nu)) / \
((1 - 2*nu)*E*R_star*R_star*x_n*numpy.sin(numpy.sqrt(x_n))) ) * \
(3*(nu_u - nu) * (numpy.sin(R_star*numpy.sqrt(x_n)) - R_star*numpy.sqrt(x_n)*numpy.cos(R_star*numpy.sqrt(x_n))) + \
(1 - nu)*(1 - 2*nu)*R_star*R_star*R_star*x_n*numpy.sin(numpy.sqrt(x_n))) * \
numpy.exp(-x_n*t_star),axis=1))
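# r_exact_N is the normalized radial displacement: a series over the
# roots x_n of Cryer's characteristic equation, truncated to the roots
# returned by cryerZeros().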
displacement[t_track, :, 0] = (r_exact_N*U_R_inf)*numpy.cos(phi)*numpy.sin(theta)
displacement[t_track, :, 1] = (r_exact_N*U_R_inf)*numpy.sin(phi)*numpy.sin(theta)
displacement[t_track, :, 2] = (r_exact_N*U_R_inf)*numpy.cos(theta)
t_track += 1
return displacement
def pressure(self, locs):
"""
Compute pressure field at locations.
"""
(npts, dim) = locs.shape
ntpts = tsteps.shape[0]
pressure = numpy.zeros((ntpts, npts, 1), dtype=numpy.float64)
center = numpy.where(~locs.any(axis=1))[0]
x_n = self.cryerZeros()
R = numpy.sqrt(locs[:,0]*locs[:,0] + locs[:,1]*locs[:,1] + locs[:,2]*locs[:,2])
import numpy as np
import pytest
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import robotics as rbt
class TestSlerp:
def test_angle_interpolation(self):
assert_almost_equal(
rbt.angle_interpolation(0.75 * np.pi, -0.75 * np.pi, 0.5), -np.pi
)
def test_quaternion_slerp(self):
q0 = rbt.Quaternion(1, 0, 0, 0)
q1 = rbt.Quaternion(0, 1, 0, 0)
assert rbt.quaternion_slerp(q0, q1, 0.5) == rbt.Quaternion(
np.sqrt(2) / 2, np.sqrt(2) / 2, 0, 0
)
q_list = rbt.quaternion_slerp_array(q0, q1, [0.5, 0.5, 0.5])
assert q_list[0] == rbt.Quaternion(np.sqrt(2) / 2, np.sqrt(2) / 2, 0, 0)
assert q_list[1] == rbt.Quaternion(np.sqrt(2) / 2, np.sqrt(2) / 2, 0, 0)
assert q_list[2] == rbt.Quaternion(np.sqrt(2) / 2, np.sqrt(2) / 2, 0, 0)
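# Halfway between the identity and a 180-degree rotation about x is a
# 90-degree rotation about x, i.e. (cos(pi/4), sin(pi/4), 0, 0) = (sqrt(2)/2, sqrt(2)/2, 0, 0).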
def test_transform2D_slerp(self):
T0 = rbt.transform2D(0, 0, 0.75 * np.pi)
T1 = rbt.transform2D(0, 0, -0.75 * np.pi)
assert_array_almost_equal(
rbt.transform2D_slerp(T0, T1, 0.5),
np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]),
)
def test_transform2D_slerp_array(self):
T0 = rbt.transform2D(0, 0, np.pi * 0.75)
T1 = rbt.transform2D(0, 0, -np.pi * 0.75)
T_list = rbt.transform2D_slerp_array(T0, T1, [0.3, 0.5])
assert_array_almost_equal(
T_list[0],
np.array([[-0.951057, -0.309017, 0], [0.309017, -0.951057, 0], [0, 0, 1]]),
)
assert_array_almost_equal(
T_list[1], np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]),
)
#! /usr/bin/env python
import sys
sys.path.append("../../utils/")
sys.path.append("gmm/")
sys.path.append("lda/")
import re
import os
import time
import pickle
import pylab as pl
import numpy as np
import pandas as pd
import multiprocessing as mp
from gmm.gmm_main import GMM
from gmm.normal import Normal
from lda_main import LDA
import files_operations as fo
def gmmlda_read_data(data_path, seg_path):
Y, seg_list = np.zeros((0,0)), []
for file in os.listdir(data_path):
data = pd.read_excel(data_path+file, header=None).values
Y = data if not Y.shape[0] else np.vstack((Y, data))
import unittest
from ancb import NumpyCircularBuffer
from ancb import ( # type: ignore
star_can_broadcast, can_broadcast
)
from numpy import array_equal, allclose, shares_memory
from numpy import array, zeros, arange, ndarray, ones, empty
from numpy.random import rand, randint
from numpy import fill_diagonal, roll
from itertools import zip_longest
from operator import (
matmul, add, sub, mul, truediv, mod, floordiv, pow,
rshift, lshift, and_, or_, xor, neg, pos, abs, inv, invert,
iadd, iand, ifloordiv, ilshift, imod, imul,
ior, ipow, irshift, isub, itruediv, ixor
)
class TestBroadcastability(unittest.TestCase):
def test_broadcastability(self):
x = zeros((1, 2, 3, 4, 5))
y = zeros((1, 1, 1, 4, 5))
z = zeros((1, 1, 1, 3, 5))
w = zeros(1)
self.assertTrue(can_broadcast(x.shape, y.shape))
self.assertFalse(can_broadcast(x.shape, z.shape))
self.assertFalse(can_broadcast(y.shape, z.shape))
self.assertTrue(can_broadcast(x.shape, x.shape))
self.assertTrue(can_broadcast(y.shape, y.shape))
self.assertTrue(can_broadcast(z.shape, z.shape))
self.assertTrue(can_broadcast(w.shape, w.shape))
self.assertTrue(can_broadcast(x.shape, w.shape))
self.assertTrue(can_broadcast(y.shape, w.shape))
self.assertTrue(can_broadcast(z.shape, w.shape))
def test_star_broadcastability(self):
x = zeros((1, 2, 3, 4, 5))
y = zeros((1, 1, 1, 4, 5))
z = zeros((1, 1, 1, 3, 5))
w = zeros(1)
starexpr = zip_longest(x.shape, y.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, z.shape, fillvalue=1)
self.assertFalse(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, z.shape, fillvalue=1)
self.assertFalse(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, x.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, y.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(z.shape, z.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(w.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(z.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
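# zip_longest(..., fillvalue=1) pads the shorter shape with 1s so the
# pair can be compared dimension by dimension, analogous to NumPy
# treating absent dimensions as size 1.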
class OperatorTestFactory(type):
def __new__(cls, name, bases, dct):
obj = super().__new__(cls, name, bases, dct)
bin_operators = [
matmul, add, sub, mul, truediv, mod, floordiv, pow
]
un_operators = [neg, pos, abs, invert, inv]
bitbin_operators = [rshift, lshift, and_, or_, xor]
i_operators = [
iadd, ifloordiv, imul, ipow, isub, itruediv
]
bit_ioperators = [
ilshift, irshift, ior, iand, ixor, imod
]
def unop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
test = -arange(3, dtype=int)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(-1)
buffer.append(-2)
res = op(buffer)
self.assertIsInstance(res, ndarray)
self.assertTrue(array_equal(res, op(test))) # unfrag
buffer.append(-3)
test -= 1
res = op(buffer)
self.assertIsInstance(res, ndarray)
self.assertTrue(array_equal(res, op(test))) # frag
return f
def bitbinop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
test = arange(1, 4, dtype=int)
x = randint(3)
buffer = NumpyCircularBuffer(data)
buffer.append(1)
buffer.append(2)
buffer.append(3)
res1 = op(buffer, x)
res2 = op(x, buffer)
self.assertIsInstance(res1, ndarray)
self.assertIsInstance(res2, ndarray)
self.assertTrue(array_equal(res1, op(test, x)))
self.assertTrue(array_equal(res2, op(x, test)))
buffer.append(4)
test += 1
res1 = op(buffer, x)
res2 = op(x, buffer)
self.assertIsInstance(res1, ndarray)
self.assertIsInstance(res2, ndarray)
self.assertTrue(array_equal(res1, op(test, x)))
self.assertTrue(array_equal(res2, op(x, test)))
return f
def binop_testcase(op):
def f(self):
data = zeros(3, dtype=float)
test = arange(1, 4, dtype=float)
x = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(1)
buffer.append(2)
buffer.append(3)
res1 = op(buffer, x)
self.assertIsInstance(res1, ndarray)
self.assertTrue(allclose(res1, op(test, x)))
res2 = op(x, buffer)
self.assertIsInstance(res2, ndarray)
self.assertTrue(allclose(res2, op(x, test)))
buffer.append(4)
test += 1
res1 = op(buffer, x)
self.assertIsInstance(res1, ndarray)
self.assertTrue(allclose(res1, op(test, x)))
res2 = op(x, buffer)
self.assertIsInstance(res2, ndarray)
self.assertTrue(allclose(res2, op(x, test)))
return f
def iop_testcase(op):
def f(self):
data = zeros(3, dtype=float)
data2 = zeros(3, dtype=float)
test1 = arange(1, 4, dtype=float)
test2 = arange(2, 5, dtype=float)
x = rand(3)
buffer1 = NumpyCircularBuffer(data)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(1)
buffer1.append(2)
buffer1.append(3)
buffer2.append(1)
buffer2.append(2)
buffer2.append(3)
op(buffer1, x)
op(test1, x)
self.assertIsInstance(buffer1, NumpyCircularBuffer)
self.assertTrue(array_equal(buffer1 + 0, test1))
buffer2.append(4)
op(buffer2, x)
op(test2, x)
self.assertIsInstance(buffer2, NumpyCircularBuffer)
self.assertTrue(array_equal(buffer2 + 0, test2))
return f
def bitiop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
data2 = zeros(3, dtype=int)
test1 = arange(1, 4, dtype=int)
test2 = arange(2, 5, dtype=int)
x = randint(low=1, high=100, size=3)
buffer1 = NumpyCircularBuffer(data)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(1)
buffer1.append(2)
buffer1.append(3)
buffer2.append(1)
buffer2.append(2)
buffer2.append(3)
op(buffer1, x)
op(test1, x)
self.assertIsInstance(buffer1, NumpyCircularBuffer)
self.assertTrue(allclose(buffer1 + 0, test1))
buffer2.append(4)
op(buffer2, x)
op(test2, x)
self.assertIsInstance(buffer2, NumpyCircularBuffer)
self.assertTrue(allclose(buffer2 + 0, test2))
return f
for op in bin_operators:
setattr(obj, 'test_{}'.format(op.__name__), binop_testcase(op))
for op in bitbin_operators:
setattr(obj, 'test_{}'.format(op.__name__), bitbinop_testcase(op))
for op in un_operators:
setattr(obj, 'test_{}'.format(op.__name__), unop_testcase(op))
for op in i_operators:
setattr(obj, 'test_{}'.format(op.__name__), iop_testcase(op))
for op in bit_ioperators:
setattr(obj, 'test_{}'.format(op.__name__), bitiop_testcase(op))
return(obj)
class TestNumpyCircularBuffer(
unittest.TestCase, metaclass=OperatorTestFactory
):
"""
NumpyCircularBuffer tests
"""
def test_init(self):
data = zeros(3)
buffer = NumpyCircularBuffer(data)
self.assertTrue(array_equal(data, buffer))
def test_fragmentation(self):
data = zeros(3)
buffer = NumpyCircularBuffer(data)
self.assertFalse(buffer.fragmented)
buffer.append(0)
self.assertFalse(buffer.fragmented)
buffer.append(1)
self.assertFalse(buffer.fragmented)
buffer.append(2)
self.assertFalse(buffer.fragmented)
buffer.append(3)
self.assertTrue(buffer.fragmented)
buffer.append(4)
self.assertTrue(buffer.fragmented)
buffer.append(5)
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
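# A buffer is "fragmented" once its logical contents wrap past the end of
# the backing array, i.e. they occupy two physical segments; further appends
# or pops eventually bring it back to a contiguous layout.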
def test_matmul_1d1d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
self.assertTrue(allclose(buffer @ C[:1], arange(1) @ C[:1]))
buffer.append(1)
self.assertTrue(allclose(buffer @ C[:2], arange(2) @ C[:2]))
buffer.append(2)
self.assertTrue(allclose(buffer @ C, arange(3) @ C))
buffer.append(3)
self.assertTrue(allclose(buffer @ C, (arange(1, 4)) @ C))
buffer.append(4)
self.assertTrue(allclose(buffer @ C, (arange(2, 5)) @ C))
buffer.append(5)
self.assertTrue(allclose(buffer @ C, (arange(3, 6)) @ C))
buffer.append(6)
self.assertTrue(allclose(buffer @ C, (arange(4, 7)) @ C))
buffer.pop()
self.assertTrue(allclose(buffer @ C[1:], (arange(5, 7)) @ C[1:]))
buffer.pop()
self.assertTrue(allclose(buffer @ C[2:], (arange(6, 7)) @ C[2:]))
def test_matmul_1d2d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 2"""
data = zeros(3)
A = zeros((3, 3))
B = rand(9).reshape(3, 3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, arange(3) @ A))
self.assertTrue(allclose(res_b, arange(3) @ B))
buffer.append(3)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(allclose(res_a, arange(1, 4) @ A))
self.assertTrue(allclose(res_b, arange(1, 4) @ B))
def test_matmul_2d2d(self):
"""Tests buffer @ X where buffer.ndim == 2"""
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
fill_diagonal(A, arange(1, 4))
buffer = NumpyCircularBuffer(data)
buffer.append(arange(3))
buffer.append(arange(3, 6))
buffer.append(arange(6, 9))
test = arange(9).reshape(3, 3)
self.assertTrue(array_equal(buffer, test))
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(arange(9, 12))
test += 3
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
def test_matmul_ndnd(self):
"""Tests buffer @ X where X.ndim > 2 and buffer.ndim > 2"""
data = zeros((3, 3, 3))
A = zeros((3, 3, 3))
B = rand(27).reshape(3, 3, 3)
C = rand(12).reshape(3, 4)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
filler = arange(9).reshape(3, 3)
buffer.append(filler)
buffer.append(filler + 9)
buffer.append(filler + 18)
test = arange(27).reshape(3, 3, 3)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(filler + 27)
test += 9
res_a = buffer @ A
res_b = buffer @ B
res_c = buffer @ C
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
self.assertTrue(allclose(res_c, test @ C))
def test_rmatmul_1d1d(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
res_c = C[:1] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:1] @ arange(1)))
buffer.append(1)
res_c = C[:2] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:2] @ arange(2)))
buffer.append(2)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3)))
buffer.append(3)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(1, 4)))
buffer.append(4)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
buffer.append(6)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(4, 7)))
buffer.pop()
res_c = C[1:] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[1:] @ arange(5, 7)))
buffer.pop()
res_c = C[2:] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[2:] @ arange(6, 7)))
    def test_rmatmul_nd1d(self):
        """Tests X @ buffer where X.ndim >= 1 and buffer.ndim == 1"""
data = zeros(3)
A = zeros(9).reshape(3, 3)
B = arange(9).reshape(3, 3)
C = arange(3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = A @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertTrue(array_equal(A @ buffer, A @ array([0, 1, 2])))
buffer.append(3)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ array([1, 2, 3])))
self.assertTrue(allclose(res_b, B @ array([1, 2, 3])))
self.assertTrue(allclose(res_c, C @ array([1, 2, 3])))
buffer.append(4)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(2, 5)))
self.assertTrue(allclose(res_b, B @ arange(2, 5)))
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(3, 6)))
self.assertTrue(allclose(res_b, B @ arange(3, 6)))
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
def test_rmatmul_1dnd(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim > 1"""
data1 = zeros((3, 3))
data2 = zeros((3, 3, 3))
A = rand(3)
test1 = arange(9).reshape(3, 3)
test2 = arange(27).reshape(3, 3, 3)
buffer1 = NumpyCircularBuffer(data1)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(arange(3))
buffer1.append(arange(3, 6))
buffer1.append(arange(6, 9))
buffer2.append(arange(9).reshape(3, 3))
buffer2.append(arange(9, 18).reshape(3, 3))
buffer2.append(arange(18, 27).reshape(3, 3))
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(9, 12))
buffer2.append(arange(27, 36).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(12, 15))
buffer2.append(arange(36, 45).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(15, 18))
buffer2.append(arange(45, 54).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
def test_rmatmul_2d2d(self):
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
C = rand(12).reshape(4, 3)
fill_diagonal(A, arange(1, 4))
buffer = NumpyCircularBuffer(data)
buffer.append(arange(3))
buffer.append(arange(3, 6))
buffer.append(arange(6, 9))
test = arange(9).reshape(3, 3)
self.assertTrue(array_equal(buffer, test))
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
buffer.append([9, 10, 11])
test += 3
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
def test_rmatmul_ndnd(self):
data = zeros((3, 3, 3))
A = zeros(27).reshape(3, 3, 3)
B = arange(27).reshape(3, 3, 3)
C = arange(3*8*3).reshape(3, 8, 3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
filler = arange(9).reshape(3, 3)
buffer.append(filler)
buffer.append(filler + 9)
buffer.append(filler + 18)
test = arange(27).reshape(3, 3, 3)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
buffer.append(filler + 27)
test += 9
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
def test_matmul2_1d1d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
self.assertTrue(allclose(
buffer.matmul(C[:1], empty(1)), arange(1) @ C[:1]
)
)
buffer.append(1)
self.assertTrue(allclose(
buffer.matmul(C[:2], empty(2)), arange(2) @ C[:2]
)
)
buffer.append(2)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(3) @ C
)
)
buffer.append(3)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(1, 4) @ C
)
)
buffer.append(4)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(2, 5) @ C
)
)
buffer.append(5)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(3, 6) @ C
)
)
buffer.append(6)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(4, 7) @ C
)
)
buffer.pop()
self.assertTrue(allclose(
buffer.matmul(C[1:], empty(2)), arange(5, 7) @ C[1:]
)
)
buffer.pop()
self.assertTrue(allclose(
buffer.matmul(C[2:], empty(1)), arange(6, 7) @ C[2:]
)
)
def test_matmul2_1d2d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 2"""
data = zeros(3)
A = zeros((3, 3))
B = rand(9).reshape(3, 3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = buffer.matmul(A, empty(3))
res_b = buffer.matmul(B, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, arange(3) @ A))
self.assertTrue(allclose(res_b, arange(3) @ B))
buffer.append(3)
res_a = buffer.matmul(A, empty(3))
res_b = buffer.matmul(B, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(allclose(res_a, arange(1, 4) @ A))
self.assertTrue(allclose(res_b, arange(1, 4) @ B))
def test_matmul2_2d2d(self):
"""Tests buffer @ X where buffer.ndim == 2"""
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
fill_diagonal(A, arange(1, 4))
buffer = NumpyCircularBuffer(data)
buffer.append(arange(3))
buffer.append(arange(3, 6))
buffer.append(arange(6, 9))
test = arange(9).reshape(3, 3)
self.assertTrue(array_equal(buffer, test))
res_a = buffer.matmul(A, empty((3, 3)))
res_b = buffer.matmul(B, empty((3, 3)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(arange(9, 12))
test += 3
res_a = buffer.matmul(A, empty((3, 3)))
res_b = buffer.matmul(B, empty((3, 3)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
def test_matmul2_ndnd(self):
"""Tests buffer @ X where X.ndim > 2 and buffer.ndim > 2"""
data = zeros((3, 3, 3))
A = zeros((3, 3, 3))
B = rand(27).reshape(3, 3, 3)
C = rand(12).reshape(3, 4)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
filler = arange(9).reshape(3, 3)
buffer.append(filler)
buffer.append(filler + 9)
buffer.append(filler + 18)
test = arange(27).reshape(3, 3, 3)
res_a = buffer.matmul(A, empty((3, 3, 3)))
res_b = buffer.matmul(B, empty((3, 3, 3)))
res_c = buffer.matmul(C, empty((3, 3, 4)))
        self.assertIsInstance(res_a, ndarray)
        self.assertIsInstance(res_b, ndarray)
        self.assertIsInstance(res_c, ndarray)
        self.assertTrue(array_equal(res_a, test @ A))
        self.assertTrue(allclose(res_b, test @ B))
        self.assertTrue(allclose(res_c, test @ C))
buffer.append(filler + 27)
test += 9
res_a = buffer.matmul(A, empty((3, 3, 3)))
res_b = buffer.matmul(B, empty((3, 3, 3)))
res_c = buffer.matmul(C, empty((3, 3, 4)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
self.assertTrue(allclose(res_c, test @ C))
def test_rmatmul2_1d1d(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
res_c = buffer.rmatmul(C[:1], empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:1] @ arange(1)))
buffer.append(1)
res_c = buffer.rmatmul(C[:2], empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:2] @ arange(2)))
buffer.append(2)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3)))
buffer.append(3)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(1, 4)))
buffer.append(4)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
buffer.append(6)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(4, 7)))
buffer.pop()
res_c = buffer.rmatmul(C[1:], empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[1:] @ arange(5, 7)))
buffer.pop()
res_c = buffer.rmatmul(C[2:], empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[2:] @ arange(6, 7)))
    def test_rmatmul2_nd1d(self):
        """Tests X @ buffer where X.ndim >= 1 and buffer.ndim == 1"""
data = zeros(3)
A = zeros(9).reshape(3, 3)
B = arange(9).reshape(3, 3)
C = arange(3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
        res_a = buffer.rmatmul(A, empty(3))
        self.assertIsInstance(res_a, ndarray)
        self.assertTrue(array_equal(res_a, A @ array([0, 1, 2])))
buffer.append(3)
res_a = buffer.rmatmul(A, empty(3))
res_b = buffer.rmatmul(B, empty(3))
res_c = buffer.rmatmul(C, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ array([1, 2, 3])))
self.assertTrue(allclose(res_b, B @ array([1, 2, 3])))
self.assertTrue(allclose(res_c, C @ array([1, 2, 3])))
buffer.append(4)
res_a = buffer.rmatmul(A, empty(3))
res_b = buffer.rmatmul(B, empty(3))
res_c = buffer.rmatmul(C, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(2, 5)))
self.assertTrue(allclose(res_b, B @ arange(2, 5)))
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_a = buffer.rmatmul(A, empty(3))
res_b = buffer.rmatmul(B, empty(3))
res_c = buffer.rmatmul(C, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(3, 6)))
self.assertTrue(allclose(res_b, B @ arange(3, 6)))
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
def test_rmatmul2_1dnd(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim > 1"""
data1 = zeros((3, 3))
data2 = zeros((3, 3, 3))
A = rand(3)
test1 = arange(9).reshape(3, 3)
test2 = arange(27).reshape(3, 3, 3)
buffer1 = NumpyCircularBuffer(data1)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(arange(3))
buffer1.append( | arange(3, 6) | numpy.arange |
"""
Copyright 2013 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy as cp
import cvxpy.settings as s
from cvxpy.transforms.partial_optimize import partial_optimize
from cvxpy.expressions.variable import Variable
from cvxpy.expressions.constants import Parameter, Constant
from cvxpy.reductions.solvers.defines import INSTALLED_MI_SOLVERS
import numpy as np
from cvxpy import Problem, Minimize
from cvxpy.tests.base_test import BaseTest
import unittest
import scipy.sparse as sp
import scipy.stats
class TestAtoms(BaseTest):
""" Unit tests for the atoms module. """
def setUp(self) -> None:
self.a = Variable(name='a')
self.x = Variable(2, name='x')
self.y = Variable(2, name='y')
self.A = Variable((2, 2), name='A')
self.B = Variable((2, 2), name='B')
self.C = Variable((3, 2), name='C')
def test_add_expr_copy(self) -> None:
"""Test the copy function for AddExpresion class.
"""
atom = self.x + self.y
copy = atom.copy()
self.assertTrue(type(copy) is type(atom))
# A new object is constructed, so copy.args == atom.args but copy.args
# is not atom.args.
self.assertEqual(copy.args, atom.args)
self.assertFalse(copy.args is atom.args)
self.assertEqual(copy.get_data(), atom.get_data())
# Test copy with new args
copy = atom.copy(args=[self.A, self.B])
self.assertTrue(type(copy) is type(atom))
self.assertTrue(copy.args[0] is self.A)
self.assertTrue(copy.args[1] is self.B)
self.assertEqual(copy.get_data(), atom.get_data())
def test_norm_inf(self) -> None:
"""Test the norm_inf class.
"""
exp = self.x+self.y
atom = cp.norm_inf(exp)
# self.assertEqual(atom.name(), "norm_inf(x + y)")
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONVEX)
assert atom.is_convex()
assert (-atom).is_concave()
self.assertEqual(cp.norm_inf(atom).curvature, s.CONVEX)
self.assertEqual(cp.norm_inf(-atom).curvature, s.CONVEX)
def test_norm1(self) -> None:
"""Test the norm1 class.
"""
exp = self.x+self.y
atom = cp.norm1(exp)
# self.assertEqual(atom.name(), "norm1(x + y)")
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONVEX)
self.assertEqual(cp.norm1(atom).curvature, s.CONVEX)
self.assertEqual(cp.norm1(-atom).curvature, s.CONVEX)
def test_list_input(self) -> None:
"""Test that list input is rejected.
"""
with self.assertRaises(Exception) as cm:
cp.max([cp.Variable(), 1])
self.assertTrue(str(cm.exception) in (
"The input must be a single CVXPY Expression, not a list. "
"Combine Expressions using atoms such as bmat, hstack, and vstack."))
with self.assertRaises(Exception) as cm:
cp.norm([1, cp.Variable()])
self.assertTrue(str(cm.exception) in (
"The input must be a single CVXPY Expression, not a list. "
"Combine Expressions using atoms such as bmat, hstack, and vstack."))
x = cp.Variable()
y = cp.Variable()
with self.assertRaises(Exception) as cm:
cp.norm([x, y]) <= 1
self.assertTrue(str(cm.exception) in (
"The input must be a single CVXPY Expression, not a list. "
"Combine Expressions using atoms such as bmat, hstack, and vstack."))
def test_quad_form(self) -> None:
"""Test quad_form atom.
"""
P = Parameter((2, 2), symmetric=True)
expr = cp.quad_form(self.x, P)
assert not expr.is_dcp()
def test_power(self) -> None:
"""Test the power class.
"""
from fractions import Fraction
for shape in [(1, 1), (3, 1), (2, 3)]:
x = Variable(shape)
y = Variable(shape)
exp = x + y
for p in 0, 1, 2, 3, 2.7, .67, -1, -2.3, Fraction(4, 5):
atom = cp.power(exp, p)
self.assertEqual(atom.shape, shape)
if p > 1 or p < 0:
self.assertEqual(atom.curvature, s.CONVEX)
elif p == 1:
self.assertEqual(atom.curvature, s.AFFINE)
elif p == 0:
self.assertEqual(atom.curvature, s.CONSTANT)
else:
self.assertEqual(atom.curvature, s.CONCAVE)
if p != 1:
self.assertEqual(atom.sign, s.NONNEG)
# Test copy with args=None
copy = atom.copy()
self.assertTrue(type(copy) is type(atom))
# A new object is constructed, so copy.args == atom.args but copy.args
# is not atom.args.
self.assertEqual(copy.args, atom.args)
self.assertFalse(copy.args is atom.args)
self.assertEqual(copy.get_data(), atom.get_data())
# Test copy with new args
copy = atom.copy(args=[self.y])
self.assertTrue(type(copy) is type(atom))
self.assertTrue(copy.args[0] is self.y)
self.assertEqual(copy.get_data(), atom.get_data())
assert cp.power(-1, 2).value == 1
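        # Summary of the DCP rules exercised above: x**p is convex for p < 0 or
        # p > 1, affine for p == 1, constant for p == 0, and concave for
        # 0 < p < 1; for every p != 1 the result is sign-restricted to nonneg.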
# Test the geo_mean class.
def test_geo_mean(self) -> None:
atom = cp.geo_mean(self.x)
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONCAVE)
self.assertEqual(atom.sign, s.NONNEG)
# Test copy with args=None
copy = atom.copy()
self.assertTrue(type(copy) is type(atom))
# A new object is constructed, so copy.args == atom.args but copy.args
# is not atom.args.
self.assertEqual(copy.args, atom.args)
self.assertFalse(copy.args is atom.args)
self.assertEqual(copy.get_data(), atom.get_data())
# Test copy with new args
copy = atom.copy(args=[self.y])
self.assertTrue(type(copy) is type(atom))
self.assertTrue(copy.args[0] is self.y)
self.assertEqual(copy.get_data(), atom.get_data())
# Test the harmonic_mean class.
def test_harmonic_mean(self) -> None:
atom = cp.harmonic_mean(self.x)
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONCAVE)
self.assertEqual(atom.sign, s.NONNEG)
# Test the pnorm class.
def test_pnorm(self) -> None:
atom = cp.pnorm(self.x, p=1.5)
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONVEX)
self.assertEqual(atom.sign, s.NONNEG)
atom = cp.pnorm(self.x, p=1)
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONVEX)
self.assertEqual(atom.sign, s.NONNEG)
atom = cp.pnorm(self.x, p=2)
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONVEX)
self.assertEqual(atom.sign, s.NONNEG)
expr = cp.norm(self.A, 2, axis=0)
self.assertEqual(expr.shape, (2,))
atom = cp.pnorm(self.x, p='inf')
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONVEX)
self.assertEqual(atom.sign, s.NONNEG)
atom = cp.pnorm(self.x, p='Inf')
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONVEX)
self.assertEqual(atom.sign, s.NONNEG)
atom = cp.pnorm(self.x, p=np.inf)
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONVEX)
self.assertEqual(atom.sign, s.NONNEG)
atom = cp.pnorm(self.x, p=.5)
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONCAVE)
self.assertEqual(atom.sign, s.NONNEG)
atom = cp.pnorm(self.x, p=.7)
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONCAVE)
self.assertEqual(atom.sign, s.NONNEG)
atom = cp.pnorm(self.x, p=-.1)
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONCAVE)
self.assertEqual(atom.sign, s.NONNEG)
atom = cp.pnorm(self.x, p=-1)
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONCAVE)
self.assertEqual(atom.sign, s.NONNEG)
atom = cp.pnorm(self.x, p=-1.3)
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONCAVE)
self.assertEqual(atom.sign, s.NONNEG)
# Test copy with args=None
copy = atom.copy()
self.assertTrue(type(copy) is type(atom))
# A new object is constructed, so copy.args == atom.args but copy.args
# is not atom.args.
self.assertEqual(copy.args, atom.args)
self.assertFalse(copy.args is atom.args)
self.assertEqual(copy.get_data(), atom.get_data())
# Test copy with new args
copy = atom.copy(args=[self.y])
self.assertTrue(type(copy) is type(atom))
self.assertTrue(copy.args[0] is self.y)
self.assertEqual(copy.get_data(), atom.get_data())
def test_matrix_norms(self) -> None:
"""
Matrix 1-norm, 2-norm (sigma_max), infinity-norm,
Frobenius norm, and nuclear-norm.
"""
for p in [1, 2, np.inf, 'fro', 'nuc']:
for var in [self.A, self.C]:
atom = cp.norm(var, p)
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONVEX)
self.assertEqual(atom.sign, s.NONNEG)
var.value = np.random.randn(*var.shape)
self.assertAlmostEqual(atom.value, np.linalg.norm(var.value, ord=p))
def test_quad_over_lin(self) -> None:
# Test quad_over_lin DCP.
atom = cp.quad_over_lin(cp.square(self.x), self.a)
self.assertEqual(atom.curvature, s.CONVEX)
atom = cp.quad_over_lin(-cp.square(self.x), self.a)
self.assertEqual(atom.curvature, s.CONVEX)
atom = cp.quad_over_lin(cp.sqrt(self.x), self.a)
self.assertEqual(atom.curvature, s.UNKNOWN)
assert not atom.is_dcp()
# Test quad_over_lin shape validation.
with self.assertRaises(Exception) as cm:
cp.quad_over_lin(self.x, self.x)
self.assertEqual(str(cm.exception),
"The second argument to quad_over_lin must be a scalar.")
def test_elemwise_arg_count(self) -> None:
"""Test arg count for max and min variants.
"""
with self.assertRaises(Exception) as cm:
cp.maximum(1)
self.assertTrue(str(cm.exception) in (
"__init__() takes at least 3 arguments (2 given)",
"__init__() missing 1 required positional argument: 'arg2'"))
with self.assertRaises(Exception) as cm:
cp.minimum(1)
self.assertTrue(str(cm.exception) in (
"__init__() takes at least 3 arguments (2 given)",
"__init__() missing 1 required positional argument: 'arg2'"))
def test_matrix_frac(self) -> None:
"""Test for the matrix_frac atom.
"""
atom = cp.matrix_frac(self.x, self.A)
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONVEX)
# Test matrix_frac shape validation.
with self.assertRaises(Exception) as cm:
cp.matrix_frac(self.x, self.C)
self.assertEqual(str(cm.exception),
"The second argument to matrix_frac must be a square matrix.")
with self.assertRaises(Exception) as cm:
cp.matrix_frac(Variable(3), self.A)
self.assertEqual(str(cm.exception),
"The arguments to matrix_frac have incompatible dimensions.")
def test_max(self) -> None:
"""Test max.
"""
# One arg, test sign.
self.assertEqual(cp.max(1).sign, s.NONNEG)
self.assertEqual(cp.max(-2).sign, s.NONPOS)
self.assertEqual(cp.max(Variable()).sign, s.UNKNOWN)
self.assertEqual(cp.max(0).sign, s.ZERO)
# Test with axis argument.
self.assertEqual(cp.max(Variable(2), axis=0, keepdims=True).shape, (1,))
self.assertEqual(cp.max(Variable(2), axis=1).shape, (2,))
self.assertEqual(cp.max(Variable((2, 3)), axis=0, keepdims=True).shape, (1, 3))
self.assertEqual(cp.max(Variable((2, 3)), axis=1).shape, (2,))
# Invalid axis.
with self.assertRaises(Exception) as cm:
cp.max(self.x, axis=4)
self.assertEqual(str(cm.exception),
"Invalid argument for axis.")
def test_min(self) -> None:
"""Test min.
"""
# One arg, test sign.
self.assertEqual(cp.min(1).sign, s.NONNEG)
self.assertEqual(cp.min(-2).sign, s.NONPOS)
self.assertEqual(cp.min(Variable()).sign, s.UNKNOWN)
self.assertEqual(cp.min(0).sign, s.ZERO)
# Test with axis argument.
self.assertEqual(cp.min(Variable(2), axis=0).shape, tuple())
self.assertEqual(cp.min(Variable(2), axis=1).shape, (2,))
self.assertEqual(cp.min(Variable((2, 3)), axis=0).shape, (3,))
self.assertEqual(cp.min(Variable((2, 3)), axis=1).shape, (2,))
# Invalid axis.
with self.assertRaises(Exception) as cm:
cp.min(self.x, axis=4)
self.assertEqual(str(cm.exception),
"Invalid argument for axis.")
# Test sign logic for maximum.
def test_maximum_sign(self) -> None:
# Two args.
self.assertEqual(cp.maximum(1, 2).sign, s.NONNEG)
self.assertEqual(cp.maximum(1, Variable()).sign, s.NONNEG)
self.assertEqual(cp.maximum(1, -2).sign, s.NONNEG)
self.assertEqual(cp.maximum(1, 0).sign, s.NONNEG)
self.assertEqual(cp.maximum(Variable(), 0).sign, s.NONNEG)
self.assertEqual(cp.maximum(Variable(), Variable()).sign, s.UNKNOWN)
self.assertEqual(cp.maximum(Variable(), -2).sign, s.UNKNOWN)
self.assertEqual(cp.maximum(0, 0).sign, s.ZERO)
self.assertEqual(cp.maximum(0, -2).sign, s.ZERO)
self.assertEqual(cp.maximum(-3, -2).sign, s.NONPOS)
# Many args.
self.assertEqual(cp.maximum(-2, Variable(), 0, -1, Variable(), 1).sign,
s.NONNEG)
# Promotion.
self.assertEqual(cp.maximum(1, Variable(2)).sign,
s.NONNEG)
self.assertEqual(cp.maximum(1, Variable(2)).shape,
(2,))
# Test sign logic for minimum.
def test_minimum_sign(self) -> None:
# Two args.
self.assertEqual(cp.minimum(1, 2).sign, s.NONNEG)
self.assertEqual(cp.minimum(1, Variable()).sign, s.UNKNOWN)
self.assertEqual(cp.minimum(1, -2).sign, s.NONPOS)
self.assertEqual(cp.minimum(1, 0).sign, s.ZERO)
self.assertEqual(cp.minimum(Variable(), 0).sign, s.NONPOS)
self.assertEqual(cp.minimum(Variable(), Variable()).sign, s.UNKNOWN)
self.assertEqual(cp.minimum(Variable(), -2).sign, s.NONPOS)
self.assertEqual(cp.minimum(0, 0).sign, s.ZERO)
self.assertEqual(cp.minimum(0, -2).sign, s.NONPOS)
self.assertEqual(cp.minimum(-3, -2).sign, s.NONPOS)
# Many args.
self.assertEqual(cp.minimum(-2, Variable(), 0, -1, Variable(), 1).sign,
s.NONPOS)
# Promotion.
self.assertEqual(cp.minimum(-1, Variable(2)).sign,
s.NONPOS)
self.assertEqual(cp.minimum(-1, Variable(2)).shape,
(2,))
def test_sum(self) -> None:
"""Test the sum atom.
"""
self.assertEqual(cp.sum(1).sign, s.NONNEG)
self.assertEqual(cp.sum(Constant([1, -1])).sign, s.UNKNOWN)
self.assertEqual(cp.sum(Constant([1, -1])).curvature, s.CONSTANT)
self.assertEqual(cp.sum(Variable(2)).sign, s.UNKNOWN)
self.assertEqual(cp.sum(Variable(2)).shape, tuple())
self.assertEqual(cp.sum(Variable(2)).curvature, s.AFFINE)
self.assertEqual(cp.sum(Variable((2, 1)), keepdims=True).shape, (1, 1))
# Mixed curvature.
mat = np.array([[1, -1]])
self.assertEqual(cp.sum(mat @ cp.square(Variable(2))).curvature, s.UNKNOWN)
# Test with axis argument.
self.assertEqual(cp.sum(Variable(2), axis=0).shape, tuple())
self.assertEqual(cp.sum(Variable(2), axis=1).shape, (2,))
self.assertEqual(cp.sum(Variable((2, 3)), axis=0, keepdims=True).shape, (1, 3))
self.assertEqual(cp.sum(Variable((2, 3)), axis=0, keepdims=False).shape, (3,))
self.assertEqual(cp.sum(Variable((2, 3)), axis=1).shape, (2,))
# Invalid axis.
with self.assertRaises(Exception) as cm:
cp.sum(self.x, axis=4)
self.assertEqual(str(cm.exception),
"Invalid argument for axis.")
A = sp.eye(3)
self.assertEqual(cp.sum(A).value, 3)
A = sp.eye(3)
self.assertItemsAlmostEqual(cp.sum(A, axis=0).value, [1, 1, 1])
def test_multiply(self) -> None:
"""Test the multiply atom.
"""
self.assertEqual(cp.multiply([1, -1], self.x).sign, s.UNKNOWN)
self.assertEqual(cp.multiply([1, -1], self.x).curvature, s.AFFINE)
self.assertEqual(cp.multiply([1, -1], self.x).shape, (2,))
pos_param = Parameter(2, nonneg=True)
neg_param = Parameter(2, nonpos=True)
self.assertEqual(cp.multiply(pos_param, pos_param).sign, s.NONNEG)
self.assertEqual(cp.multiply(pos_param, neg_param).sign, s.NONPOS)
self.assertEqual(cp.multiply(neg_param, neg_param).sign, s.NONNEG)
self.assertEqual(cp.multiply(neg_param, cp.square(self.x)).curvature, s.CONCAVE)
# Test promotion.
self.assertEqual(cp.multiply([1, -1], 1).shape, (2,))
self.assertEqual(cp.multiply(1, self.C).shape, self.C.shape)
self.assertEqual(cp.multiply(self.x, [1, -1]).sign, s.UNKNOWN)
self.assertEqual(cp.multiply(self.x, [1, -1]).curvature, s.AFFINE)
self.assertEqual(cp.multiply(self.x, [1, -1]).shape, (2,))
# Test the vstack class.
def test_vstack(self) -> None:
atom = cp.vstack([self.x, self.y, self.x])
self.assertEqual(atom.name(), "Vstack(x, y, x)")
self.assertEqual(atom.shape, (3, 2))
atom = cp.vstack([self.A, self.C, self.B])
self.assertEqual(atom.name(), "Vstack(A, C, B)")
self.assertEqual(atom.shape, (7, 2))
entries = []
for i in range(self.x.shape[0]):
entries.append(self.x[i])
atom = cp.vstack(entries)
self.assertEqual(atom.shape, (2, 1))
# self.assertEqual(atom[1,0].name(), "vstack(x[0,0], x[1,0])[1,0]")
with self.assertRaises(Exception) as cm:
cp.vstack([self.C, 1])
self.assertEqual(str(cm.exception),
"All the input dimensions except for axis 0 must match exactly.")
with self.assertRaises(Exception) as cm:
cp.vstack([self.x, Variable(3)])
self.assertEqual(str(cm.exception),
"All the input dimensions except for axis 0 must match exactly.")
with self.assertRaises(TypeError) as cm:
cp.vstack()
def test_reshape(self) -> None:
"""Test the reshape class.
"""
expr = cp.reshape(self.A, (4, 1))
self.assertEqual(expr.sign, s.UNKNOWN)
self.assertEqual(expr.curvature, s.AFFINE)
self.assertEqual(expr.shape, (4, 1))
expr = cp.reshape(expr, (2, 2))
self.assertEqual(expr.shape, (2, 2))
expr = cp.reshape(cp.square(self.x), (1, 2))
self.assertEqual(expr.sign, s.NONNEG)
self.assertEqual(expr.curvature, s.CONVEX)
self.assertEqual(expr.shape, (1, 2))
with self.assertRaises(Exception) as cm:
cp.reshape(self.C, (5, 4))
self.assertEqual(str(cm.exception),
"Invalid reshape dimensions (5, 4).")
# Test C-style reshape.
a = np.arange(10)
A_np = np.reshape(a, (5, 2), order='C')
A_cp = cp.reshape(a, (5, 2), order='C')
self.assertItemsAlmostEqual(A_np, A_cp.value)
X = cp.Variable((5, 2))
prob = cp.Problem(cp.Minimize(0), [X == A_cp])
prob.solve()
self.assertItemsAlmostEqual(A_np, X.value)
a_np = np.reshape(A_np, 10, order='C')
a_cp = cp.reshape(A_cp, 10, order='C')
self.assertItemsAlmostEqual(a_np, a_cp.value)
x = cp.Variable(10)
prob = cp.Problem(cp.Minimize(0), [x == a_cp])
prob.solve()
self.assertItemsAlmostEqual(a_np, x.value)
# Test more complex C-style reshape: matrix to another matrix
b = np.array([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11],
])
b_reshaped = b.reshape((2, 6), order='C')
X = cp.Variable(b.shape)
X_reshaped = cp.reshape(X, (2, 6), order='C')
prob = cp.Problem(cp.Minimize(0), [X_reshaped == b_reshaped])
prob.solve()
self.assertItemsAlmostEqual(b_reshaped, X_reshaped.value)
self.assertItemsAlmostEqual(b, X.value)
def test_vec(self) -> None:
"""Test the vec atom.
"""
expr = cp.vec(self.C)
self.assertEqual(expr.sign, s.UNKNOWN)
self.assertEqual(expr.curvature, s.AFFINE)
self.assertEqual(expr.shape, (6,))
expr = cp.vec(self.x)
self.assertEqual(expr.shape, (2,))
expr = cp.vec(cp.square(self.a))
self.assertEqual(expr.sign, s.NONNEG)
self.assertEqual(expr.curvature, s.CONVEX)
self.assertEqual(expr.shape, (1,))
def test_diag(self) -> None:
"""Test the diag atom.
"""
expr = cp.diag(self.x)
self.assertEqual(expr.sign, s.UNKNOWN)
self.assertEqual(expr.curvature, s.AFFINE)
self.assertEqual(expr.shape, (2, 2))
expr = cp.diag(self.A)
self.assertEqual(expr.sign, s.UNKNOWN)
self.assertEqual(expr.curvature, s.AFFINE)
self.assertEqual(expr.shape, (2,))
expr = cp.diag(self.x.T)
self.assertEqual(expr.sign, s.UNKNOWN)
self.assertEqual(expr.curvature, s.AFFINE)
self.assertEqual(expr.shape, (2, 2))
psd_matrix = np.array([[1, -1], [-1, 1]])
expr = cp.diag(psd_matrix)
self.assertEqual(expr.sign, s.NONNEG)
self.assertEqual(expr.curvature, s.CONSTANT)
self.assertEqual(expr.shape, (2,))
with self.assertRaises(Exception) as cm:
cp.diag(self.C)
self.assertEqual(str(cm.exception),
"Argument to diag must be a vector or square matrix.")
# Test that diag is PSD
w = np.array([1.0, 2.0])
expr = cp.diag(w)
self.assertTrue(expr.is_psd())
expr = cp.diag(-w)
self.assertTrue(expr.is_nsd())
expr = cp.diag(np.array([1, -1]))
self.assertFalse(expr.is_psd())
self.assertFalse(expr.is_nsd())
def test_trace(self) -> None:
"""Test the trace atom.
"""
expr = cp.trace(self.A)
self.assertEqual(expr.sign, s.UNKNOWN)
self.assertEqual(expr.curvature, s.AFFINE)
self.assertEqual(expr.shape, tuple())
with self.assertRaises(Exception) as cm:
cp.trace(self.C)
self.assertEqual(str(cm.exception),
"Argument to trace must be a square matrix.")
def test_log1p(self) -> None:
"""Test the log1p atom.
"""
expr = cp.log1p(1)
self.assertEqual(expr.sign, s.NONNEG)
self.assertEqual(expr.curvature, s.CONSTANT)
self.assertEqual(expr.shape, tuple())
expr = cp.log1p(-0.5)
self.assertEqual(expr.sign, s.NONPOS)
def test_upper_tri(self) -> None:
with self.assertRaises(Exception) as cm:
cp.upper_tri(self.C)
self.assertEqual(str(cm.exception),
"Argument to upper_tri must be a square matrix.")
def test_vec_to_upper_tri(self) -> None:
from cvxpy.atoms.affine.upper_tri import vec_to_upper_tri
x = Variable(shape=(3,))
X = vec_to_upper_tri(x)
x.value = np.array([1, 2, 3])
actual = X.value
expect = np.array([[1, 2], [0, 3]])
assert np.allclose(actual, expect)
y = Variable(shape=(1,))
y.value = np.array([4])
Y = vec_to_upper_tri(y, strict=True)
actual = Y.value
expect = np.array([[0, 4], [0, 0]])
assert np.allclose(actual, expect)
A_expect = np.array([[0, 11, 12, 13],
[0, 0, 16, 17],
[0, 0, 0, 21],
[0, 0, 0, 0]])
a = np.array([11, 12, 13, 16, 17, 21])
A_actual = vec_to_upper_tri(a, strict=True).value
assert np.allclose(A_actual, A_expect)
def test_huber(self) -> None:
# Valid.
cp.huber(self.x, 1)
with self.assertRaises(Exception) as cm:
cp.huber(self.x, -1)
self.assertEqual(str(cm.exception),
"M must be a non-negative scalar constant.")
with self.assertRaises(Exception) as cm:
cp.huber(self.x, [1, 1])
self.assertEqual(str(cm.exception),
"M must be a non-negative scalar constant.")
# M parameter.
M = Parameter(nonneg=True)
# Valid.
cp.huber(self.x, M)
M.value = 1
self.assertAlmostEqual(cp.huber(2, M).value, 3)
# Invalid.
M = Parameter(nonpos=True)
with self.assertRaises(Exception) as cm:
cp.huber(self.x, M)
self.assertEqual(str(cm.exception),
"M must be a non-negative scalar constant.")
# Test copy with args=None
atom = cp.huber(self.x, 2)
copy = atom.copy()
self.assertTrue(type(copy) is type(atom))
# A new object is constructed, so copy.args == atom.args but copy.args
# is not atom.args.
self.assertEqual(copy.args, atom.args)
self.assertFalse(copy.args is atom.args)
# As get_data() returns a Constant, we have to check the value
self.assertEqual(copy.get_data()[0].value, atom.get_data()[0].value)
# Test copy with new args
copy = atom.copy(args=[self.y])
self.assertTrue(type(copy) is type(atom))
self.assertTrue(copy.args[0] is self.y)
self.assertEqual(copy.get_data()[0].value, atom.get_data()[0].value)
def test_sum_largest(self) -> None:
"""Test the sum_largest atom and related atoms.
"""
with self.assertRaises(Exception) as cm:
cp.sum_largest(self.x, -1)
self.assertEqual(str(cm.exception),
"Second argument must be a positive integer.")
with self.assertRaises(Exception) as cm:
cp.lambda_sum_largest(self.x, 2.4)
self.assertEqual(str(cm.exception),
"First argument must be a square matrix.")
with self.assertRaises(Exception) as cm:
cp.lambda_sum_largest(Variable((2, 2)), 2.4)
self.assertEqual(str(cm.exception),
"Second argument must be a positive integer.")
with self.assertRaises(ValueError) as cm:
cp.lambda_sum_largest([[1, 2], [3, 4]], 2).value
self.assertEqual(str(cm.exception),
"Input matrix was not Hermitian/symmetric.")
# Test copy with args=None
atom = cp.sum_largest(self.x, 2)
copy = atom.copy()
self.assertTrue(type(copy) is type(atom))
# A new object is constructed, so copy.args == atom.args but copy.args
# is not atom.args.
self.assertEqual(copy.args, atom.args)
self.assertFalse(copy.args is atom.args)
self.assertEqual(copy.get_data(), atom.get_data())
# Test copy with new args
copy = atom.copy(args=[self.y])
self.assertTrue(type(copy) is type(atom))
self.assertTrue(copy.args[0] is self.y)
self.assertEqual(copy.get_data(), atom.get_data())
# Test copy with lambda_sum_largest, which is in fact an AddExpression
atom = cp.lambda_sum_largest(Variable((2, 2)), 2)
copy = atom.copy()
self.assertTrue(type(copy) is type(atom))
def test_sum_smallest(self) -> None:
"""Test the sum_smallest atom and related atoms.
"""
with self.assertRaises(Exception) as cm:
cp.sum_smallest(self.x, -1)
self.assertEqual(str(cm.exception),
"Second argument must be a positive integer.")
with self.assertRaises(Exception) as cm:
cp.lambda_sum_smallest(Variable((2, 2)), 2.4)
self.assertEqual(str(cm.exception),
"Second argument must be a positive integer.")
def test_index(self) -> None:
"""Test the copy function for index.
"""
# Test copy with args=None
shape = (5, 4)
A = Variable(shape)
atom = A[0:2, 0:1]
copy = atom.copy()
self.assertTrue(type(copy) is type(atom))
# A new object is constructed, so copy.args == atom.args but copy.args
# is not atom.args.
self.assertEqual(copy.args, atom.args)
self.assertFalse(copy.args is atom.args)
self.assertEqual(copy.get_data(), atom.get_data())
# Test copy with new args
B = Variable((4, 5))
copy = atom.copy(args=[B])
self.assertTrue(type(copy) is type(atom))
self.assertTrue(copy.args[0] is B)
self.assertEqual(copy.get_data(), atom.get_data())
def test_bmat(self) -> None:
"""Test the bmat atom.
"""
v_np = np.ones((3, 1))
expr = np.vstack([ | np.hstack([v_np, v_np]) | numpy.hstack |
import argparse
import os
import sys
import numpy as np
import pdb
from tqdm import tqdm
import cv2
import glob
import matplotlib
#matplotlib.use("Agg")
#matplotlib.use("wx")
#matplotlib.use('tkagg')
import matplotlib.pyplot as plt
import scipy
from scipy.special import softmax
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import torch.nn as nn
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from PIL import Image
# class load_data(Dataset):
# def __init__(self,args,img_path):
# super().__init__()
# self.args = args
# self.img_path = img_path
# def __getitem__(self,img_path):
# image = Image.open(self.img_path).convert('RGB')
# image = np.array(image).astype(np.float32).transpose((2, 0, 1))
# image = torch.from_numpy(image).float()
# return image
def get_model(nclass,args):
model = DeepLab(num_classes=nclass,
backbone=args.backbone,
output_stride=args.out_stride,
sync_bn=args.sync_bn,
freeze_bn=args.freeze_bn)
# Using cuda
if args.cuda:
model = torch.nn.DataParallel(model, device_ids=args.gpu_ids)
patch_replication_callback(model)
model = model.cuda()
checkpoint = torch.load(args.resume)
if args.cuda:
model.module.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
return model
def get_pred(img_path,model,args):
model.eval()
image = Image.open(img_path).convert('RGB')
#image = image.resize((512,512), Image.ANTIALIAS)
image = np.array(image).astype(np.float32).transpose((2, 0, 1))
image = np.expand_dims(image, axis=0)
image = torch.from_numpy(image).float()
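    # The PIL image arrives as HWC; the transpose above reorders it to CHW and
    # expand_dims prepends the batch axis, giving the NCHW layout expected by
    # the PyTorch model.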
if args.cuda:
image = image.cuda()
with torch.no_grad():
output = model(image)
#pdb.set_trace()
# normalize = nn.Softmax(dim=1)
# output = normalize(output)
pred = output.data.cpu().numpy()
return pred
def F1_loss(pred,target):
    # pred and target are boolean masks, so all set operations are done with
    # logical/xor ops (np.subtract is not defined for boolean arrays, and
    # np.logical_or only takes two operands -- a third would be its `out` arg).
    N = np.logical_or(pred, target)
    Tp = np.logical_and(pred, target)   # true positives
    Fn = np.logical_xor(target, Tp)     # target \ Tp (Tp is a subset of target)
    Fp = np.logical_xor(pred, Tp)       # pred \ Tp
    Tn = np.logical_xor(N, np.logical_or(np.logical_or(Tp, Fp), Fn))
    precision = np.sum(Tp)/(np.sum(Tp)+np.sum(Fp))
    recall = np.sum(Tp)/(np.sum(Tp)+np.sum(Fn))
    F1 = (2*np.sum(Tp))/(2*np.sum(Tp)+np.sum(Fn)+np.sum(Fp))
    accuracy = np.sum(np.logical_or(Tp, Tn))/np.sum(N)
    return F1, accuracy, precision, recall
def F1_rwi(pred,target):
#pred = pred[:,:,0] # using only the red channel
#target = target[:,:,0]
    N = np.logical_or(pred, target)
    Tp = np.logical_and(pred, target)
    Fn = np.bitwise_xor(target, Tp)  # set difference: Tp is a subset of target
    Fp = np.bitwise_xor(pred, Tp)
    xx = np.logical_or(np.logical_or(Tp, Fp), Fn)
    Tn = np.bitwise_xor(N, xx)
precision = Tp.sum()/(Tp.sum()+ Fp.sum() )
recall = Tp.sum()/(Tp.sum()+ Fn.sum())
F1 = 2*Tp.sum() /(2*Tp.sum()+ Fn.sum()+ Fp.sum())
accuracy = (Tp.sum()+Tn.sum())/N.sum()
return F1, accuracy, precision, recall
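# F1_rwi computes the same confusion-matrix quantities as F1_loss, but via
# bitwise XOR on the boolean masks; since Tp is contained in both pred and
# target, XOR with Tp is exactly the set difference used above.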
if __name__=='__main__':
#### Parameters and paths:
nclass = 2
save_rrc_res_path = "/path/to/deepLabV3Plus/deeplabv3plus_pixelWise/results/validation_images/B_260/"
model_path = "/path/to/deepLabV3Plus/deeplabv3plus_pixelWise/results/icdar_models/run/icdar/deeplab-resnet/model_best.pth.tar"
alphabet="#abcdefghijklmnopqrstuvwxyz1234567890@"
img_path = "/path/to/GAN_text/data/text_segmentation/test/A/"
gt_path = "/path/to/GAN_text/data/text_segmentation/test/B_gt_1chanel/"
### args
parser = argparse.ArgumentParser(description="PyTorch DeeplabV3Plus Heatmap Prediction")
parser.add_argument('--backbone', type=str, default='resnet',
choices=['resnet', 'xception', 'drn', 'mobilenet'],
help='backbone name (default: resnet)')
parser.add_argument('--freeze-bn', type=bool, default=False,
help='whether to freeze bn parameters (default: False)')
    parser.add_argument('--out-stride', type=int, default=16,
                        help='network output stride (default: 16)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--gpu-ids', type=str, default='0',
help='use which gpu to train, must be a \
comma-separated list of integers only (default=0)')
parser.add_argument('--sync-bn', type=bool, default=None,
help='whether to use sync bn (default: auto)')
##checking point
parser.add_argument('--resume', type=str, default= model_path,
help='put the path to resuming file if needed')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
try:
args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
except ValueError:
raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')
if args.sync_bn is None:
if args.cuda and len(args.gpu_ids) > 1:
args.sync_bn = True
else:
args.sync_bn = False
image_files = sorted(glob.glob(img_path+'*.png')) #'*.jpg'))
trained_model = get_model(nclass,args)
f1_all = []
accuracy_all = []
f1_all_rwi = []
accuracy_all_rwi = []
#for img_path in sys.argv[1:]:
#for i in range(0,10):
for i in range(0,len(image_files)):
img_path = image_files[i]
print("image path is: {}".format(img_path))
img_name = img_path.split('/')[-1].split('.')[0]
        gt = np.asarray(Image.open(gt_path+img_name+'.png'))
#trained_model = get_model(nclass,args)
#pdb.set_trace()
# load_test_data = load_data(args,img_path)
# dataloader = DataLoader(load_test_data)
# for ii, img_test in enumerate(dataloader):
pred = get_pred(img_path,trained_model,args)
pred = softmax(pred, axis=1)
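        # softmax over axis=1 (the class channel) turns the raw logits into
        # per-pixel class probabilities of shape (1, nclass, H, W).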
#image_source = cv2.imread(img_path)
#image_source = cv2.resize(image_source, (512, 512))
#pdb.set_trace()
#fig = plt.figure()
# plt.imshow(pred.squeeze()[1,:,:])
# plt.show()
# res = pred.squeeze()[1,:,:]>0.3
#res = np.argmax(pred.squeeze(), axis=0)
#pdb.set_trace()
# plt.imshow(res)
# plt.show()
#ret,pred_bin = cv2.threshold(pred.squeeze()[1,:,:],0.2,255,cv2.THRESH_BINARY)
pred_bin = np.argmax(pred.squeeze(), axis=0)
#pdb.set_trace()
        # argmax output is {0, 1}, so threshold the prediction at > 0 (the
        # original > 5 could never fire); gt (presumably 0/255) keeps > 5.
        f1, acc, prc, rcl = F1_loss(pred_bin > 0, gt > 5)
print("F1 is {}, accuracy is {}, precision is {}, recall is {}".format(f1,acc,prc,rcl))
#pdb.set_trace()
        pred_bin_8 = pred_bin.astype(np.uint8)
        f1_rwi, acc_rwi, prc_rwi, rcl_rwi = F1_rwi(pred_bin_8 > 0, gt > 5)
print("F1_rwi is {}, accuracy_rwi is {}, precision_rwi is {}, recall_rwi is {}".format(f1_rwi,acc_rwi,prc_rwi,rcl_rwi))
f1_all.append(f1)
accuracy_all.append(acc)
f1_all_rwi.append(f1_rwi)
accuracy_all_rwi.append(acc_rwi)
print("the average of F1 is {}".format(np.mean(f1_all)))
print("the average accuracy is {}".format(np.mean(accuracy_all)))
print("the average of F1_rwi is {}".format( | np.mean(f1_all_rwi) | numpy.mean |
__doc__ = """ External forcing for rod test module for Elastica implementation"""
import sys
# System imports
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pytest
from elastica.external_forces import (
NoForces,
GravityForces,
EndpointForces,
UniformTorques,
UniformForces,
MuscleTorques,
inplace_addition,
inplace_substraction,
)
from elastica.utils import Tolerance
from examples.JointCases.external_force_class_for_joint_test import (
EndpointForcesSinusoidal,
)
def mock_rod_init(self):
self.n_elems = 0.0
self.external_forces = 0.0
self.external_torques = 0.0
self.director_collection = 0.0
self.rest_lengths = 0.0
MockRod = type("MockRod", (object,), {"__init__": mock_rod_init})
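# MockRod is a minimal stand-in for a rod: a dynamically built class exposing
# only the attributes (external_forces, external_torques, director_collection,
# ...) that the forcing classes under test read or write, so no full Cosserat
# rod has to be constructed.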
class TestNoForces:
def test_no_forces_applied(self):
"""No force on the rod. Test purely
to improve coverage characteristics
"""
mock_rod = MockRod()
ext_no_forces = NoForces()
correct_external_forces = np.random.rand(3, 20)
mock_rod.external_forces = correct_external_forces
ext_no_forces.apply_forces(mock_rod)
assert_allclose(
mock_rod.external_forces, correct_external_forces, atol=Tolerance.atol()
)
def test_no_torques_applied(self):
"""No torques on the rod. Test purely
to improve coverage characteristics
"""
mock_rod = MockRod()
ext_no_forces = NoForces()
correct_external_torques = np.random.rand(3, 20)
mock_rod.external_torques = correct_external_torques
ext_no_forces.apply_torques(mock_rod)
assert_allclose(
mock_rod.external_torques, correct_external_torques, atol=Tolerance.atol()
)
# The minimum number of nodes in a system is 2
@pytest.mark.parametrize("n_elem", [2, 4, 16])
def test_gravity_forces(n_elem):
# tests uniform gravity
dim = 3
mock_rod = MockRod()
mass = np.random.randn(n_elem)
acceleration_gravity = np.random.rand(dim)
correct_external_forces = (
mass * np.broadcast_to(acceleration_gravity, (n_elem, dim)).T
)
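    # broadcast_to tiles the gravity vector over the n_elem masses; the
    # transpose gives shape (dim, n_elem), so column i holds mass[i] * g.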
mock_rod.mass = mass
mock_rod.external_forces = np.zeros((dim, n_elem))
ext_gravity_forces = GravityForces(acceleration_gravity)
ext_gravity_forces.apply_forces(mock_rod)
assert_allclose(
mock_rod.external_forces, correct_external_forces, atol=Tolerance.atol()
)
# The minimum number of nodes in a system is 2
@pytest.mark.parametrize("n_elem", [2, 4, 16])
@pytest.mark.parametrize("rampupTime", [5, 10, 15])
@pytest.mark.parametrize("time", [0, 8, 20])
def test_endpoint_forces(n_elem, rampupTime, time):
dim = 3
mock_rod = MockRod()
mock_rod.external_forces = np.zeros((dim, n_elem))
if rampupTime > time:
factor = time / rampupTime
elif rampupTime <= time:
factor = 1.0
start_force = np.random.rand(dim)
end_force = np.random.rand(dim)
ext_endpt_forces = EndpointForces(start_force, end_force, rampupTime)
ext_endpt_forces.apply_forces(mock_rod, time)
assert_allclose(
mock_rod.external_forces[..., 0], start_force * factor, atol=Tolerance.atol()
)
assert_allclose(
mock_rod.external_forces[..., -1], end_force * factor, atol=Tolerance.atol()
)
# The minimum number of nodes in a system is 2
@pytest.mark.parametrize("n_elem", [2, 4, 16])
@pytest.mark.parametrize("torques", [5, 10, 15])
def test_uniform_torques(n_elem, torques, time=0.0):
dim = 3
mock_rod = MockRod()
mock_rod.external_torques = np.zeros((dim, n_elem))
mock_rod.n_elems = n_elem
mock_rod.director_collection = np.repeat(
np.identity(3)[:, :, np.newaxis], n_elem, axis=2
)
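    # The directors are identity matrices, so the material frame coincides with
    # the lab frame and the torque applied along `direction` should simply sum
    # back to the input magnitude.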
torque = np.random.rand()
direction = np.array([1.0, 0.0, 0.0])
uniform_torques = UniformTorques(torque, direction)
uniform_torques.apply_torques(mock_rod, time)
assert_allclose(mock_rod.external_torques.sum(), torque, atol=Tolerance.atol())
# The minimum number of nodes in a system is 2
@pytest.mark.parametrize("n_elem", [2, 4, 16])
@pytest.mark.parametrize("forces", [5, 10, 15])
def test_uniform_forces(n_elem, forces, time=0.0):
dim = 3
mock_rod = MockRod()
mock_rod.external_forces = | np.zeros((dim, n_elem + 1)) | numpy.zeros |
import matplotlib.pyplot as plt
import numpy as np
from MLG import imagepath, paperpath, path
from imageio import imread
import matplotlib.cbook as cbook
from MLG.utils import color_own
from matplotlib import rc
__all__ = ['dark','Image_Window','Image_precision','Image_Illustration','Image_Illustration_Multi','Image_compare_micro','Image_astroshift', 'create_all_Image']
def dark(onof = 0):
    if onof == 'on': plt.style.use('dark_background')
    elif onof == 'off': plt.style.use('default')
    elif onof == True: plt.style.use('dark_background')
    else: plt.style.use('default')
def Image_Window(string = 'resolve_Window', pres = False):
'''------------------------------------------------------------
Description:
---------------------------------------------------------------
Input:
---------------------------------------------------------------
Output:
------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.7,0.7,0.7,1])
else: black = color_own([0.,0.,0.,1])
c1 = color_own([0,1,1,1])
c2 = color_own([1,0,0,1])
c3 = color_own([1,1,0.2,1])
c4 = color_own([0.4,0.4,0.4,1])
c_star = [c1,c2,c3,c4]
c_grid1 = color_own([0,int(dark),1,1])
c_grid2 = color_own([0.5,1,0,1])
star= np.array([[0, 0],[0.1,0.9],[-1,-1.1],[-0.5,0.1]])
fig = plt.figure(figsize = [12,12])
x_width = 0.059
y_width = 0.177
#------------------------------------------------------------
# axis
plt.xticks( fontsize = 25)
plt.yticks( fontsize = 25)
plt.grid(True)
plt.axis('equal')
plt.axis([-1.5,1.5,-1.4,1.6])
plt.ylabel('Across-scan direction (AC) [arcsec]', fontsize = 30)
plt.xlabel('Along-scan direction (AL) [arcsec]', fontsize = 30)
#------------------------------------------------------------
#------------------------------------------------------------
#Grid Major Star
for i in range(-6,7):
plt.plot([-6*x_width,6*x_width], [i*y_width,i*y_width], c = c_grid1,linewidth = 3)
plt.plot([i*x_width,i*x_width], [-6*y_width,6*y_width], c = c_grid1, linewidth = 3)
plt.text(0,1.4,"Along-scan direction\n $12\,\mathrm{pix} \\times 0.059 \mathrm{''/pix} = 0.708\mathrm{''}$",fontsize = 25, verticalalignment = 'center', horizontalalignment = 'center', rotation = 0)
plt.text(0.7,0,"Across-scan direction\n $12\,\mathrm{pix} \\times 0.177 \mathrm{''/pix} = 2.124\mathrm{''}$",fontsize = 25, verticalalignment = 'center', horizontalalignment = 'center', rotation = 90)
plt.arrow(0,6*y_width+2*x_width, -6*x_width+0.02,0,color= black, head_width=0.1,\
overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
plt.arrow(0,6*y_width+2*x_width, 6*x_width-0.02,0,color= black, head_width=0.1,\
overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
plt.arrow(8*x_width,0,0, -6*y_width+0.02,color= black, head_width=0.1,\
overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
plt.arrow(8*x_width,0,0, 6*y_width-0.02,color= black, head_width=0.1,\
overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
plt.scatter(star[:1,0], star[:1,1], marker=(5, 1),c = c_star[:1], s = [3000], zorder = 1000)
if pres: fig.savefig(imagepath + string + '_1.png', format = 'png')
#------------------------------------------------------------
#------------------------------------------------------------
#Grid Minor Star
plt.scatter(star[1:3,0], star[1:3,1], marker=(5, 1),c = c_star[1:3], s = [2000,2000], zorder = 1000)
if pres: fig.savefig(imagepath + string + '_2.png', format = 'png')
for i in range(-5,8):
plt.plot([-15*x_width,-6*x_width], [i*y_width,i*y_width], c = c_grid2,linewidth = 3, zorder = -1)
for i in range(-15,-5):
plt.plot([i*x_width,i*x_width], [-5*y_width,7*y_width], c = c_grid2, linewidth = 3, zorder = -1)
plt.scatter(star[3:,0], star[3:,1], marker=(5, 1),c = c_star[3:], s = [2000], zorder = 1000)
if pres: fig.savefig(imagepath + string + '_3.png', format = 'png')
#------------------------------------------------------------
fig.savefig(imagepath + string + '.png', format = 'png')
print('Create Image: '+ imagepath+ string + '.png')
if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
plt.close(fig)
def Image_precision(string = 'Sig_vs_Gmag', Gaia_precision = path+'InputTable/resolution_Gaia.png', pres = False):
'''------------------------------------------------------------
Description:
---------------------------------------------------------------
Input:
---------------------------------------------------------------
Output:
------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.7,0.7,0.7,1])
color1 = color_own([0.85,0,0,1])
color2 = color_own([0,0,1,1])
color3 = color_own([0,1,1,1])
color4 = color_own([0.5,1,0,1])
color5 = color_own([1,1,0,1])
else:
black = color_own([0.,0.,0.,1])
color1 = color_own([0.85,0,0,1])
color2 = color_own([0,0,1,1])
color3 = color_own([0,1,1,1])
color4 = color_own([0,1,0,1])
color5 = color_own([1,1,0,1])
fig = plt.figure(figsize = [12,10])
Gmag = np.arange(4,22,0.01)
datafile = cbook.get_sample_data(Gaia_precision)
img = imread(datafile)
z = 10 ** (0.4 * (np.maximum(Gmag, 14) - 15)) #(14-np.minimum(Gmag, 14))
z2 = 10 ** (0.4 * (np.maximum(Gmag, 12) - 15))
sig_pi = (-1.631 + 680.766 * z2 + 32.732 * z2**2)**0.5/1000
sig_fov2 =(-1.631 + 680.766 * z + 32.732 * z**2)**0.5/1000 *7.75 +0.1
sig_fov3 = sig_fov2 / np.sqrt(9)
plt.plot([0,1],[-5,-5], c = color1, linewidth = 3, label = 'formal precision from Gaia DR2 (per CCD)' )
plt.plot([0,1],[-5,-5], c = color2, linewidth = 3, label = 'actual precision from Gaia DR2 (per CCD)' )
plt.yticks([np.log10(i) for i in [20,10, 5,2,1, 0.5,0.2,0.1, 0.05,0.02, 0.01]],[20,10, 5,2,1, 0.5,0.2,0.1, 0.05,0.02,0.01], fontsize = 25)
plt.xticks( fontsize = 25)
plt.ylabel('Standard deviation of AL field angle [mas]', fontsize = 30)
plt.xlabel('G magnitude', fontsize = 30)
plt.imshow(img, zorder=0, extent=[5, 21.04, np.log10(0.0195),np.log10(10)])
plt.axis('auto')
plt.xlim([4,22])
plt.ylim([np.log10(0.005),np.log10(40)])
if pres:
plt.legend(loc = 'upper left',fontsize = 20)
fig.savefig(imagepath + string + '_1.png', format = 'png')
plt.plot(Gmag,np.log10(sig_pi), '--',c = color3, dashes =(5,5), linewidth = 3, label= 'predicted end-of-mission parallax error')
if pres:
plt.legend(loc = 'upper left',fontsize = 20)
fig.savefig(imagepath + string + '_2.png', format = 'png')
plt.plot(Gmag,np.log10(sig_fov2), ':' , c = color4, linewidth = 5, label= 'used Standard deviation (per CCD)' )
if pres:
plt.legend(loc = 'upper left',fontsize = 20)
fig.savefig(imagepath + string + '_3.png', format = 'png')
plt.plot(Gmag,np.log10(sig_fov3) ,c = color5,linewidth = 7, label= 'used Standard deviation for 9 CCD observations' )
plt.plot([5, 21.04, 21.04,5,5], [np.log10(0.0195),np.log10(0.0195),np.log10(10),np.log10(10),np.log10(0.0195)], linewidth = 2, color = [0.5,0.5,0.5,1], zorder = 0.1)
plt.axis('auto')
plt.xlim([4,22])
plt.ylim([np.log10(0.005),np.log10(40)])
plt.legend(loc = 'upper left',fontsize = 20)
fig.savefig(imagepath + string + '.png', format = 'png')
print('Create Image: '+ imagepath+ string + '.png')
if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
plt.close(fig)
def Image_Illustration(string = 'Illustration'):
'''------------------------------------------------------------
Description:
---------------------------------------------------------------
Input:
---------------------------------------------------------------
Output:
------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.7,0.7,0.7,1])
color1 = color_own([0,1,1,1])
color2 = color_own([1,0.5,0,1])
color3 = color_own([0.5,1,0,1])
color4 = color_own([1,0,1,1])
color5 = color_own([0,1,1,1])
color6 = color_own([0,1,1,1])
else:
black = color_own([0.,0.,0.,1])
color1 = color_own([0,0,1,1])
color2 = color_own([1,0.5,0,1])
color3 = color_own([0,1,0,1])
color4 = color_own([1,0,1,1])
color5 = color_own([0,1,1,1])
color6 = color_own([0,1,1,1])
t = np.array([12, 35, 41, 61, 73, 89])
scandir = np.array([0.1, 0.7, 0.4, 0.8 , 0.2, 0.1])*np.pi
x1 = np.linspace(1,13,100) + 0.3 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y1 = np.linspace(1,3,100) + 0.3* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
x2 = np.linspace(3,7,100)# + 0.03 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y2 = np.linspace(7,4.5,100)# + 0.03* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
d = np.sqrt((x1-x2)**2 + (y1-y2)**2)
TE = 1.5
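    # apparent (lensed) source position: shifted away from the lens along the
    # separation vector by TE/(d**2 + 2*TE), the usual major-image shift with
    # TE playing the role of the squared Einstein radius (illustrative units)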
X2 = x2 + (x2-x1) * TE/(d**2 +2*TE)
Y2 = y2 + (y2-y1) * TE/(d**2 +2*TE)
dX2 = x1-X2
dY2 = y1-Y2
dx2 = x1-x2
dy2 = y1-y2
fig = plt.figure(figsize= (12,8))
ax = plt.subplot(111)
ax.axis('equal')
ax.axis('off')
for i in range(len(t)):
xm1 =np.array([-1,1]) * np.cos(scandir[i]) + x1[t[i]]
ym1 =np.array([-1,1]) * np.sin(scandir[i]) + y1[t[i]]
xm2 =np.array([-1,1]) * np.cos(scandir[i]) + X2[t[i]]
ym2 =np.array([-1,1]) * np.sin(scandir[i]) + Y2[t[i]]
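        # rotate the lens-source separation into the scan frame: with the
        # rotation below, component [1] is the along-scan projection, the
        # only coordinate a Gaia-like one-dimensional scan measures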
dsc = ((dx2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dy2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
dSC = ((dX2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dY2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
ttX2 = np.array([0,-dSC[1]/2,dSC[1]/2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],X2[t[i]],X2[t[i]]])
ttY2 = np.array([0,-dSC[1]/2,dSC[1]/2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],Y2[t[i]],Y2[t[i]]])
ttx2 = np.array([0,-dsc[1]/2-0.2,dsc[1]/2-0.2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],x2[t[i]],x2[t[i]]])
tty2 = np.array([0,-dsc[1]/2-0.2,dsc[1]/2-0.2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],y2[t[i]],y2[t[i]]])
if i % 2 == 0:
plt.arrow(ttx2[2],tty2[2], 0.0001*(ttx2[2]-ttx2[1]),0.0001*(tty2[2]-tty2[1]),color= color1, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttx2[1],tty2[1], 0.0001*(ttx2[1]-ttx2[2]),0.0001*(tty2[1]-tty2[2]),color= color1, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX2[2],ttY2[2], 0.0001*(ttX2[2]-ttX2[1]),0.0001*(ttY2[2]-ttY2[1]),color= color2, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX2[1],ttY2[1], 0.0001*(ttX2[1]-ttX2[2]),0.0001*(ttY2[1]-ttY2[2]),color= color2, head_width=0.2,\
overhang = 0.5, length_includes_head=True, zorder = 10)
plt.plot(ttx2[0:2],tty2[0:2],color = black, linestyle= ':')
plt.plot(ttX2[0:2],ttY2[0:2],color = black, linestyle= ':')
plt.plot(ttx2[1:3],tty2[1:3],color = color1,linewidth = 3 , linestyle= '--',dashes=(10, 10))
plt.plot(ttX2[1:3],ttY2[1:3],color = color2, linewidth = 3,linestyle= '-')
plt.plot(ttx2[2:],tty2[2:],color = black, linestyle= ':')
plt.plot(ttX2[2:],ttY2[2:],color = black, linestyle= ':')
if i% 2 == 0:
plt.plot(xm2,ym2, color = black, linewidth = 3,zorder = 1)
plt.plot(xm1,ym1, color = black, linewidth = 3,zorder = 1)
else:
plt.plot(xm2,ym2, color = 'grey', linewidth = 2, zorder = -1)
plt.plot(xm1,ym1, color = 'grey', linewidth = 2, zorder = -1)
#if i ==0 :
plt.plot(x1,y1, color = color3, linewidth = 3)
plt.plot(x2,y2, color = color1, linestyle= '--',dashes=(10, 5), linewidth = 3, zorder = -1)
plt.plot(X2,Y2, color = color2, linewidth = 3)
plt.xlim([-0.5,14])
xr = 12
yr = 7
plt.text(xr-0.8,0,'RA $\cdot$ cos(Dec)',verticalalignment = 'center',fontsize = 25)
plt.text(0,yr + 0.25,'Dec',fontsize = 25, horizontalalignment = 'center', rotation = 90)
plt.arrow(-0.025,0,xr-1,0,width = 0.05,overhang = 0.5,head_width = 0.5, head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
plt.arrow(0,-0.025,0,yr-0.5,width = 0.05,overhang = 0.5,head_width = 0.5,head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
plt.text(2,1.5,'Lens',color = color3, fontsize = 25, horizontalalignment = 'center', rotation = 0, weight = 'bold')
plt.text(4,7.5,'Star 1',color = color2,fontsize = 25, horizontalalignment = 'center', rotation = 0,weight = 'bold')
fig.savefig(imagepath + string + '.png', format = 'png')
print('Create Image: '+ imagepath+ string + '.png')
if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
plt.close(fig)
def Image_Illustration2 (string = 'Illustration'):
    '''------------------------------------------------------------
    Description:
        builds the same lens/source illustration as Image_Illustration
        in stages, saving intermediate frames (_1 ... _4) as the lens
        path, the unlensed source path, the lensed source path and the
        scan footprints are added.
    ---------------------------------------------------------------
    Input:
        string: filename stem for the output images
    ---------------------------------------------------------------
    Output:
        saves imagepath/<string>_1.png ... imagepath/<string>_4.png
    ------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.,0.,0.,1])
grey = color_own([.5,.5,0.5,1])
cyan = color_own([0,1,1,1])
blue = color_own([0,0,1,1])
lime = color_own([0.6,1.2,0,1])
green = color_own([0,1,0,1])
red = color_own([1,0,0,1])
orange = color_own([1,1,0,1])
else:
black = color_own([0.,0.,0.,1])
grey = color_own([.5,.5,0.5,1])
cyan = color_own([0,1,1,1])
blue = color_own([0,0,1,1])
lime = color_own([0.6,1.2,0,1])
green = color_own([0,1,0,1])
red = color_own([1,0,0,1])
orange = color_own([1,1,0,1])
t = np.array([12, 35, 41, 61, 73, 89])
scandir = np.array([0.1, 0.7, 0.4, 0.8 , 0.2, 0.1])*np.pi
#Position_lens
x1 = np.linspace(1,13,100) + 0.3 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y1 = np.linspace(1,3,100) + 0.3* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
#unlensed Position_source
x2 = np.linspace(5,9,100)# + 0.03 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y2 = np.linspace(7,4.5,100)# + 0.03* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
d = np.sqrt((x1-x2)**2 + (y1-y2)**2)
TE = 2
X2 = x2 + (x2-x1) * TE/(d**2 +2*TE)
Y2 = y2 + (y2-y1) * TE/(d**2 +2*TE)
dX2 = x1-X2
dY2 = y1-Y2
dx2 = x1-x2
dy2 = y1-y2
fig = plt.figure(figsize= (12,8))
ax = plt.subplot(111)
ax.axis('equal')
ax.axis('off')
#---------------------------------------------------------------
#axis
plt.xlim([-0.5,14])
xr = 12
yr = 7
plt.text(xr-0.8,0,'RA $\cdot$ cos(Dec)',verticalalignment = 'center',fontsize = 25)
plt.text(0,yr + 0.25,'Dec',fontsize = 25, horizontalalignment = 'center', rotation = 90)
plt.arrow(-0.025,0,xr-1,0,width = 0.05,overhang = 0.5,head_width = 0.5, head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
plt.arrow(0,-0.025,0,yr-0.5,width = 0.05,overhang = 0.5,head_width = 0.5,head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
plt.text(2,1.5,'Lens',color = grey, fontsize = 25, horizontalalignment = 'center', rotation = 0, weight = 'bold')
#---------------------------------------------------------------
# Motion source
plt.plot(x1,y1, color = grey, linewidth = 7)
fig.savefig(imagepath + string + '_1.png', format = 'png')
plt.text(4,7.5,'Source',color = blue,fontsize = 25, horizontalalignment = 'center', rotation = 0,weight = 'bold')
plt.plot(x2,y2, color = cyan, linestyle= '--',dashes=(10, 5), linewidth = 3, zorder = -1)
fig.savefig(imagepath + string + '_2.png', format = 'png')
plt.plot(X2,Y2, color = blue, linewidth = 3)
for i in range(len(t)):
plt.plot([x2[t[i]],X2[t[i]]],[y2[t[i]],Y2[t[i]]],':',color = black)
fig.savefig(imagepath + string + '_3.png', format = 'png')
delta = 0.05
for i in range(len(t)):
xm1 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + x1[t[i]]
ym1 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + y1[t[i]]
xm2 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + X2[t[i]]
ym2 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + Y2[t[i]]
plt.plot(xm2,ym2, color = black, linewidth = 1,zorder = 1)
plt.plot(xm1,ym1, color = black, linewidth = 1,zorder = 1)
fig.savefig(imagepath + string + '_4.png', format = 'png')
for i in range(len(t)):
        xm1 = np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + x1[t[i]]
from spikeinterface.core import BinaryRecordingExtractor, BaseRecordingSegment, BaseSorting, BaseSortingSegment
from spikeinterface.core.core_tools import write_binary_recording
from probeinterface import read_prb, write_prb
import json
import numpy as np
from pathlib import Path
try:
import pandas as pd
HAVE_PANDAS = True
except:
HAVE_PANDAS = False
class ALFSortingExtractor(BaseSorting):
extractor_name = 'ALFSorting'
installed = HAVE_PANDAS
is_writable = True
    installation_mesg = "To use the ALFSortingExtractor, install pandas: \n\n pip install pandas\n\n"
def __init__(self, folder_path, sampling_frequency=30000):
assert self.installed, self.installation_mesg
# check correct parent folder:
self._folder_path = Path(folder_path)
if 'probe' not in self._folder_path.name:
raise ValueError('folder name should contain "probe", containing channels, clusters.* .npy datasets')
# load datasets as mmap into a dict:
required_alf_datasets = ['spikes.times', 'spikes.clusters']
found_alf_datasets = dict()
        for alf_dataset_name in self._folder_path.iterdir():
if 'spikes' in alf_dataset_name.stem or 'clusters' in alf_dataset_name.stem:
if 'npy' in alf_dataset_name.suffix:
dset = np.load(alf_dataset_name, mmap_mode='r', allow_pickle=True)
found_alf_datasets.update({alf_dataset_name.stem: dset})
elif 'metrics' in alf_dataset_name.stem:
found_alf_datasets.update({alf_dataset_name.stem: pd.read_csv(alf_dataset_name)})
# check existence of datasets:
        if not all([i in found_alf_datasets for i in required_alf_datasets]):
raise Exception(f'could not find {required_alf_datasets} in folder')
spike_clusters = found_alf_datasets['spikes.clusters']
spike_times = found_alf_datasets['spikes.times']
# load units properties:
total_units = 0
properties = dict()
for alf_dataset_name, alf_dataset in found_alf_datasets.items():
if 'clusters' in alf_dataset_name:
if 'clusters.metrics' in alf_dataset_name:
for property_name, property_values in found_alf_datasets[alf_dataset_name].iteritems():
properties[property_name] = property_values.tolist()
else:
property_name = alf_dataset_name.split('.')[1]
properties[property_name] = alf_dataset
if total_units == 0:
total_units = alf_dataset.shape[0]
if 'clusters.metrics' in found_alf_datasets and \
found_alf_datasets['clusters.metrics'].get('cluster_id') is not None:
unit_ids = found_alf_datasets['clusters.metrics'].get('cluster_id').tolist()
else:
unit_ids = list(range(total_units))
BaseSorting.__init__(self, unit_ids=unit_ids, sampling_frequency=sampling_frequency)
sorting_segment = ALFSortingSegment(spike_clusters, spike_times, sampling_frequency)
self.add_sorting_segment(sorting_segment)
# add properties
for property_name, values in properties.items():
self.set_property(property_name, values)
self._kwargs = {'folder_path': str(Path(folder_path).absolute()), 'sampling_frequency': sampling_frequency}
# @staticmethod
# def write_sorting(sorting, save_path):
# assert HAVE_PANDAS, ALFSortingExtractor.installation_mesg
# # write cluster properties as clusters.<property_name>.npy
# save_path = Path(save_path)
# csv_property_names = ['cluster_id', 'cluster_id.1', 'num_spikes', 'firing_rate',
# 'presence_ratio', 'presence_ratio_std', 'frac_isi_viol',
# 'contamination_est', 'contamination_est2', 'missed_spikes_est',
# 'cum_amp_drift', 'max_amp_drift', 'cum_depth_drift', 'max_depth_drift',
# 'ks2_contamination_pct', 'ks2_label', 'amplitude_cutoff', 'amplitude_std',
# 'epoch_name', 'isi_viol']
# clusters_metrics_df = pd.DataFrame()
# for property_name in sorting.get_unit_property_names(0):
# data = sorting.get_units_property(property_name=property_name)
# if property_name not in csv_property_names:
# np.save(save_path / f'clusters.{property_name}', data)
# else:
# clusters_metrics_df[property_name] = data
# clusters_metrics_df.to_csv(save_path / 'clusters.metrics.csv')
# # save spikes.times, spikes.clusters
# clusters_number = []
# unit_spike_times = []
# for unit_no, unit_id in enumerate(sorting.get_unit_ids()):
# unit_spike_train = sorting.get_unit_spike_train(unit_id=unit_id)
# if unit_spike_train is not None:
# unit_spike_times.extend(np.array(unit_spike_train) / sorting.get_sampling_frequency())
# clusters_number.extend([unit_no] * len(unit_spike_train))
# unit_spike_train = np.array(unit_spike_times)
# clusters_number = np.array(clusters_number)
# spike_times_ids = np.argsort(unit_spike_train)
# spike_times = unit_spike_train[spike_times_ids]
# spike_clusters = clusters_number[spike_times_ids]
# np.save(save_path / 'spikes.times', spike_times)
# np.save(save_path / 'spikes.clusters', spike_clusters)
class ALFSortingSegment(BaseSortingSegment):
def __init__(self, spike_clusters, spike_times, sampling_frequency):
self._spike_clusters = spike_clusters
self._spike_times = spike_times
self._sampling_frequency = sampling_frequency
BaseSortingSegment.__init__(self)
def get_unit_spike_train(self,
unit_id,
start_frame,
end_frame,
) -> np.ndarray:
# must be implemented in subclass
if start_frame is None:
start_frame = 0
if end_frame is None:
end_frame = np.inf
        spike_times = self._spike_times[np.where(self._spike_clusters == unit_id)]
        # ALF spike times are stored in seconds; convert to frames and clip to
        # the requested window (sketch of the standard conversion)
        spike_frames = (spike_times * self._sampling_frequency).astype(np.int64)
        return spike_frames[(spike_frames >= start_frame) & (spike_frames < end_frame)]
import argparse
import colorsys
import math
import os
import random
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pyglet
import trimesh
from PIL import Image, ImageEnhance
from tqdm import tqdm
from OpenGL.GL import GL_LINEAR_MIPMAP_LINEAR
import pyrender
from archiver import Archiver, SceneData
from pyrender import (DirectionalLight, Mesh, Node, OffscreenRenderer,
PerspectiveCamera, PointLight, RenderFlags, Scene,
Primitive)
texture_directory = os.path.join(os.path.dirname(__file__), "..", "textures")
object_directory = os.path.join(os.path.dirname(__file__), "objects")
floor_textures = [
"{}/lg_floor_d.tga".format(texture_directory),
"{}/lg_style_01_floor_blue_d.tga".format(texture_directory),
"{}/lg_style_01_floor_orange_bright_d.tga".format(texture_directory),
]
wall_textures = [
"{}/lg_style_01_wall_cerise_d.tga".format(texture_directory),
"{}/lg_style_01_wall_green_bright_d.tga".format(texture_directory),
"{}/lg_style_01_wall_red_bright_d.tga".format(texture_directory),
"{}/lg_style_02_wall_yellow_d.tga".format(texture_directory),
"{}/lg_style_03_wall_orange_bright_d.tga".format(texture_directory),
]
objects = [
pyrender.objects.Capsule,
pyrender.objects.Cylinder,
pyrender.objects.Icosahedron,
pyrender.objects.Box,
pyrender.objects.Sphere,
]
def set_random_texture(node, path):
texture_image = Image.open(path).convert("RGB")
primitive = node.mesh.primitives[0]
assert isinstance(primitive, Primitive)
primitive.material.baseColorTexture.source = texture_image
primitive.material.baseColorTexture.sampler.minFilter = GL_LINEAR_MIPMAP_LINEAR
def build_scene(floor_textures, wall_textures, fix_light_position=False):
scene = Scene(
bg_color=np.array([153 / 255, 226 / 255, 249 / 255]),
ambient_light=np.array([0.5, 0.5, 0.5, 1.0]))
floor_trimesh = trimesh.load("{}/floor.obj".format(object_directory))
mesh = Mesh.from_trimesh(floor_trimesh, smooth=False)
node = Node(
mesh=mesh,
rotation=pyrender.quaternion.from_pitch(-math.pi / 2),
translation=np.array([0, 0, 0]))
texture_path = random.choice(floor_textures)
set_random_texture(node, texture_path)
scene.add_node(node)
texture_path = random.choice(wall_textures)
wall_trimesh = trimesh.load("{}/wall.obj".format(object_directory))
mesh = Mesh.from_trimesh(wall_trimesh, smooth=False)
node = Node(mesh=mesh, translation=np.array([0, 1.15, -3.5]))
set_random_texture(node, texture_path)
scene.add_node(node)
mesh = Mesh.from_trimesh(wall_trimesh, smooth=False)
node = Node(
mesh=mesh,
rotation=pyrender.quaternion.from_yaw(math.pi),
translation=np.array([0, 1.15, 3.5]))
set_random_texture(node, texture_path)
scene.add_node(node)
mesh = Mesh.from_trimesh(wall_trimesh, smooth=False)
node = Node(
mesh=mesh,
rotation=pyrender.quaternion.from_yaw(-math.pi / 2),
translation=np.array([3.5, 1.15, 0]))
set_random_texture(node, texture_path)
scene.add_node(node)
mesh = Mesh.from_trimesh(wall_trimesh, smooth=False)
node = Node(
mesh=mesh,
rotation=pyrender.quaternion.from_yaw(math.pi / 2),
translation=np.array([-3.5, 1.15, 0]))
set_random_texture(node, texture_path)
scene.add_node(node)
light = DirectionalLight(color=np.ones(3), intensity=10)
if fix_light_position == True:
translation = np.array([1, 1, 1])
else:
xz = np.random.uniform(-1, 1, size=2)
translation = np.array([xz[0], 1, xz[1]])
yaw, pitch = compute_yaw_and_pitch(translation)
node = Node(
light=light,
rotation=genearte_camera_quaternion(yaw, pitch),
translation=translation)
scene.add_node(node)
return scene
def place_objects(scene,
colors,
objects,
max_num_objects=3,
min_num_objects=1,
discrete_position=False,
rotate_object=False):
# Place objects
directions = [-1.5, 0.0, 1.5]
available_positions = []
for z in directions:
for x in directions:
available_positions.append((x, z))
available_positions = np.array(available_positions)
num_objects = random.choice(range(min_num_objects, max_num_objects + 1))
indices = np.random.choice(
np.arange(len(available_positions)), replace=False, size=num_objects)
for xz in available_positions[indices]:
node = random.choice(objects)()
node.mesh.primitives[0].color_0 = random.choice(colors)
if discrete_position == False:
xz += np.random.uniform(-0.3, 0.3, size=xz.shape)
if rotate_object:
            yaw = np.random.uniform(0, math.pi * 2, size=1)
import pytest
import tensorflow as tf
import numpy as np
from scipy.ndimage.measurements import mean as label_mean
from skimage.segmentation import relabel_sequential as sk_relabel_sequential
from rdcnet.losses.embedding_loss import InstanceEmbeddingLossBase, SpatialInstanceEmbeddingLossBase, InstanceMeanIoUEmbeddingLoss, MarginInstanceEmbeddingLoss, relabel_sequential
class DummySpatialInstanceEmbeddingLoss(SpatialInstanceEmbeddingLossBase):
def _center_dist_to_probs(self, one_hot, center_dist):
pass
def test__unbatched_soft_jaccard():
'''Verifies that the soft Jaccard loss behaves as keras MeanIoU when
probabilities are either 0 or 1 and that background masking works
'''
_unbatched_soft_jaccard = DummySpatialInstanceEmbeddingLoss(
)._unbatched_soft_jaccard
# check with/without background on simple example
yt = np.array([0, 0, 1, 1, 2, 2])[..., None]
yp = np.array([0, 1, 0, 1, 2, 2])[..., None]
one_hot = tf.cast(tf.one_hot(tf.squeeze(yt, -1), 3), tf.float32)
probs = tf.cast(tf.one_hot(tf.squeeze(yp, -1), 3), tf.float32)
loss = _unbatched_soft_jaccard(one_hot[..., 1:], probs[...,
1:]).numpy().mean()
np.testing.assert_almost_equal(loss, (1 - 1 / 2) / 2, decimal=3)
def test__unbatched_label_to_hot():
_unbatched_label_to_hot = DummySpatialInstanceEmbeddingLoss(
)._unbatched_label_to_hot
np.random.seed(25)
labels = np.random.choice(range(5), size=(10, 10, 1)).astype(np.int32)
hot_labels = _unbatched_label_to_hot(labels)
# #channels == #unique labels - bg
assert hot_labels.shape == (10, 10, 4)
for idx, l in enumerate([1, 2, 3, 4]):
hot_slice = hot_labels[..., idx].numpy().astype(bool)
l_mask = labels.squeeze() == l
np.testing.assert_array_equal(hot_slice, l_mask)
def test_relabel_sequential():
    np.random.seed(25)
import pytest
import numpy as np
from numpy.testing import assert_allclose
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.utils import parallel_backend
from sklearn.utils._testing import _convert_container
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression(n_jobs):
# Make sure that feature highly correlated to the target have a higher
# importance
rng = np.random.RandomState(42)
n_repeats = 5
X, y = load_boston(return_X_y=True)
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
X = np.hstack([X, y_with_little_noise])
clf = RandomForestRegressor(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] >
result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression_pandas(n_jobs):
pd = pytest.importorskip("pandas")
# Make sure that feature highly correlated to the target have a higher
# importance
rng = np.random.RandomState(42)
n_repeats = 5
dataset = load_iris()
X, y = dataset.data, dataset.target
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
# Adds feature correlated with y as the last column
X = pd.DataFrame(X, columns=dataset.feature_names)
X['correlated_feature'] = y_with_little_noise
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_mixed_types():
rng = np.random.RandomState(42)
n_repeats = 4
# Last column is correlated with y
X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T
y = np.array([0, 1, 0, 1])
clf = make_pipeline(SimpleImputer(), LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
# use another random state
    rng = np.random.RandomState(0)
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
data=pd.read_csv(path)
data=data.rename(columns={ "Total" : "Total_Medals"})
data['Better_Event'] = np.where(data['Total_Summer']>data["Total_Winter"],"Summer","Winter")
data['Better_Event'] = np.where(data['Total_Summer'] == data["Total_Winter"], "both", data['Better_Event'])
#!/usr/bin/env python
import renderapi
import argparse
import numpy as np
import os
from PIL import Image
from glob import glob
def gen_matches(flow_dir, match_name, n, stack, render_connect_params):
render = renderapi.connect(**render_connect_params)
tilespecs = renderapi.tilespec.get_tile_specs_from_stack(
stack, render=render)
spec_to_size_x = {tile.tileId: tile.maxX for tile in tilespecs}
spec_to_size_y = {tile.tileId: tile.maxY for tile in tilespecs}
for base in glob("{}/*_bottom_x.tiff".format(flow_dir)):
base = base[:-14] # Remove the _bottom_x.tiff
        scale = float(base.split("_")[-1])  # grab the scale embedded in the filename
inv_scale = 1/scale
base = "_".join(base.split("_")[:-1]) # Restore it without scale
top_bottom = ["top", "bottom"]
base_split = base.split("/")[-1].split("~")
groups = base_split[0].split("_")
print(groups[0], groups[1])
if len(renderapi.pointmatch.get_matches_from_group_to_group(match_name, groups[0], groups[1], render=render)):
continue
tiles = base_split[1:]
w = []
p = []
q = []
for s in top_bottom:
im_x = np.array(Image.open(
base+"_{:.2f}_".format(scale) + s+"_x.tiff"))
im_y = np.array(Image.open(
base+"_{:.2f}_".format(scale) + s+"_y.tiff"))
            rand = np.random.random([n, 2])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2019-08-12 at 17:16
@author: cook
"""
import numpy as np
from astropy import constants as cc
from astropy import units as uu
from scipy.optimize import curve_fit
import warnings
import os
from apero import core
from apero.core import constants
from apero.core import math as mp
from apero import lang
from apero.core.core import drs_log
from apero.core.core import drs_file
from apero.core.core import drs_database
from apero.io import drs_data
from apero.io import drs_fits
from apero.science.calib import flat_blaze
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'science.telluric.general.py'
__INSTRUMENT__ = 'None'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# get param dict
ParamDict = constants.ParamDict
DrsFitsFile = drs_file.DrsFitsFile
# Get function string
display_func = drs_log.display_func
# Get Logging function
WLOG = drs_log.wlog
# Get the text types
TextEntry = lang.drs_text.TextEntry
TextDict = lang.drs_text.TextDict
# alias pcheck
pcheck = core.pcheck
# Speed of light
# noinspection PyUnresolvedReferences
speed_of_light_ms = cc.c.to(uu.m / uu.s).value
# noinspection PyUnresolvedReferences
speed_of_light = cc.c.to(uu.km / uu.s).value
# =============================================================================
# Define functions
# =============================================================================
def get_whitelist(params, **kwargs):
func_name = __NAME__ + '.get_whitelist()'
# get pseudo constants
pconst = constants.pload(instrument=params['INSTRUMENT'])
# get parameters from params/kwargs
relfolder = pcheck(params, 'TELLU_LIST_DIRECOTRY', 'directory', kwargs,
func_name)
filename = pcheck(params, 'TELLU_WHITELIST_NAME', 'filename', kwargs,
func_name)
# load the white list
wout = drs_data.load_text_file(params, filename, relfolder, kwargs,
func_name, dtype=str)
whitelist, whitelistfile = wout
# must clean names
whitelist = list(map(pconst.DRS_OBJ_NAME, whitelist))
# return the whitelist
return whitelist, whitelistfile
def get_blacklist(params, **kwargs):
func_name = __NAME__ + '.get_blacklist()'
# get pseudo constants
pconst = constants.pload(instrument=params['INSTRUMENT'])
# get parameters from params/kwargs
relfolder = pcheck(params, 'TELLU_LIST_DIRECOTRY', 'directory', kwargs,
func_name)
filename = pcheck(params, 'TELLU_BLACKLIST_NAME', 'filename', kwargs,
func_name)
# load the white list
bout = drs_data.load_text_file(params, filename, relfolder, kwargs,
func_name, dtype=str)
blacklist, blacklistfile = bout
# must clean names
blacklist = list(map(pconst.DRS_OBJ_NAME, blacklist))
# return the whitelist
return blacklist, blacklistfile
def normalise_by_pblaze(params, image, header, fiber, **kwargs):
func_name = __NAME__ + '.normalise_by_pblaze()'
# get properties from params/kwargs
blaze_p = pcheck(params, 'MKTELLU_BLAZE_PERCENTILE', 'blaze_p', kwargs,
func_name)
cut_blaze_norm = pcheck(params, 'MKTELLU_CUT_BLAZE_NORM', 'cut_blaze_norm',
kwargs, func_name)
# ----------------------------------------------------------------------
# copy the image
image1 = np.array(image)
# ----------------------------------------------------------------------
# load the blaze file for this fiber
blaze_file, blaze = flat_blaze.get_blaze(params, header, fiber)
# copy blaze
blaze_norm = np.array(blaze)
# loop through blaze orders, normalize blaze by its peak amplitude
for order_num in range(image1.shape[0]):
# normalize the spectrum
spo, bzo = image1[order_num], blaze[order_num]
# normalise image
image1[order_num] = spo / np.nanpercentile(spo, blaze_p)
# normalize the blaze
blaze_norm[order_num] = bzo / np.nanpercentile(bzo, blaze_p)
# ----------------------------------------------------------------------
# find where the blaze is bad
with warnings.catch_warnings(record=True) as _:
badblaze = blaze_norm < cut_blaze_norm
# ----------------------------------------------------------------------
# set bad blaze to NaN
blaze_norm[badblaze] = np.nan
# set to NaN values where spectrum is zero
zeromask = image1 == 0
image1[zeromask] = np.nan
# divide spectrum by blaze
with warnings.catch_warnings(record=True) as _:
image1 = image1 / blaze_norm
# ----------------------------------------------------------------------
# parameter dictionary
nprops = ParamDict()
nprops['BLAZE'] = blaze
nprops['NBLAZE'] = blaze_norm
nprops['BLAZE_PERCENTILE'] = blaze_p
nprops['BLAZE_CUT_NORM'] = cut_blaze_norm
nprops['BLAZE_FILE'] = blaze_file
# set sources
keys = ['BLAZE', 'NBLAZE', 'BLAZE_PERCENTILE', 'BLAZE_CUT_NORM',
'BLAZE_FILE']
nprops.set_sources(keys, func_name)
# return the normalised image and the properties
return image1, nprops
def get_non_tellu_objs(params, recipe, fiber, filetype=None, dprtypes=None,
robjnames=None):
"""
Get the objects of "filetype" and "
:param params:
:param fiber:
:param filetype:
:param dprtypes:
:param robjnames:
:return:
"""
# get the telluric star names (we don't want to process these)
objnames, _ = get_whitelist(params)
objnames = list(objnames)
# deal with filetype being string
if isinstance(filetype, str):
filetype = filetype.split(',')
# deal with dprtypes being string
if isinstance(dprtypes, str):
dprtypes = dprtypes.split(',')
# construct kwargs
fkwargs = dict()
if filetype is not None:
fkwargs['KW_OUTPUT'] = filetype
if dprtypes is not None:
fkwargs['KW_DPRTYPE'] = dprtypes
# # find files
out = drs_fits.find_files(params, recipe, kind='red', return_table=True,
fiber=fiber, **fkwargs)
obj_filenames, obj_table = out
# filter out telluric stars
obj_stars, obj_names = [], []
# loop around object table and only keep non-telluric stars
for row in range(len(obj_table)):
# get object name
iobjname = obj_table['KW_OBJNAME'][row]
# if required object name is set
if robjnames is not None:
if iobjname in robjnames:
obj_stars.append(obj_filenames[row])
if iobjname not in obj_names:
obj_names.append(iobjname)
# if in telluric list skip
elif iobjname not in objnames:
obj_stars.append(obj_filenames[row])
if iobjname not in obj_names:
obj_names.append(iobjname)
# return absolute path names and object names
return obj_stars, obj_names
def get_tellu_objs(params, key, objnames=None, **kwargs):
"""
Get objects defined be "key" from telluric database (in list objname)
:param params:
:param key:
:param objnames:
:param kwargs:
:return:
"""
# deal with column to select from entries
column = kwargs.get('column', 'filename')
objcol = kwargs.get('objcol', 'objname')
# ----------------------------------------------------------------------
# deal with objnames
if isinstance(objnames, str):
objnames = [objnames]
# ----------------------------------------------------------------------
# load telluric obj entries (based on key)
obj_entries = load_tellu_file(params, key=key, inheader=None, mode='ALL',
return_entries=True, n_entries='all',
required=False)
# add to type
typestr = str(key)
# ----------------------------------------------------------------------
# keep only objects with objnames
mask = np.zeros(len(obj_entries)).astype(bool)
# deal with no object found
if len(obj_entries) == 0:
return []
elif objnames is not None:
# storage for found objects
found_objs = []
# loop around objnames
for objname in objnames:
# update the mask
mask |= obj_entries[objcol] == objname
# only add to the mask if objname found
if objname in obj_entries[objcol]:
# update the found objs
found_objs.append(objname)
# update type string
typestr += ' OBJNAME={0}'.format(', '.join(found_objs))
# ----------------------------------------------------------------------
# deal with all entries / one column return
if column in [None, 'None', '', 'ALL']:
outputs = obj_entries[mask]
else:
outputs = np.unique(obj_entries[column][mask])
# ----------------------------------------------------------------------
# deal with getting absolute paths
if column == 'filename':
abspaths = []
# loop around filenames
for filename in outputs:
# get absolute path
abspath = drs_database.get_db_abspath(params, filename,
where='telluric')
# append to list
abspaths.append(abspath)
# push back into outputs
outputs = list(abspaths)
# ----------------------------------------------------------------------
# display how many files found
margs = [len(outputs), typestr]
WLOG(params, '', TextEntry('40-019-00039', args=margs))
return outputs
def get_sp_linelists(params, **kwargs):
func_name = __NAME__ + '.get_sp_linelists()'
# get pseudo constants
pconst = constants.pload(instrument=params['INSTRUMENT'])
# get parameters from params/kwargs
relfolder = pcheck(params, 'TELLU_LIST_DIRECOTRY', 'directory', kwargs,
func_name)
othersfile = pcheck(params, 'TELLUP_OTHERS_CCF_FILE', 'filename', kwargs,
func_name)
waterfile = pcheck(params, 'TELLUP_H2O_CCF_FILE', 'filename', kwargs,
func_name)
# load the others file list
mask_others, _ = drs_data.load_ccf_mask(params, directory=relfolder,
filename=othersfile)
mask_water, _ = drs_data.load_ccf_mask(params, directory=relfolder,
filename=waterfile)
# return masks
return mask_others, mask_water
# =============================================================================
# pre-cleaning functions
# =============================================================================
def tellu_preclean(params, recipe, infile, wprops, fiber, rawfiles, combine,
**kwargs):
"""
Main telluric pre-cleaning functionality.
Pass an e2ds image and return the telluric-corrected data.
This is a rough model fit and we will need to perform PCA correction on
top of it.
Will fit both water and all dry components of the absorption separately.
Underlying idea: We correct with a super naive tapas fit and iterate
until the CCF of the telluric absorption falls to zero. We have 2 degrees
of freedom, the dry and water components of the atmosphere.
The instrument profile is defined by two additional parameters
[ww -> FWHM, ex_gau -> kernel shape parameter].
Again, this is just a cleanup PRIOR to PCA correction, so if the code is
not perfect in it's correction, this is fine as we will empirically
determine the residuals and fit them in a subsequent step.
we set bounds to the limits of the reasonable domain for both parameters.
:param params:
:param recipe:
:param infile:
:param wprops:
:param fiber:
:param rawfiles:
:param combine:
:return:
"""
# set the function name
func_name = __NAME__ + '.tellu_preclean()'
# ----------------------------------------------------------------------
# look for precleaned file
loadprops = read_tellu_preclean(params, recipe, infile, fiber)
# if precleaned load and return
if loadprops is not None:
return loadprops
# ----------------------------------------------------------------------
# get parameters from parameter dictionary
do_precleaning = pcheck(params, 'TELLUP_DO_PRECLEANING', 'do_precleaning',
kwargs, func_name)
default_water_abso = pcheck(params, 'TELLUP_D_WATER_ABSO',
'default_water_abso', kwargs, func_name)
ccf_scan_range = pcheck(params, 'TELLUP_CCF_SCAN_RANGE', 'ccf_scan_range',
kwargs, func_name)
clean_ohlines = pcheck(params, 'TELLUP_CLEAN_OH_LINES', 'clean_ohlines',
kwargs, func_name)
remove_orders = pcheck(params, 'TELLUP_REMOVE_ORDS', 'remove_orders',
kwargs, func_name, mapf='list', dtype=int)
snr_min_thres = pcheck(params, 'TELLUP_SNR_MIN_THRES', 'snr_min_thres',
kwargs, func_name)
dexpo_thres = pcheck(params, 'TELLUP_DEXPO_CONV_THRES', 'dexpo_thres',
kwargs, func_name)
max_iterations = pcheck(params, 'TELLUP_DEXPO_MAX_ITR', 'max_iterations',
kwargs, func_name)
ker_width = pcheck(params, 'TELLUP_ABSO_EXPO_KWID', 'ker_width', kwargs,
func_name)
ker_shape = pcheck(params, 'TELLUP_ABSO_EXPO_KEXP', 'ker_shape', kwargs,
func_name)
trans_thres = pcheck(params, 'TELLUP_TRANS_THRES', 'trans_thres', kwargs,
func_name)
trans_siglim = pcheck(params, 'TELLUP_TRANS_SIGLIM', 'trans_siglim', kwargs,
func_name)
force_airmass = pcheck(params, 'TELLUP_FORCE_AIRMASS', 'force_airmass',
kwargs, func_name)
others_bounds = pcheck(params, 'TELLUP_OTHER_BOUNDS', 'others_bounds',
kwargs, func_name, mapf='list', dtype=float)
water_bounds = pcheck(params, 'TELLUP_WATER_BOUNDS', 'water_bounds', kwargs,
func_name, mapf='list', dtype=float)
ker_thres = pcheck(params, 'TELLUP_ABSO_EXPO_KTHRES', 'ker_thres', kwargs,
func_name)
wavestart = pcheck(params, 'EXT_S1D_WAVESTART', 'wavestart', kwargs,
func_name)
waveend = pcheck(params, 'EXT_S1D_WAVEEND', 'waveend', kwargs, func_name)
dvgrid = pcheck(params, 'EXT_S1D_BIN_UVELO', 'dvgrid', kwargs, func_name)
# ----------------------------------------------------------------------
# get image and header from infile
header = infile.header
# get airmass from header
hdr_airmass = infile.get_key('KW_AIRMASS', dtype=float)
# copy e2ds input image
image_e2ds_ini = np.array(infile.data)
# get shape of the e2ds
nbo, nbpix = image_e2ds_ini.shape
# get wave map for the input e2ds
wave_e2ds = wprops['WAVEMAP']
# ----------------------------------------------------------------------
# define storage of quality control
qc_values, qc_names, qc_logic, qc_pass = [], [], [], []
# need to add dummy values for these qc
# 1. snr < snr_min_thres (pos = 0)
qc_values.append(np.nan)
qc_names.append('EXTSNR')
qc_logic.append('EXTSNR < {0}'.format(snr_min_thres))
qc_pass.append(np.nan)
# 2. ccf is NaN (pos = 1)
qc_values.append(np.nan)
qc_names.append('NUM_NAN_CCF')
qc_logic.append('NUM_NAN_CCF > 0')
qc_pass.append(np.nan)
# 3. exponent for others out of bounds (pos = 2 and 3)
qc_values += [np.nan, np.nan]
qc_names += ['EXPO_OTHERS L', 'EXPO_OTHERS U']
qc_logic += ['EXPO_OTHERS L < {0}'.format(others_bounds[0]),
'EXPO_OTHERS U > {0}'.format(others_bounds[1])]
qc_pass += [np.nan, np.nan]
# 4. exponent for water out of bounds (pos 4 and 5)
qc_values += [np.nan, np.nan]
qc_names += ['EXPO_WATER L', 'EXPO_WATER U']
qc_logic += ['EXPO_WATER L < {0}'.format(water_bounds[0]),
'EXPO_WATER U > {0}'.format(water_bounds[1])]
qc_pass += [np.nan, np.nan]
# 5. max iterations exceeded (pos = 6)
qc_values.append(np.nan)
qc_names.append('ITERATIONS')
qc_logic.append('ITERATIONS = {0}'.format(max_iterations - 1))
qc_pass.append(np.nan)
# dev note: if adding a new one must add tfailmsgs for all uses in qc
# (mk_tellu and fit_tellu)
# ----------------------------------------------------------------------
# remove OH lines if required
if clean_ohlines:
image_e2ds, sky_model = clean_ohline_pca(params, image_e2ds_ini,
wave_e2ds)
# else just copy the image and set the sky model to zeros
else:
image_e2ds = np.array(image_e2ds_ini)
sky_model = np.zeros_like(image_e2ds_ini)
# ----------------------------------------------------------------------
if not do_precleaning:
# log progress
WLOG(params, '', TextEntry('10-019-00008'))
# populate qc params
qc_params = [qc_names, qc_values, qc_logic, qc_pass]
# populate parameter dictionary
props = ParamDict()
props['CORRECTED_E2DS'] = image_e2ds
props['TRANS_MASK'] = np.ones_like(image_e2ds_ini).astype(bool)
props['ABSO_E2DS'] = np.ones_like(image_e2ds_ini)
props['SKY_MODEL'] = sky_model
props['EXPO_WATER'] = np.nan
props['EXPO_OTHERS'] = np.nan
props['DV_WATER'] = np.nan
props['DV_OTHERS'] = np.nan
props['CCFPOWER_WATER'] = np.nan
props['CCFPOWER_OTHERS'] = np.nan
props['QC_PARAMS'] = qc_params
# set sources
keys = ['CORRECTED_E2DS', 'TRANS_MASK', 'ABSO_E2DS', 'EXPO_WATER',
'EXPO_OTHERS', 'DV_WATER', 'DV_OTHERS', 'CCFPOWER_WATER',
'CCFPOWER_OTHERS', 'QC_PARAMS', 'SKY_MODEL']
props.set_sources(keys, func_name)
# ------------------------------------------------------------------
# add constants used (can come from kwargs)
props['TELLUP_DO_PRECLEANING'] = do_precleaning
props['TELLUP_D_WATER_ABSO'] = default_water_abso
props['TELLUP_CCF_SCAN_RANGE'] = ccf_scan_range
props['TELLUP_CLEAN_OH_LINES'] = clean_ohlines
props['TELLUP_REMOVE_ORDS'] = remove_orders
props['TELLUP_SNR_MIN_THRES'] = snr_min_thres
props['TELLUP_DEXPO_CONV_THRES'] = dexpo_thres
props['TELLUP_DEXPO_MAX_ITR'] = max_iterations
props['TELLUP_ABSO_EXPO_KWID'] = ker_width
props['TELLUP_ABSO_EXPO_KEXP'] = ker_shape
props['TELLUP_TRANS_THRES'] = trans_thres
props['TELLUP_TRANS_SIGLIM'] = trans_siglim
props['TELLUP_FORCE_AIRMASS'] = force_airmass
props['TELLUP_OTHER_BOUNDS'] = others_bounds
props['TELLUP_WATER_BOUNDS'] = water_bounds
props['TELLUP_ABSO_EXPO_KTHRES'] = ker_thres
props['TELLUP_WAVE_START'] = wavestart
props['TELLUP_WAVE_END'] = waveend
props['TELLUP_DVGRID'] = dvgrid
# set sources
keys = ['TELLUP_D_WATER_ABSO', 'TELLUP_CCF_SCAN_RANGE',
'TELLUP_CLEAN_OH_LINES', 'TELLUP_REMOVE_ORDS',
'TELLUP_SNR_MIN_THRES', 'TELLUP_DEXPO_CONV_THRES',
'TELLUP_DEXPO_MAX_ITR', 'TELLUP_ABSO_EXPO_KWID',
'TELLUP_ABSO_EXPO_KEXP', 'TELLUP_TRANS_THRES',
'TELLUP_TRANS_SIGLIM', 'TELLUP_FORCE_AIRMASS',
'TELLUP_OTHER_BOUNDS', 'TELLUP_WATER_BOUNDS',
'TELLUP_ABSO_EXPO_KTHRES', 'TELLUP_WAVE_START',
'TELLUP_WAVE_END', 'TELLUP_DVGRID', 'TELLUP_DO_PRECLEANING']
props.set_sources(keys, func_name)
# ------------------------------------------------------------------
# return props
return props
# ----------------------------------------------------------------------
# we ravel the wavelength grid to make it a 1d array of increasing
# wavelength. We will trim the overlapping domain between orders
keep = np.ones_like(wave_e2ds).astype(bool)
# keep track of where orders are
orders, _ = np.indices(wave_e2ds.shape)
# loop around 2nd to last-1 order and compare -1th and +1th order
for order_num in range(1, nbo - 1):
        # get wavelengths not in order before
before = wave_e2ds[order_num] > wave_e2ds[order_num - 1][::-1]
# get wavelengths not in order after
after = wave_e2ds[order_num] < wave_e2ds[order_num + 1][::-1]
# combine mask
keep[order_num] = before & after
# set whole first order to zeros (rejected)
keep[0] = np.zeros(nbpix).astype(bool)
# set whole last order to zeros (rejected)
keep[-1] = np.zeros(nbpix).astype(bool)
# ----------------------------------------------------------------------
# force into 1D and apply keep map
flatkeep = keep.ravel()
wavemap = wave_e2ds.ravel()[flatkeep]
spectrum = image_e2ds.ravel()[flatkeep]
spectrum_ini = image_e2ds_ini.ravel()[flatkeep]
orders = orders.ravel()[flatkeep]
# ----------------------------------------------------------------------
# load tapas in correct format
spl_others, spl_water = load_tapas_spl(params, recipe, header)
# ----------------------------------------------------------------------
# load the snr from e2ds file
snr = infile.read_header_key_1d_list('KW_EXT_SNR', nbo, dtype=float)
# remove infinite / NaN snr
snr[~np.isfinite(snr)] = 0.0
# remove snr from these orders (due to thermal background)
for order_num in remove_orders:
snr[order_num] = 0.0
    # make sure we have at least one order above the min snr requirement
if np.nanmax(snr) < snr_min_thres:
# update qc params
qc_values[0] = np.nanmax(snr)
qc_pass[0] = 0
qc_params = [qc_names, qc_values, qc_logic, qc_pass]
# return qc_exit_tellu_preclean
return qc_exit_tellu_preclean(params, recipe, image_e2ds, infile,
wave_e2ds, qc_params, sky_model)
else:
qc_values[0] = np.nanmax(snr)
qc_pass[0] = 1
# mask all orders below min snr
for order_num in range(nbo):
# only mask if snr below threshold
if snr[order_num] < snr_min_thres:
# find order mask (we only want to remove values in this order
order_mask = orders == order_num
# apply low snr mask to spectrum
spectrum[order_mask] = np.nan
    # for numerical stability, remove NaNs. Setting to zero biases the CCF
    # a bit, but this should be OK after we converge
    spectrum[~np.isfinite(spectrum)] = 0.0
#!/usr/bin/env python
# -*- coding=utf-8 -*-
###########################################################################
# Copyright (C) 2013-2016 by Caspar. All rights reserved.
# File Name: txtclf.py
# Author: <NAME>
# E-mail: <EMAIL>
# Created Time: 2016-07-05 14:39:18
###########################################################################
#
import os, sys, difflib, itertools
from time import time
import numpy as np
import scipy as sp
import scipy.stats as stats
import pandas as pd
from sklearn.base import clone
from sklearn.preprocessing import MinMaxScaler, LabelBinarizer, label_binarize, normalize
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold, KFold, GridSearchCV, RandomizedSearchCV
from sklearn import metrics
from .util import io, func, plot
from .util import math as imath
common_cfg = {}
def init(plot_cfg={}, plot_common={}):
if (len(plot_cfg) > 0 and plot_cfg['MON'] is not None):
plot.MON = plot_cfg['MON']
global common_cfg
if (len(plot_common) > 0):
common_cfg = plot_common
def get_featw(pipeline, feat_num):
feat_w_dict, sub_feat_w = [{} for i in range(2)]
filt_feat_idx = feature_idx = np.arange(feat_num)
for component in ('featfilt', 'clf'):
if (type(pipeline) != Pipeline):
if (component == 'featfilt'):
continue
else:
cmpn = pipeline
elif (component in pipeline.named_steps):
cmpn = pipeline.named_steps[component]
else:
continue
if (hasattr(cmpn, 'estimators_')):
for i, estm in enumerate(cmpn.estimators_):
filt_subfeat_idx = feature_idx[:]
if (hasattr(estm, 'get_support')):
filt_subfeat_idx = feature_idx[estm.get_support()]
for measure in ('feature_importances_', 'coef_', 'scores_'):
if (hasattr(estm, measure)):
filt_subfeat_w = getattr(estm, measure)
subfeat_w = (filt_subfeat_w.min() - 1) * np.ones_like(feature_idx)
# subfeat_w[filt_subfeat_idx] = normalize(estm.feature_importances_, norm='l1')
subfeat_w[filt_subfeat_idx] = filt_subfeat_w
# print 'Sub FI shape: (%s)' % ','.join([str(x) for x in filt_subfeat_w.shape])
# print 'Feature Importance inside %s Ensemble Method: %s' % (component, filt_subfeat_w)
sub_feat_w[(component, i)] = subfeat_w
        if (hasattr(cmpn, 'get_support')):
            filt_feat_idx = feature_idx[cmpn.get_support()]
for measure in ('feature_importances_', 'coef_', 'scores_'):
if (hasattr(cmpn, measure)):
filt_feat_w = getattr(cmpn, measure)
# print '*' * 80 + '\n%s\n'%filt_feat_w + '*' * 80
feat_w = (filt_feat_w.min() - 1) * np.ones_like(feature_idx)
# feat_w[filt_feat_idx] = normalize(filt_feat_w, norm='l1')
feat_w[filt_feat_idx] = filt_feat_w
# print '*' * 80 + '\n%s\n'%feat_w + '*' * 80
feat_w_dict[(component, measure)] = feat_w
print('FI shape: (%s)' % ','.join([str(x) for x in feat_w_dict[(component, measure)].shape]))
print('Sample 10 Feature from %s.%s: %s' % (component, measure, feat_w[feat_w > 0][:10]))
# print 'Feature Importance from %s.%s: %s' % (component, measure, feat_w)
return feat_w_dict, sub_feat_w
def get_score(pipeline, X_test, mltl=False):
if ((not isinstance(pipeline, Pipeline) and hasattr(pipeline, 'predict_proba')) or(isinstance(pipeline.named_steps['clf'], OneVsRestClassifier) and hasattr(pipeline.named_steps['clf'].estimators_[0], 'predict_proba')) or (not isinstance(pipeline.named_steps['clf'], OneVsRestClassifier) and hasattr(pipeline, 'predict_proba'))):
if (mltl):
return pipeline.predict_proba(X_test)
else:
# return pipeline.predict_proba(X_test)[:, 1]
return pipeline.predict_proba(X_test)
elif (hasattr(pipeline, 'decision_function')):
return pipeline.decision_function(X_test)
else:
print('Neither probability estimate nor decision function is supported in the classification model!')
        return [0] * X_test.shape[0]
# Benchmark
def benchmark(pipeline, X_train, Y_train, X_test, Y_test, mltl=False, signed=False, average='micro'):
print('+' * 80)
print('Training Model: ')
print(pipeline)
t0 = time()
pipeline.fit(X_train, Y_train)
train_time = time() - t0
print('train time: %0.3fs' % train_time)
t0 = time()
orig_pred = pred = pipeline.predict(X_test)
orig_prob = prob = pipeline.predict_proba(X_test) if hasattr(pipeline, 'predict_proba') else pipeline.decision_function(X_test)
test_time = time() - t0
print('+' * 80)
print('Testing: ')
print('test time: %0.3fs' % test_time)
is_mltl = mltl
if (signed):
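        # signed labels are decomposed into a magnitude column plus one-hot
        # sign columns (label_binarize over [-1, 1, 0]) so the metrics below
        # can score the sign as an additional multilabel task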
Y_test = np.column_stack([np.abs(Y_test).reshape((Y_test.shape[0],-1))] + [label_binarize(lb, classes=[-1,1,0])[:,1] for lb in (np.sign(Y_test).astype('int8').reshape((Y_test.shape[0],-1))).T]) if (len(Y_test.shape) < 2 or Y_test.shape[1] == 1 or np.where(Y_test<0)[0].shape[0]>0) else Y_test
pred = np.column_stack([np.abs(pred).reshape((pred.shape[0],-1))] + [label_binarize(lb, classes=[-1,1,0])[:,1] for lb in (np.sign(pred).astype('int8').reshape((pred.shape[0],-1))).T]) if (len(pred.shape) < 2 or pred.shape[1] == 1 or np.where(pred<0)[0].shape[0]>0) else pred
is_mltl = True
try:
accuracy = metrics.accuracy_score(Y_test, pred)
except ValueError as e:
print(e)
Y_test, pred = Y_test.ravel(), pred.ravel()
accuracy = metrics.accuracy_score(Y_test, pred)
print('accuracy: %0.3f' % accuracy)
if (is_mltl and average == 'all'):
micro_precision = metrics.precision_score(Y_test, pred, average='micro')
print('micro-precision: %0.3f' % micro_precision)
micro_recall = metrics.recall_score(Y_test, pred, average='micro')
print('micro-recall: %0.3f' % micro_recall)
micro_fscore = metrics.fbeta_score(Y_test, pred, beta=1, average='micro')
print('micro-fscore: %0.3f' % micro_fscore)
macro_precision = metrics.precision_score(Y_test, pred, average='macro')
print('macro-precision: %0.3f' % macro_precision)
macro_recall = metrics.recall_score(Y_test, pred, average='macro')
print('macro-recall: %0.3f' % macro_recall)
macro_fscore = metrics.fbeta_score(Y_test, pred, beta=1, average='macro')
print('macro-fscore: %0.3f' % macro_fscore)
else:
precision = metrics.precision_score(Y_test, pred, average=average if is_mltl else 'binary')
print('precision: %0.3f' % precision)
recall = metrics.recall_score(Y_test, pred, average=average if is_mltl else 'binary')
print('recall: %0.3f' % recall)
fscore = metrics.fbeta_score(Y_test, pred, beta=1, average=average if is_mltl else 'binary')
print('fscore: %0.3f' % fscore)
print('classification report:')
# print metrics.classification_report(Y_test, pred)
metric_df = pd.DataFrame(metrics.classification_report(Y_test, pred, output_dict=True)).T[['precision', 'recall', 'f1-score', 'support']]
print(metric_df)
print('confusion matrix:')
if (is_mltl):
pass
else:
print(metrics.confusion_matrix(Y_test, pred))
print('+' * 80)
clf = pipeline.named_steps['clf'] if (type(pipeline) is Pipeline) else pipeline
if ((isinstance(clf, OneVsRestClassifier) and hasattr(clf.estimators_[0], 'predict_proba')) or (not isinstance(clf, OneVsRestClassifier) and hasattr(pipeline, 'predict_proba'))):
if (mltl):
scores = pipeline.predict_proba(X_test)
if (type(scores) == list):
scores = np.concatenate([score[:, -1].reshape((-1, 1)) for score in scores], axis=1)
else:
scores = pipeline.predict_proba(X_test)[:, -1]
elif (hasattr(pipeline, 'decision_function')):
scores = pipeline.decision_function(X_test)
else:
print('Neither probability estimate nor decision function is supported in the classification model! ROC and PRC figures will be invalid.')
scores = [0] * Y_test.shape[0]
if (signed and (len(scores.shape) < 2 or scores.shape[1] < pred.shape[1])):
scores = np.concatenate([np.abs(scores).reshape((scores.shape[0],-1))] + [label_binarize(lb, classes=[-1,1,0])[:,:2] for lb in (np.sign(scores).astype('int8').reshape((scores.shape[0],-1))).T], axis=1)
if (is_mltl):
if ((len(Y_test.shape) == 1 or Y_test.shape[1] == 1) and len(np.unique(Y_test)) > 2):
lbz = LabelBinarizer()
Y_test = lbz.fit_transform(Y_test)
def micro():
# Micro-average ROC curve
y_true = np.array(Y_test)
s_array = np.array(scores)
if (len(s_array.shape) == 3):
s_array = s_array[:,:,1].reshape((s_array.shape[0],s_array.shape[1],))
if (y_true.shape[0] == s_array.shape[1] and y_true.shape[1] == s_array.shape[0]):
s_array = s_array.T
return metrics.roc_curve(y_true.ravel(), s_array.ravel())
def macro():
# Macro-average ROC curve
n_classes = Y_test.shape[1]
fpr, tpr = [dict() for i in range(2)]
for i in range(n_classes):
fpr[i], tpr[i], _ = metrics.roc_curve(Y_test[:, i], scores[:, i])
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
return all_fpr, mean_tpr, _
if (average == 'micro'):
roc = micro()
elif (average == 'macro'):
roc = macro()
elif (average == 'all'):
micro_roc = micro()
macro_roc = macro()
if (type(scores) == list):
scores = np.array(scores)[:,:,0]
prc = metrics.precision_recall_curve(Y_test.ravel(), scores.ravel()) # Only micro-prc is supported
else:
roc = metrics.roc_curve(Y_test, scores)
prc = metrics.precision_recall_curve(Y_test, scores)
# print 'ROC:\n%s\n%s' % (roc[0], roc[1])
# print 'PRC:\n%s\n%s' % (prc[0], prc[1])
print('Training and Testing X shape: %s; %s' % (', '.join(['(%s)' % ','.join([str(x) for x in X.shape]) for X in X_train]) if type(X_train) is list else '(%s)' % ','.join([str(x) for x in X_train.shape]), ', '.join(['(%s)' % ','.join([str(x) for x in X.shape]) for X in X_test]) if type(X_test) is list else '(%s)' % ','.join([str(x) for x in X_test.shape])))
feat_w_dict, sub_feat_w = [{} for i in range(2)]
filt_feat_idx = feature_idx = np.arange(X_train[0].shape[1] if type(X_train) is list else X_train.shape[1])
for component in ('featfilt', 'clf'):
if (type(pipeline) != Pipeline):
if (component == 'featfilt'):
continue
else:
cmpn = pipeline
elif (component in pipeline.named_steps):
cmpn = pipeline.named_steps[component]
else:
continue
if (hasattr(cmpn, 'estimators_')):
for i, estm in enumerate(cmpn.estimators_):
filt_subfeat_idx = filt_feat_idx[:]
if (hasattr(estm, 'get_support')):
filt_subfeat_idx = filt_feat_idx[estm.get_support()]
for measure in ('feature_importances_', 'coef_', 'scores_'):
if (hasattr(estm, measure)):
filt_subfeat_w = getattr(estm, measure)
subfeat_w = (filt_subfeat_w.min() - 1) * np.ones_like(feature_idx)
# subfeat_w[filt_subfeat_idx][:len(estm.feature_importances_)] = normalize(estm.feature_importances_, norm='l1')
subfeat_w[filt_subfeat_idx][:len(filt_subfeat_w)] = filt_subfeat_w
# print 'Sub FI shape: (%s)' % ','.join([str(x) for x in filt_subfeat_w.shape])
# print 'Feature Importance inside %s Ensemble Method: %s' % (component, filt_subfeat_w)
sub_feat_w[(component, i)] = subfeat_w
for measure in ('feature_importances_', 'coef_', 'scores_'):
if (hasattr(cmpn, measure)):
filt_feat_w = getattr(cmpn, measure)
# print '*' * 80 + '\n%s\n'%filt_feat_w + '*' * 80
feat_w = (filt_feat_w.min() - 1) * np.ones_like(feature_idx)
# feat_w[filt_feat_idx][:filt_feat_w.shape[1] if len(filt_feat_w.shape) > 1 else len(filt_feat_w)] = normalize(filt_feat_w[1,:] if len(filt_feat_w.shape) > 1 else filt_feat_w, norm='l1')
feat_w[filt_feat_idx][:filt_feat_w.shape[1] if len(filt_feat_w.shape) > 1 else len(filt_feat_w)] = filt_feat_w[1,:] if len(filt_feat_w.shape) > 1 else filt_feat_w
# print '*' * 80 + '\n%s\n'%feat_w + '*' * 80
feat_w_dict[(component, measure)] = feat_w
print('FI shape: (%s)' % ','.join([str(x) for x in feat_w_dict[(component, measure)].shape]))
print('Sample 10 Feature from %s.%s: %s' % (component, measure, feat_w[feat_w > 0][:10]))
# print 'Feature Importance from %s.%s: %s' % (component, measure, feat_w)
if (hasattr(cmpn, 'get_support')):
filt_feat_idx = filt_feat_idx[cmpn.get_support()]
print('\n')
if (is_mltl and average == 'all'):
return {'accuracy':accuracy, 'micro-precision':micro_precision, 'micro-recall':micro_recall, 'micro-fscore':micro_fscore, 'macro-precision':macro_precision, 'macro-recall':macro_recall, 'macro-fscore':macro_fscore, 'train_time':train_time, 'test_time':test_time, 'micro-roc':micro_roc, 'macro-roc':macro_roc, 'prc':prc, 'feat_w':feat_w_dict, 'sub_feat_w':sub_feat_w, 'pred_lb':orig_pred, 'metrics':metric_df}
else:
return {'accuracy':accuracy, 'precision':precision, 'recall':recall, 'fscore':fscore, 'train_time':train_time, 'test_time':test_time, 'roc':roc, 'prc':prc, 'feat_w':feat_w_dict, 'sub_feat_w':sub_feat_w, 'pred_lb':orig_pred, 'pred_prob':orig_prob, 'metrics':metric_df}
# Calculate the venn digram overlaps
def pred_ovl(preds, pred_true=None, axis=1):
if (axis == 0):
preds = preds.T
if (pred_true is not None):
pred_true = pred_true.reshape((-1,))
# Row represents feature, column represents instance
var_num, dim = preds.shape[0], preds.shape[1]
orig_idx = np.arange(var_num)
if (len(preds.shape) < 2 or preds.shape[1] == 1):
if (pred_true is None):
return np.ones(shape=(1,), dtype='int')
else:
overlap_mt = np.ones(shape=(1,2), dtype='int')
overlap_mt[0,1] = orig_idx[preds.reshape((-1,)) == pred_true].shape[0]
return overlap_mt
# Calculate possible subsets of all the instance indices
subset_idx = list(imath.subset(list(range(dim)), min_crdnl=1))
# Initialize result matrix
if (pred_true is None):
overlap_mt = np.zeros(shape=(len(subset_idx),), dtype='int')
else:
overlap_mt = np.zeros(shape=(len(subset_idx), 2), dtype='int')
# Calculate overlap for each subset
for i, idx in enumerate(subset_idx):
rmn_idx = set(range(dim)) - set(idx)
        # Select the positions where the chosen instances and the remaining instances each agree internally but disagree with each other
pred_sum, chsn_sum, rmn_sum = preds.sum(axis=1), preds[:,idx].sum(axis=1), preds[:,list(rmn_idx)].sum(axis=1)
condition = np.all([np.logical_or(chsn_sum == 0, chsn_sum == len(idx)), np.logical_or(rmn_sum == 0, rmn_sum == len(rmn_idx)), np.logical_or(pred_sum == len(idx), pred_sum == len(rmn_idx))], axis=0)
if (pred_true is None):
overlap_mt[i] = orig_idx[condition].shape[0]
else:
# And the selected positions should be true
true_cond = np.logical_and(condition, preds[:,idx[0]] == pred_true)
overlap_mt[i,0] = orig_idx[condition].shape[0]
overlap_mt[i,1] = orig_idx[true_cond].shape[0]
return overlap_mt
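# Illustrative use of pred_ovl (hypothetical data, not from the original runs):
# stack each model's binary predictions as one column, then get one Venn-diagram
# count per non-empty subset of models (ordered as imath.subset yields them).
#   preds = np.array([[1, 0, 1], [1, 1, 0], [0, 0, 0], [1, 1, 1]])
#   counts = pred_ovl(preds)                                    # shape (2**3 - 1,)
#   counts = pred_ovl(preds, pred_true=np.array([1, 1, 0, 1]))  # adds a "correct" column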
def save_featw(features, crsval_featw, crsval_subfeatw, cfg_param={}, lbid=''):
lbidstr = ('_' + (str(lbid) if lbid != -1 else 'all')) if lbid is not None and lbid != '' else ''
for k, v in crsval_featw.items():
measure_str = k.replace(' ', '_').strip('_').lower()
feat_w_mt = np.column_stack(v)
mms = MinMaxScaler()
feat_w_mt = mms.fit_transform(feat_w_mt)
feat_w_avg = feat_w_mt.mean(axis=1)
feat_w_std = feat_w_mt.std(axis=1)
sorted_idx = np.argsort(feat_w_avg, axis=-1)[::-1]
# sorted_idx = sorted(range(feat_w_avg.shape[0]), key=lambda k: feat_w_avg[k])[::-1]
sorted_feat_w = np.column_stack((features[sorted_idx], feat_w_avg[sorted_idx], feat_w_std[sorted_idx]))
feat_w_df = pd.DataFrame(sorted_feat_w, index=sorted_idx, columns=['Feature Name', 'Importance Mean', 'Importance Std'])
if (cfg_param.setdefault('save_featw', False)):
feat_w_df.to_excel('featw%s_%s.xlsx' % (lbidstr, measure_str))
if (cfg_param.setdefault('save_featw_npz', False)):
io.write_df(feat_w_df, 'featw%s_%s' % (lbidstr, measure_str), with_idx=True)
if (cfg_param.setdefault('plot_featw', False)):
plot.plot_bar(feat_w_avg[sorted_idx[:10]].reshape((1,-1)), feat_w_std[sorted_idx[:10]].reshape((1,-1)), features[sorted_idx[:10]], labels=None, title='Feature importances', fname='fig_featw%s_%s' % (lbidstr, measure_str), plot_cfg=common_cfg)
for k, v in crsval_subfeatw.items():
measure_str = k.replace(' ', '_').strip('_').lower()
subfeat_w_mt = np.column_stack(v)
mms = MinMaxScaler()
subfeat_w_mt = mms.fit_transform(subfeat_w_mt)
subfeat_w_avg = subfeat_w_mt.mean(axis=1)
subfeat_w_std = subfeat_w_mt.std(axis=1)
sorted_idx = np.argsort(subfeat_w_avg, axis=-1)[::-1]
sorted_subfeat_w = np.column_stack((features[sorted_idx], subfeat_w_avg[sorted_idx], subfeat_w_std[sorted_idx]))
subfeat_w_df = pd.DataFrame(sorted_subfeat_w, index=sorted_idx, columns=['Feature Name', 'Importance Mean', 'Importance Std'])
if (cfg_param.setdefault('save_subfeatw', False)):
subfeat_w_df.to_excel('subfeatw%s_%s.xlsx' % (lbidstr, measure_str))
if (cfg_param.setdefault('save_subfeatw_npz', False)):
io.write_df(subfeat_w_df, 'subfeatw%s_%s' % (lbidstr, measure_str), with_idx=True)
if (cfg_param.setdefault('plot_subfeatw', False)):
            plot.plot_bar(subfeat_w_avg[sorted_idx[:10]].reshape((1,-1)), subfeat_w_std[sorted_idx[:10]].reshape((1,-1)), features[sorted_idx[:10]], labels=None, title='Feature importances', fname='fig_subfeatw%s_%s' % (lbidstr, measure_str), plot_cfg=common_cfg)
# Classification
def classification(X_train, Y_train, X_test, model_iter, model_param={}, cfg_param={}, global_param={}, lbid=''):
    print('Classifying...')
global common_cfg
FILT_NAMES, CLF_NAMES, PL_NAMES, PL_SET = model_param['glb_filtnames'], model_param['glb_clfnames'], global_param['pl_names'], global_param['pl_set']
lbidstr = ('_' + (str(lbid) if lbid != -1 else 'all')) if lbid is not None and lbid != '' else ''
to_hdf, hdf5_fpath = cfg_param.setdefault('to_hdf', False), '%s' % 'crsval_dataset.h5' if cfg_param.setdefault('hdf5_fpath', 'crsval_dataset.h5') is None else cfg_param['hdf5_fpath']
# Format the data
if (type(X_train) == list):
assert all([len(x) == len(X_train[0]) for x in X_train[1:]])
X_train = [pd.DataFrame(x) if (type(x) != pd.io.parsers.TextFileReader and type(x) != pd.DataFrame) else x for x in X_train]
X_train = [pd.concat(x) if (type(x) == pd.io.parsers.TextFileReader and not to_hdf) else x for x in X_train]
else:
if (type(X_train) != pd.io.parsers.TextFileReader and type(X_train) != pd.DataFrame):
X_train = pd.DataFrame(X_train)
X_train = pd.concat(X_train) if (type(X_train) == pd.io.parsers.TextFileReader and not to_hdf) else X_train
if (type(X_test) == list):
assert all([len(x) == len(X_test[0]) for x in X_test[1:]])
X_test = [pd.DataFrame(x) if (type(x) != pd.io.parsers.TextFileReader and type(x) != pd.DataFrame) else x for x in X_test]
X_test = [pd.concat(x) if (type(x) == pd.io.parsers.TextFileReader and not to_hdf) else x for x in X_test]
else:
if (type(X_test) != pd.io.parsers.TextFileReader and type(X_test) != pd.DataFrame):
X_test = pd.DataFrame(X_test)
X_test = pd.concat(X_test) if (type(X_test) == pd.io.parsers.TextFileReader and not to_hdf) else X_test
if (type(Y_train) != pd.io.parsers.TextFileReader and type(Y_train) != pd.DataFrame):
Y_train = pd.DataFrame(Y_train)
Y_train_mt = Y_train.values.reshape((Y_train.shape[0],)) if (len(Y_train.shape) == 1 or Y_train.shape[1] == 1) else Y_train.values
mltl=True if len(Y_train_mt.shape) > 1 and Y_train_mt.shape[1] > 1 or 2 in Y_train_mt else False
print('Classification is starting...')
preds, probs, scores = [[] for i in range(3)]
crsval_featw, crsval_subfeatw = [{} for i in range(2)]
for vars in model_iter(**model_param):
if (global_param['comb']):
mdl_name, mdl = [vars[x] for x in range(2)]
else:
filt_name, filter, clf_name, clf= [vars[x] for x in range(4)]
print('#' * 80)
# Assemble a pipeline
if ('filter' in locals() and filter != None):
model_name = '%s [Ft Filt] & %s [CLF]' % (filt_name, clf_name)
pipeline = Pipeline([('featfilt', clone(filter)), ('clf', clf)])
elif ('clf' in locals() and clf != None):
model_name = '%s [CLF]' % clf_name
pipeline = Pipeline([('clf', clf)])
else:
model_name = mdl_name
pipeline = mdl if (type(mdl) is Pipeline) else Pipeline([('clf', mdl)])
if (model_name in PL_SET): continue
PL_NAMES.append(model_name)
PL_SET.add(model_name)
print(model_name)
# Build the model
print('+' * 80)
print('Training Model: ')
print(pipeline)
t0 = time()
pipeline.fit(X_train, Y_train_mt)
train_time = time() - t0
print('train time: %0.3fs' % train_time)
t0 = time()
pred = pipeline.predict(X_test)
prob = pipeline.predict_proba(X_test)
test_time = time() - t0
print('+' * 80)
print('Testing: ')
print('test time: %0.3fs' % test_time)
preds.append(pred)
probs.append(prob)
scores.append(get_score(pipeline, X_test, mltl))
# Save predictions and model
if (cfg_param.setdefault('save_pred', True)):
io.write_npz(dict(pred_lb=pred, pred_prob=prob), 'clf_pred_%s%s' % (model_name.replace(' ', '_').lower(), lbidstr))
if (cfg_param.setdefault('save_model', True)):
mdl_name = '%s' % model_name.replace(' ', '_').lower()
if (all([hasattr(pipeline.steps[i][1], 'save') for i in range(len(pipeline.steps))])):
for sub_mdl_name, mdl in pipeline.steps:
mdl.save('%s_%s%s' % (mdl_name, sub_mdl_name.replace(' ', '_').lower(), lbidstr), **global_param.setdefault('mdl_save_kwargs', {}))
else:
io.write_obj(pipeline, '%s%s' % (mdl_name, lbidstr))
# Feature importances
feat_w, sub_feat_w = get_featw(pipeline, X_train[0].shape[1] if (type(X_train) is list) else X_train.shape[1])
for k, v in feat_w.items():
key = '%s_%s_%s' % (model_name, k[0], k[1])
crsval_featw.setdefault(key, []).append(v)
for k, v in sub_feat_w.items():
key = '%s_%s_%s' % (model_name, k[0], k[1])
crsval_subfeatw.setdefault(key, []).append(v)
print('\n')
if (len(preds) > 1):
# Prediction overlap
preds_mt = np.column_stack([x.ravel() for x in preds])
povl = np.array(pred_ovl(preds_mt))
# Spearman's rank correlation
spmnr, spmnr_pval = stats.spearmanr(preds_mt)
# Kendall rank correlation
# kendalltau = stats.kendalltau(preds_mt)[0]
# Pearson correlation
        # pearson = stats.pearsonr(preds_mt)[0]
## Save performance data
povl_idx = [' & '.join(x) for x in imath.subset(PL_NAMES, min_crdnl=1)]
povl_df = pd.DataFrame(povl, index=povl_idx, columns=['pred_ovl'])
spmnr_df = pd.DataFrame(spmnr, index=PL_NAMES, columns=PL_NAMES)
spmnr_pval_df = pd.DataFrame(spmnr_pval, index=PL_NAMES, columns=PL_NAMES)
if (cfg_param.setdefault('save_povl', False)):
povl_df.to_excel('cpovl_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_povl_npz', False)):
io.write_df(povl_df, 'povl_clf%s.npz' % lbidstr, with_idx=True)
if (cfg_param.setdefault('save_spmnr', False)):
spmnr_df.to_excel('spmnr_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_spmnr_npz', False)):
io.write_df(spmnr_df, 'spmnr_clf%s.npz' % lbidstr, with_idx=True)
if (cfg_param.setdefault('save_spmnr_pval', False)):
spmnr_pval_df.to_excel('spmnr_pval_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_spmnr_pval_npz', False)):
io.write_df(spmnr_pval_df, 'spmnr_pval_clf%s.npz' % lbidstr, with_idx=True)
save_featw(X_train[0].columns.values if (type(X_train) is list) else X_train.columns.values, crsval_featw, crsval_subfeatw, cfg_param=cfg_param, lbid=lbid)
return preds, scores
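# Illustrative call (the model factory and config values below are hypothetical;
# the real ones come from the surrounding framework):
#   preds, scores = classification(X_train, Y_train, X_test, gen_cb_models,
#       model_param=dict(glb_filtnames=[], glb_clfnames=[]),
#       cfg_param=dict(save_pred=False, save_model=False),
#       global_param=dict(comb=True, pl_names=[], pl_set=set()))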
def kf2data(kf, X, Y, to_hdf=False, hdf5_fpath='crsval_dataset.h5'):
if (to_hdf):
import h5py
from keras.utils.io_utils import HDF5Matrix
hdf5_fpath = hdf5_fpath if hdf5_fpath else os.path.abspath('crsval_dataset.h5')
for i, (train_idx, test_idx) in enumerate(kf):
if (type(X)==list):
if (type(X[0]) == pd.io.parsers.TextFileReader):
pass
assert all([len(x) == len(X[0]) for x in X[1:]])
X_train, X_test = [x[train_idx,:] for x in X] if to_hdf and type(X[0]) == HDF5Matrix or type(X[0]) != pd.DataFrame else [x.iloc[train_idx,:] for x in X], [x[test_idx,:] for x in X] if to_hdf and type(X[0]) == HDF5Matrix or type(X[0]) != pd.DataFrame else [x.iloc[test_idx,:] for x in X]
train_idx_df, test_idx_df = pd.DataFrame(np.arange(X_train[0].shape[0]), index=X[0].index[train_idx]), pd.DataFrame(np.arange(X_test[0].shape[0]), index=X[0].index[test_idx])
else:
if (type(X) == pd.io.parsers.TextFileReader):
pass
X_train, X_test = X[train_idx] if to_hdf and type(X) == HDF5Matrix or type(X) != pd.DataFrame else X.iloc[train_idx,:], X[test_idx] if to_hdf and type(X) == HDF5Matrix or type(X) != pd.DataFrame else X.iloc[test_idx,:]
train_idx_df, test_idx_df = pd.DataFrame(np.arange(X_train.shape[0]), index=None if to_hdf and type(X) == HDF5Matrix or type(X) != pd.DataFrame else X.index[train_idx]), pd.DataFrame(np.arange(X_test.shape[0]), index=None if to_hdf and type(X) == HDF5Matrix or type(X) != pd.DataFrame else X.index[test_idx])
Y_train, Y_test = Y[train_idx], Y[test_idx]
# Y_train = Y_train.reshape((Y_train.shape[0],)) if (len(Y_train.shape) > 1 and Y_train.shape[1] == 1) else Y_train
# Y_test = Y_test.reshape((Y_test.shape[0],)) if (len(Y_test.shape) > 1 and Y_test.shape[1] == 1) else Y_test
if (to_hdf):
with h5py.File(hdf5_fpath, 'w') as hf:
if (type(X_train) == list):
for idx, x_train in enumerate(X_train):
hf.create_dataset('X_train%i' % idx, data=x_train.values if type(X) != HDF5Matrix else x_train[:])
else:
hf.create_dataset('X_train', data=X_train.values if type(X) != HDF5Matrix else X_train[:])
if (type(X_test) == list):
for idx, x_test in enumerate(X_test):
hf.create_dataset('X_test%i' % idx, data=x_test.values if type(X) != HDF5Matrix else x_test[:])
else:
hf.create_dataset('X_test', data=X_test.values if type(X) != HDF5Matrix else X_test[:])
hf.create_dataset('Y_train', data=Y_train if type(Y) != HDF5Matrix else Y_train[:])
hf.create_dataset('Y_test', data=Y_test if type(Y) != HDF5Matrix else Y_test[:])
yield i, [HDF5Matrix(hdf5_fpath, 'X_train%i' % idx) for idx in range(len(X_train))] if (type(X_train) == list) else HDF5Matrix(hdf5_fpath, 'X_train'), [HDF5Matrix(hdf5_fpath, 'X_test%i' % idx) for idx in range(len(X_test))] if (type(X_test) == list) else HDF5Matrix(hdf5_fpath, 'X_test'), HDF5Matrix(hdf5_fpath, 'Y_train'), HDF5Matrix(hdf5_fpath, 'Y_test'), train_idx_df, test_idx_df
            # The implementation of HDF5Matrix is not ideal since it keeps every hdf5 file opened, so we need to close them manually.
remove_hfps = []
for hfpath, hf in HDF5Matrix.refs.items():
if (hfpath.startswith(hdf5_fpath)):
hf.close()
remove_hfps.append(hfpath)
for hfpath in remove_hfps:
HDF5Matrix.refs.pop(hfpath, None)
else:
yield i, [x.values for x in X_train] if (type(X_train) == list) else X_train.values, [x.values for x in X_test] if (type(X_test) == list) else X_test.values, Y_train, Y_test, train_idx_df, test_idx_df
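# Sketch of iterating kf2data (variable names are illustrative): each fold yields
# in-memory arrays, or HDF5Matrix-backed datasets when to_hdf=True.
#   for fold, X_tr, X_te, Y_tr, Y_te, tr_idx_df, te_idx_df in kf2data(kf, X, Y):
#       mdl = clone(base_model).fit(X_tr, Y_tr)
#       print(fold, mdl.score(X_te, Y_te))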
# Evaluation
def evaluate(X_train, Y_train, X_test, Y_test, model_iter, model_param={}, avg='micro', kfold=5, cfg_param={}, global_param={}, lbid=''):
print('Evaluating...')
from keras.utils.io_utils import HDF5Matrix
global common_cfg
FILT_NAMES, CLF_NAMES, PL_NAMES, PL_SET = model_param['glb_filtnames'], model_param['glb_clfnames'], global_param['pl_names'], global_param['pl_set']
    lbidstr = ('_' + (str(lbid) if lbid != -1 else 'all')) if lbid is not None and lbid != '' else ''
    to_hdf, hdf5_fpath = cfg_param.setdefault('to_hdf', False), 'crsval_dataset.h5' if cfg_param.setdefault('hdf5_fpath', 'crsval_dataset.h5') is None else cfg_param['hdf5_fpath']
# Format the data
if (type(X_train) == list):
assert all([len(x) == len(X_train[0]) for x in X_train[1:]])
X_train = [pd.DataFrame(x) if (type(x) != pd.io.parsers.TextFileReader and type(x) != pd.DataFrame) else x for x in X_train]
X_train = [pd.concat(x) if (type(x) == pd.io.parsers.TextFileReader and not to_hdf) else x for x in X_train]
else:
if (type(X_train) != pd.io.parsers.TextFileReader and type(X_train) != pd.DataFrame):
X_train = pd.DataFrame(X_train) if type(X_train) != HDF5Matrix else X_train
X_train = pd.concat(X_train) if (type(X_train) == pd.io.parsers.TextFileReader and not to_hdf) else X_train
if (type(Y_train) != pd.io.parsers.TextFileReader and type(Y_train) != pd.DataFrame):
Y_train = pd.DataFrame(Y_train) if (type(Y_train) == pd.io.parsers.TextFileReader and not to_hdf) else Y_train
if (type(Y_train) != HDF5Matrix):
Y_train = Y_train.values.reshape((Y_train.shape[0],)) if (len(Y_train.shape) == 1 or Y_train.shape[1] == 1) else Y_train.values
else:
Y_train = Y_train
if (type(X_test) == list):
assert all([len(x) == len(X_test[0]) for x in X_test[1:]])
X_test = [pd.DataFrame(x) if (type(x) != pd.io.parsers.TextFileReader and type(x) != pd.DataFrame) else x for x in X_test]
X_test = [pd.concat(x) if (type(x) == pd.io.parsers.TextFileReader and not to_hdf) else x for x in X_test]
else:
if (type(X_test) != pd.io.parsers.TextFileReader and type(X_test) != pd.DataFrame):
X_test = pd.DataFrame(X_test) if type(X_test) != HDF5Matrix else X_test
X_test = pd.concat(X_test) if (type(X_test) == pd.io.parsers.TextFileReader and not to_hdf) else X_test
if (type(Y_test) != pd.io.parsers.TextFileReader and type(Y_test) != pd.DataFrame):
Y_test = pd.DataFrame(Y_test) if (type(Y_test) == pd.io.parsers.TextFileReader and not to_hdf) else Y_test
if (type(Y_test) != HDF5Matrix):
Y_test = Y_test.values.reshape((Y_test.shape[0],)) if (len(Y_test.shape) == 1 or Y_test.shape[1] == 1) else Y_test.values
else:
Y_test = Y_test
is_mltl = True if len(Y_train.shape) > 1 and Y_train.shape[1] > 1 or 2 in Y_train else False
print('Benchmark is starting...')
mean_fpr = np.linspace(0, 1, 100)
mean_recall = np.linspace(0, 1, 100)
xdf = X_train[0] if type(X_train)==list else X_train
roc_dict, prc_dict, featw_data, subfeatw_data = [{} for i in range(4)]
## Copy from cross_validate function Start ##
del PL_NAMES[:]
PL_SET.clear()
if (cfg_param.setdefault('npg_ratio', None) is not None):
npg_ratio = cfg_param['npg_ratio']
        Y_train = np.array(Y_train) # HDF5Matrix does not support matrix slicing or boolean indexing
y = Y_train[:,0] if (len(Y_train.shape) > 1) else Y_train
if (1.0 * np.abs(y).sum() / Y_train.shape[0] < 1.0 / (npg_ratio + 1)):
all_true = np.arange(Y_train.shape[0])[y > 0].tolist()
all_false = np.arange(Y_train.shape[0])[y <= 0].tolist()
true_id = np.random.choice(len(all_true), size=int(1.0 / npg_ratio * len(all_false)), replace=True)
true_idx = [all_true[i] for i in true_id]
all_train_idx = sorted(set(true_idx + all_false))
            X_train = [x.iloc[all_train_idx] if type(x) != HDF5Matrix else x[all_train_idx] for x in X_train] if (type(X_train) is list) else X_train.iloc[all_train_idx] if type(X_train) != HDF5Matrix else X_train[all_train_idx]
Y_train = Y_train[all_train_idx,:] if (len(Y_train.shape) > 1) else Y_train[all_train_idx]
results, preds = [[] for x in range(2)]
# Y_test = np.column_stack([np.abs(Y_test).reshape((Y_test.shape[0],-1))] + [label_binarize(lb, classes=[-1,1,0])[:,1] for lb in (np.sign(Y_test).astype('int8').reshape((Y_test.shape[0],-1))).T]) if (len(Y_test.shape) < 2 or Y_test.shape[1] == 1 or np.where(Y_test<0)[0].shape[0]>0) else Y_test
for vars in model_iter(**model_param):
if (global_param['comb']):
mdl_name, mdl = [vars[x] for x in range(2)]
else:
filt_name, filter, clf_name, clf= [vars[x] for x in range(4)]
print('#' * 80)
# Assemble a pipeline
if ('filter' in locals() and filter != None):
model_name = '%s [Ft Filt] & %s [CLF]' % (filt_name, clf_name)
pipeline = Pipeline([('featfilt', clone(filter)), ('clf', clf)])
elif ('clf' in locals() and clf != None):
model_name = '%s [CLF]' % clf_name
pipeline = Pipeline([('clf', clf)])
else:
model_name = mdl_name
pipeline = mdl
if (model_name in PL_SET): continue
PL_NAMES.append(model_name)
PL_SET.add(model_name)
print(model_name)
# Benchmark results
bm_results = benchmark(pipeline, X_train, Y_train, X_test, Y_test, mltl=is_mltl, signed=global_param.setdefault('signed', True if np.where(Y_train<0)[0].shape[0]>0 else False), average=avg)
# Clear the model environment (e.g. GPU resources)
del pipeline
# if (type(pipeline) is Pipeline):
# for cmpn in pipeline.named_steps.values():
# if (getattr(cmpn, "clear", None)): cmpn.clear()
# else:
# if (getattr(pipeline, "clear", None)):
# pipeline.clear()
# Obtain the results
if (is_mltl and avg == 'all'):
results.append([bm_results[x] for x in ['accuracy', 'micro-precision', 'micro-recall', 'micro-fscore', 'macro-precision', 'macro-recall', 'macro-fscore', 'train_time', 'test_time']])
else:
results.append([bm_results[x] for x in ['accuracy', 'precision', 'recall', 'fscore', 'train_time', 'test_time']])
preds.append(bm_results['pred_lb'])
if (cfg_param.setdefault('save_pred', False)):
io.write_npz(dict(pred_lb=bm_results['pred_lb'], pred_prob=bm_results['pred_prob'], true_lb=Y_test), 'pred_%s%s' % (model_name.replace(' ', '_').lower(), lbidstr))
if (is_mltl and avg == 'all'):
micro_id, macro_id = '-'.join([model_name,'micro']), '-'.join([model_name,'macro'])
roc_dict[micro_id] = roc_dict.setdefault(micro_id, 0) + np.interp(mean_fpr, bm_results['micro-roc'][0], bm_results['micro-roc'][1])
roc_dict[macro_id] = roc_dict.setdefault(macro_id, 0) + np.interp(mean_fpr, bm_results['macro-roc'][0], bm_results['macro-roc'][1])
else:
roc_dict[model_name] = roc_dict.setdefault(model_name, 0) + np.interp(mean_fpr, bm_results['roc'][0], bm_results['roc'][1])
prc_dict[model_name] = prc_dict.setdefault(model_name, 0) + np.interp(mean_recall, bm_results['prc'][0], bm_results['prc'][1])
for k, v in bm_results['feat_w'].items():
key = '%s_%s_%s' % (model_name, k[0], k[1])
featw_data[key] = v
for k, v in bm_results['sub_feat_w'].items():
key = '%s_%s_%s' % (model_name, k[0], k[1])
subfeatw_data[key] = v
print('\n')
# Prediction overlap
    if (len(Y_train.shape) > 1 and Y_train.shape[1] > 1):
preds_mt = np.column_stack([x.ravel() for x in preds])
else:
preds_mt = np.column_stack(preds)
preds.append(Y_test)
tpreds_mt = np.column_stack([x.ravel() for x in preds])
## Copy from cross_validate function End ##
povl = pred_ovl(preds_mt, Y_test)
# Spearman's rank correlation
spearman = stats.spearmanr(tpreds_mt)
# Kendall rank correlation
# kendalltau = stats.kendalltau(preds_mt)
# Pearson correlation
# pearson = stats.pearsonr(preds_mt)
## Save performance data
if (is_mltl and avg == 'all'):
metric_idx = ['Accuracy', 'Micro Precision', 'Micro Recall', 'Micro F score', 'Macro Precision', 'Macro Recall', 'Macro F score', 'Train time', 'Test time']
else:
metric_idx = ['Accuracy', 'Precision', 'Recall', 'F score', 'Train time', 'Test time']
perf_df = pd.DataFrame(np.array(results).T, index=metric_idx, columns=PL_NAMES)
povl_idx = [' & '.join(x) for x in imath.subset(PL_NAMES, min_crdnl=1)]
povl_df = pd.DataFrame(np.array(povl), index=povl_idx, columns=['pred_ovl', 'tpred_ovl'])
spmnr_val_df = pd.DataFrame(spearman[0], index=PL_NAMES+['Annotations'], columns=PL_NAMES+['Annotations'])
spmnr_pval_df = pd.DataFrame(spearman[1], index=PL_NAMES+['Annotations'], columns=PL_NAMES+['Annotations'])
if (cfg_param.setdefault('save_tpred', True)):
io.write_npz(tpreds_mt, 'tpred_clf%s' % lbidstr)
if (cfg_param.setdefault('save_perf', True)):
perf_df.to_excel('perf_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_perf_npz', False)):
io.write_df(perf_df, 'perf_clf%s.npz' % lbidstr, with_idx=True)
if (cfg_param.setdefault('save_povl', False)):
povl_df.to_excel('povl_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_povl_npz', False)):
io.write_df(povl_df, 'povl_clf%s.npz' % lbidstr, with_idx=True)
if (cfg_param.setdefault('save_spmnr', False)):
spmnr_val_df.to_excel('spmnr_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_spmnr_npz', False)):
io.write_df(spmnr_val_df, 'spmnr_clf%s.npz' % lbidstr, with_idx=True)
if (cfg_param.setdefault('save_spmnr_pval', False)):
spmnr_pval_df.to_excel('spmnr_pval_clf%s.xlsx' % lbidstr)
if (cfg_param.setdefault('save_spmnr_pval_npz', False)):
io.write_df(spmnr_pval_df, 'spmnr_pval_clf%s.npz' % lbidstr, with_idx=True)
# Feature importances
try:
save_featw(xdf.columns.values if type(xdf) != HDF5Matrix else np.arange(xdf.shape[1]), featw_data, subfeatw_data, cfg_param=cfg_param, lbid=lbid)
except Exception as e:
print(e)
## Plot figures
if (is_mltl and avg == 'all'):
micro_roc_data, micro_roc_labels, micro_roc_aucs, macro_roc_data, macro_roc_labels, macro_roc_aucs = [[] for i in range(6)]
else:
roc_data, roc_labels, roc_aucs = [[] for i in range(3)]
prc_data, prc_labels, prc_aucs = [[] for i in range(3)]
for pl in PL_NAMES:
if (is_mltl and avg == 'all'):
micro_id, macro_id = '-'.join([pl,'micro']), '-'.join([pl,'macro'])
micro_mean_tpr, macro_mean_tpr = roc_dict[micro_id], roc_dict[macro_id]
micro_roc_auc = metrics.auc(mean_fpr, micro_mean_tpr)
macro_roc_auc = metrics.auc(mean_fpr, macro_mean_tpr)
micro_roc_data.append([mean_fpr, micro_mean_tpr])
micro_roc_aucs.append(micro_roc_auc)
micro_roc_labels.append('%s (AUC=%0.2f)' % (pl, micro_roc_auc))
macro_roc_data.append([mean_fpr, macro_mean_tpr])
macro_roc_aucs.append(macro_roc_auc)
macro_roc_labels.append('%s (AUC=%0.2f)' % (pl, macro_roc_auc))
else:
mean_tpr = roc_dict[pl]
mean_roc_auc = metrics.auc(mean_fpr, mean_tpr)
roc_data.append([mean_fpr, mean_tpr])
roc_aucs.append(mean_roc_auc)
roc_labels.append('%s (AUC=%0.2f)' % (pl, mean_roc_auc))
mean_prcn = prc_dict[pl]
mean_prc_auc = metrics.auc(mean_recall, mean_prcn)
prc_data.append([mean_recall, mean_prcn])
prc_aucs.append(mean_prc_auc)
prc_labels.append('%s (AUC=%0.2f)' % (pl, mean_prc_auc))
group_dict = {}
for i, pl in enumerate(PL_NAMES):
group_dict.setdefault(tuple(set(difflib.get_close_matches(pl, PL_NAMES))), []).append(i)
if (not cfg_param.setdefault('group_by_name', False) or len(group_dict) == len(PL_NAMES)):
groups = None
else:
        group_array = np.array(list(group_dict.values()), dtype=object)
group_array.sort()
groups = group_array.tolist()
if (is_mltl and avg == 'all'):
aucs_df = pd.DataFrame([micro_roc_aucs, macro_roc_aucs, prc_aucs], index=['Micro ROC AUC', 'Macro ROC AUC', 'PRC AUC'], columns=PL_NAMES)
if (cfg_param.setdefault('plot_roc', True)):
plot.plot_roc(micro_roc_data, micro_roc_labels, groups=groups, fname='micro_roc%s'%lbidstr, plot_cfg=common_cfg)
plot.plot_roc(macro_roc_data, macro_roc_labels, groups=groups, fname='macro_roc%s'%lbidstr, plot_cfg=common_cfg)
else:
aucs_df = pd.DataFrame([roc_aucs, prc_aucs], index=['ROC AUC', 'PRC AUC'], columns=PL_NAMES)
if (cfg_param.setdefault('plot_roc', True)):
plot.plot_roc(roc_data, roc_labels, groups=groups, fname='roc%s'%lbidstr, plot_cfg=common_cfg)
if (cfg_param.setdefault('plot_prc', True)):
plot.plot_prc(prc_data, prc_labels, groups=groups, fname='prc%s'%lbidstr, plot_cfg=common_cfg)
if (cfg_param.setdefault('save_auc', False)):
aucs_df.to_excel('auc%s.xlsx' % lbidstr)
filt_num, clf_num = len(FILT_NAMES), len(CLF_NAMES)
    if (cfg_param.setdefault('plot_metric', False)):
        # NOTE (assumption): evaluate performs a single train/test run, so reuse the per-model
        # results as the mean performance with zero standard deviation
        perf_avg_df, perf_std_df = perf_df, perf_df * 0
for mtrc in metric_idx:
mtrc_avg_list, mtrc_std_list = [[] for i in range(2)]
if (global_param['comb']):
                mtrc_avg = perf_avg_df.loc[mtrc,:].values.reshape((1,-1))
                mtrc_std = perf_std_df.loc[mtrc,:].values.reshape((1,-1))
plot.plot_bar(mtrc_avg, mtrc_std, xlabels=PL_NAMES, labels=None, title='%s by Classifier and Feature Selection' % mtrc, fname='%s_clf_ft%s' % (mtrc.replace(' ', '_').lower(), lbidstr), plot_cfg=common_cfg)
else:
for i in range(filt_num):
offset = i * clf_num
                    mtrc_avg_list.append(perf_avg_df.loc[mtrc].iloc[offset:offset+clf_num].values.reshape((1,-1)))
                    mtrc_std_list.append(perf_std_df.loc[mtrc].iloc[offset:offset+clf_num].values.reshape((1,-1)))
mtrc_avg = np.concatenate(mtrc_avg_list)
                mtrc_std = np.concatenate(mtrc_std_list)
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import numpy as np
import scipy
def compute_metrics(x):
sx = np.sort(-x, axis=1)
d = np.diag(-x)
d = d[:, np.newaxis]
ind = sx - d
ind = np.where(ind == 0)
ind = ind[1]
metrics = {}
metrics['R1'] = float(np.sum(ind == 0)) / len(ind)
metrics['R5'] = float(np.sum(ind < 5)) / len(ind)
    metrics['R10'] = float(np.sum(ind < 10)) / len(ind)
    return metrics
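# Worked example on a toy 3x3 similarity matrix x[i, j] = sim(query_i, item_j),
# where the ground-truth match for query i is item i (the diagonal):
#   x = np.array([[0.9, 0.1, 0.2],
#                 [0.8, 0.7, 0.1],
#                 [0.2, 0.3, 0.6]])
#   compute_metrics(x)  # query 1's true item ranks 2nd, so R1 = 2/3, R5 = R10 = 1.0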
"""
Links back truth files to mocks.
The objective of this script is to extract
additional information from the mocks that
were used to build a 'targets' and 'truth' catalogs.
To be able to do this recovery one needs access to
three files:
* truth.fits: generated by select_mock_targets (desitarget)
* map_id_filename.txt: generated by select_mock_targets (desitarget)
* the original mock files used to generate truth.fits.
This scripts operates by:
* reading the 'MOCKID' column in the truth file, to decode the
fileid and rowid in the original mock file.
* reading the file 'map_id_filename.txt' that stores the correspondence
between filenumber and mock filenames.
* for each fileid read the original mock and use the rowid to extract
the information we need.
In this example we work with 'MWS_MAIN' sources to extract the 'vX' variable
stored in the mocks, but not in the truth file.
"""
import numpy as np
import os
import argparse
import yaml
from desitarget.mock.io import decode_rownum_filenum
from astropy.table import Table
parser = argparse.ArgumentParser()
parser.add_argument('--config','-c',default='input.yaml')
parser.add_argument("--input_dir", "-I", help="Path to the truth.fits and target.fits files", type=str, default="./")
args = parser.parse_args()
with open(args.config,'r') as pfile:
    params = yaml.safe_load(pfile)
#defines the target and variable to recover from the mock
source_name = 'MWS_MAIN'
variable_to_recover = 'vX'
# load the map_id_filename
map_id_filename = np.loadtxt(os.path.join(args.input_dir,'map_id_filename.txt'),
dtype={'names': ('SOURCENAME', 'FILEID', 'FILENAME'),
'formats': ('S10', 'i4', 'S256')})
# load truth
truth_table = Table.read(os.path.join(args.input_dir, 'truth.fits'))
print('loaded {} truth items'.format(len(truth_table)))
# decode rowid and fileid for the targets of interest
ii = truth_table['SOURCETYPE']==source_name
rowid, fileid = decode_rownum_filenum(truth_table['MOCKID'][ii])
# get the fileids to be read
fileid_to_read = np.array(list(set(fileid)))
print('fileid to be read {}'.format(fileid_to_read))
# prepare the arrays to save the variable to match
n = np.count_nonzero(ii)
# Copyright 2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the expval method of the :mod:`pennylane_lightning.LightningQubit` device.
"""
import pytest
import numpy as np
import pennylane as qml
from conftest import U, U2, A
np.random.seed(42)
THETA = np.linspace(0.11, 1, 3)
PHI = np.linspace(0.32, 1, 3)
VARPHI = np.linspace(0.02, 1, 3)
@pytest.mark.parametrize("theta, phi", list(zip(THETA, PHI)))
class TestExpval:
"""Test expectation values"""
def test_identity_expectation(self, theta, phi, qubit_device_3_wires, tol):
"""Test that identity expectation value (i.e. the trace) is 1"""
dev = qubit_device_3_wires
O1 = qml.Identity(wires=[0])
O2 = qml.Identity(wires=[1])
dev.apply(
[qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.CNOT(wires=[0, 1])],
rotations=[*O1.diagonalizing_gates(), *O2.diagonalizing_gates()],
)
res = np.array([dev.expval(O1), dev.expval(O2)])
assert np.allclose(res, np.array([1, 1]), tol)
def test_pauliz_expectation(self, theta, phi, qubit_device_3_wires, tol):
"""Test that PauliZ expectation value is correct"""
dev = qubit_device_3_wires
O1 = qml.PauliZ(wires=[0])
O2 = qml.PauliZ(wires=[1])
dev.apply(
[qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.CNOT(wires=[0, 1])],
rotations=[*O1.diagonalizing_gates(), *O2.diagonalizing_gates()],
)
res = np.array([dev.expval(O1), dev.expval(O2)])
assert np.allclose(res, np.array([np.cos(theta), np.cos(theta) * np.cos(phi)]), tol)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_paulix_expectation(self, theta, phi, qubit_device_3_wires, tol, C):
"""Test that PauliX expectation value is correct"""
dev = qubit_device_3_wires
O1 = qml.PauliX(wires=[0])
O2 = qml.PauliX(wires=[1])
dev.apply(
[qml.RY(theta, wires=[0]), qml.RY(phi, wires=[1]), qml.CNOT(wires=[0, 1])],
rotations=[*O1.diagonalizing_gates(), *O2.diagonalizing_gates()],
)
res = np.array([dev.expval(O1), dev.expval(O2)], dtype=C)
assert np.allclose(res, np.array([np.sin(theta) * np.sin(phi), np.sin(phi)], dtype=C))
def test_pauliy_expectation(self, theta, phi, qubit_device_3_wires, tol):
"""Test that PauliY expectation value is correct"""
dev = qubit_device_3_wires
O1 = qml.PauliY(wires=[0])
O2 = qml.PauliY(wires=[1])
dev.apply(
[qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.CNOT(wires=[0, 1])],
rotations=[*O1.diagonalizing_gates(), *O2.diagonalizing_gates()],
)
res = np.array([dev.expval(O1), dev.expval(O2)])
assert np.allclose(res, np.array([0, -np.cos(theta) * np.sin(phi)]), tol)
def test_hadamard_expectation(self, theta, phi, qubit_device_3_wires, tol):
"""Test that Hadamard expectation value is correct"""
dev = qubit_device_3_wires
O1 = qml.Hadamard(wires=[0])
O2 = qml.Hadamard(wires=[1])
dev.apply(
[qml.RY(theta, wires=[0]), qml.RY(phi, wires=[1]), qml.CNOT(wires=[0, 1])],
rotations=[*O1.diagonalizing_gates(), *O2.diagonalizing_gates()],
)
res = np.array([dev.expval(O1), dev.expval(O2)])
expected = np.array(
[np.sin(theta) * np.sin(phi) + np.cos(theta), np.cos(theta) * np.cos(phi) + np.sin(phi)]
) / np.sqrt(2)
assert np.allclose(res, expected, tol)
@pytest.mark.parametrize("theta,phi,varphi", list(zip(THETA, PHI, VARPHI)))
class TestTensorExpval:
"""Test tensor expectation values"""
def test_paulix_pauliy(self, theta, phi, varphi, qubit_device_3_wires, tol):
"""Test that a tensor product involving PauliX and PauliY works
correctly"""
dev = qubit_device_3_wires
obs = qml.PauliX(0) @ qml.PauliY(2)
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2]),
],
rotations=obs.diagonalizing_gates(),
)
res = dev.expval(obs)
expected = np.sin(theta) * np.sin(phi) * np.sin(varphi)
assert np.allclose(res, expected)
def test_pauliz_identity(self, theta, phi, varphi, qubit_device_3_wires, tol):
"""Test that a tensor product involving PauliZ and Identity works
correctly"""
dev = qubit_device_3_wires
obs = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2]),
],
rotations=obs.diagonalizing_gates(),
)
res = dev.expval(obs)
expected = np.cos(varphi) * np.cos(phi)
assert np.allclose(res, expected, tol)
def test_pauliz_hadamard_pauliy(self, theta, phi, varphi, qubit_device_3_wires, tol):
"""Test that a tensor product involving PauliZ and PauliY and hadamard
works correctly"""
dev = qubit_device_3_wires
obs = qml.PauliZ(0) @ qml.Hadamard(1) @ qml.PauliY(2)
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2]),
],
rotations=obs.diagonalizing_gates(),
)
res = dev.expval(obs)
expected = -(np.cos(varphi) * np.sin(phi) + np.sin(varphi) * np.cos(theta)) / np.sqrt(2)
        assert np.allclose(res, expected, tol)
import skimage.io as io
import skimage.transform as skt
import numpy as np
from PIL import Image
from src.models.class_patcher import patcher
from src.utils.imgproc import *
class patcher(patcher):
def __init__(self, body='./body/body_noy.png', **options):
super().__init__('ノイ', body=body, pantie_position=[147, 133], **options)
self.mask = io.imread('./mask/mask_noy.png')
def convert(self, image):
pantie = np.array(image)
# prepare for moving from hip to front
        patch = np.copy(pantie[-140:-5, 546:, :])
import os
from flask import Flask, json, jsonify, request, render_template
import tensorflow as tf
import pandas as pd
import numpy as np
import six
import random
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
model_sequential = tf.keras.models.load_model("sequential")
@app.route('/')
def main():
return render_template('main.html')
@app.route('/prediction')
def prediction_form():
return render_template('prediction_form.html')
@app.route('/prediction_result', methods=['POST'])
def submit():
#Model Features
Pclass = int(request.values.get('Pclass'))
Cabin = int(request.values.get('Cabin'))
Age = int(request.values.get('Age'))
Gender = int(request.values.get('Sex'))
P_Embarkation = int(request.values.get('Embarkation'))
Name_Title = int(request.values.get('NameTitle'))
Trav_Aln = int(request.values.get('TravelAlone'))
if Trav_Aln == 1:
Travel_Relative = 0
else:
Travel_Relative = int(request.values.get('TravelNum'))
### Model Prediction
features = np.array([Pclass, Cabin, Age, Gender, P_Embarkation, Name_Title, Trav_Aln, Travel_Relative]).reshape(1,8)
prediction_sequential = round(model_sequential.predict(features)[0][0], 4)
round_pred = round(100*prediction_sequential)
response = {'Surv_pred': prediction_sequential}
resp_main = ("You will Survive!") if prediction_sequential > 0.5 else ("You will not Survive!")
resp_sec = ("Your chances to Survive are high enough with " + u"\u2248 " + str(round_pred) + "%") if prediction_sequential > 0.5 else ("Your chances to Survive are not high enough with " + u"\u2248 " + str(round_pred) + "%") if prediction_sequential < 0.5 else ("Your chances to Survive are even with " + u"\u2248 " + str(round_pred) + "%")
d_Pclass = "First Class" if Pclass == 1 else "Second Class" if Pclass == 2 else "Third Class"
d_Cabin = "Yes" if Cabin == 0 else "No"
d_Age = str(Age) + " years old"
d_Gender = "Female" if Gender == 0 else "Male"
d_P_Embarkation = "Cherbourg" if P_Embarkation == 0 else "Queenstown" if P_Embarkation == 1 else "Southampton"
d_Name_Title = "Mr." if Name_Title == 0 else "Miss." if Name_Title == 1 else "Mrs." if Name_Title == 2 else "Master." if Name_Title == 3 else "Other Title"
d_Trav_Aln = "No" if Trav_Aln == 0 else "Yes"
d_Travel_Relative = str(Travel_Relative) + " relative(s)" if Travel_Relative > 1 else str(Travel_Relative) + " relative"
data = {"Pclass": d_Pclass,
"Cabin": d_Cabin,
"Age": d_Age,
"Gender": d_Gender,
"P_Embarkation": d_P_Embarkation,
"Name_Title": d_Name_Title,
"Trav_Aln": d_Trav_Aln,
"Travel_Relative": d_Travel_Relative}
return render_template('result_pred.html', pred_val = response, data=data, resp_main = resp_main, resp_sec = resp_sec)
@app.route('/prediction_result')
def random_submit():
#Model Features
Pclass = random.randint(1, 3)
Cabin = random.randint(0, 1)
Age = random.randint(1, 75)
Gender = random.randint(0, 1)
P_Embarkation = random.randint(0, 2)
Name_Title = random.randint(0, 4)
Trav_Aln = random.randint(0, 1)
if Pclass == 1 or Pclass == 2:
if Gender == 0:
if Age > 21:
Name_Title = random.choice([1, 2, 4])
else:
Name_Title = random.choice([1, 2])
elif Gender == 1:
Name_Title = random.choice([0, 3, 4])
if Pclass == 3:
if Gender == 0:
if Age > 21:
Name_Title = random.choice([1, 2])
else:
Name_Title = random.choice([1])
elif Gender == 1:
Name_Title = random.choice([0, 3])
if Age < 16:
Trav_Aln = 0
else:
Trav_Aln = random.randint(0, 1)
if Trav_Aln == 1:
Travel_Relative = 0
else:
Travel_Relative = random.randint(1, 6)
### Model Prediction
features = np.array([Pclass, Cabin, Age, Gender, P_Embarkation, Name_Title, Trav_Aln, Travel_Relative]).reshape(1,8)
prediction_sequential = round(model_sequential.predict(features)[0][0], 4)
round_pred = round(100*prediction_sequential)
response = {'Surv_pred': prediction_sequential}
resp_main = ("You will Survive!") if prediction_sequential > 0.5 else ("You will not Survive!")
resp_sec = ("Your chances to Survive are high enough with " + u"\u2248 " + str(round_pred) + "%") if prediction_sequential > 0.5 else ("Your chances to Survive are not high enough with " + u"\u2248 " + str(round_pred) + "%") if prediction_sequential < 0.5 else ("Your chances to Survive are even with " + u"\u2248 " + str(round_pred) + "%")
d_Pclass = "First Class" if Pclass == 1 else "Second Class" if Pclass == 2 else "Third Class"
d_Cabin = "Yes" if Cabin == 0 else "No"
d_Age = str(Age) + " years old"
d_Gender = "Female" if Gender == 0 else "Male"
d_P_Embarkation = "Cherbourg" if P_Embarkation == 0 else "Queenstown" if P_Embarkation == 1 else "Southampton"
d_Name_Title = "Mr." if Name_Title == 0 else "Miss." if Name_Title == 1 else "Mrs." if Name_Title == 2 else "Master." if Name_Title == 3 else "Other Title"
d_Trav_Aln = "No" if Trav_Aln == 0 else "Yes"
d_Travel_Relative = str(Travel_Relative) + " relative(s)" if Travel_Relative > 1 else str(Travel_Relative) + " relative"
data = {"Pclass": d_Pclass,
"Cabin": d_Cabin,
"Age": d_Age,
"Gender": d_Gender,
"P_Embarkation": d_P_Embarkation,
"Name_Title": d_Name_Title,
"Trav_Aln": d_Trav_Aln,
"Travel_Relative": d_Travel_Relative}
return render_template('result_pred.html', pred_val = response, data=data, resp_main = resp_main, resp_sec = resp_sec)
@app.route('/prediction_result_firstclass')
def random_firstclass():
#Model Features
Pclass = 1
Cabin = random.randint(0, 1)
Age = random.randint(1, 75)
Gender = random.randint(0, 1)
P_Embarkation = random.randint(0, 2)
Trav_Aln = random.randint(0, 1)
if Gender == 0:
if Age > 21:
Name_Title = random.choice([4])
else:
Name_Title = random.choice([1, 2])
elif Gender == 1:
Name_Title = random.choice([3, 4])
if Age < 16:
Trav_Aln = 0
else:
Trav_Aln = random.randint(0, 1)
if Trav_Aln == 1:
Travel_Relative = 0
else:
Travel_Relative = random.randint(1, 6)
### Model Prediction
features = np.array([Pclass, Cabin, Age, Gender, P_Embarkation, Name_Title, Trav_Aln, Travel_Relative]).reshape(1,8)
prediction_sequential = round(model_sequential.predict(features)[0][0], 4)
round_pred = round(100*prediction_sequential)
response = {'Surv_pred': prediction_sequential}
resp_main = ("You will Survive!") if prediction_sequential > 0.5 else ("You will not Survive!")
resp_sec = ("Your chances to Survive are high enough with " + u"\u2248 " + str(round_pred) + "%") if prediction_sequential > 0.5 else ("Your chances to Survive are not high enough with " + u"\u2248 " + str(round_pred) + "%") if prediction_sequential < 0.5 else ("Your chances to Survive are even with " + u"\u2248 " + str(round_pred) + "%")
d_Pclass = "First Class"
d_Cabin = "Yes" if Cabin == 0 else "No"
d_Age = str(Age) + " years old"
d_Gender = "Female" if Gender == 0 else "Male"
d_P_Embarkation = "Cherbourg" if P_Embarkation == 0 else "Queenstown" if P_Embarkation == 1 else "Southampton"
d_Name_Title = "Mr." if Name_Title == 0 else "Miss." if Name_Title == 1 else "Mrs." if Name_Title == 2 else "Master." if Name_Title == 3 else "Other Title"
d_Trav_Aln = "No" if Trav_Aln == 0 else "Yes"
d_Travel_Relative = str(Travel_Relative) + " relative(s)" if Travel_Relative > 1 else str(Travel_Relative) + " relative"
data = {"Pclass": d_Pclass,
"Cabin": d_Cabin,
"Age": d_Age,
"Gender": d_Gender,
"P_Embarkation": d_P_Embarkation,
"Name_Title": d_Name_Title,
"Trav_Aln": d_Trav_Aln,
"Travel_Relative": d_Travel_Relative}
return render_template('result_pred.html', pred_val = response, data=data, resp_main = resp_main, resp_sec = resp_sec)
@app.route('/prediction_result_secondclass')
def random_secondclass():
#Model Features
Pclass = 2
Cabin = random.randint(0, 1)
Age = random.randint(1, 75)
Gender = random.randint(0, 1)
P_Embarkation = random.randint(0, 2)
Trav_Aln = random.randint(0, 1)
if Gender == 0:
if Age > 21:
Name_Title = random.choice([1, 2, 4])
else:
Name_Title = random.choice([1, 2])
elif Gender == 1:
Name_Title = random.choice([0, 3, 4])
if Age < 16:
Trav_Aln = 0
else:
Trav_Aln = random.randint(0, 1)
if Trav_Aln == 1:
Travel_Relative = 0
else:
Travel_Relative = random.randint(1, 6)
### Model Prediction
features = np.array([Pclass, Cabin, Age, Gender, P_Embarkation, Name_Title, Trav_Aln, Travel_Relative]).reshape(1,8)
prediction_sequential = round(model_sequential.predict(features)[0][0], 4)
round_pred = round(100*prediction_sequential)
response = {'Surv_pred': prediction_sequential}
resp_main = ("You will Survive!") if prediction_sequential > 0.5 else ("You will not Survive!")
resp_sec = ("Your chances to Survive are high enough with " + u"\u2248 " + str(round_pred) + "%") if prediction_sequential > 0.5 else ("Your chances to Survive are not high enough with " + u"\u2248 " + str(round_pred) + "%") if prediction_sequential < 0.5 else ("Your chances to Survive are even with " + u"\u2248 " + str(round_pred) + "%")
d_Pclass = "Second Class"
d_Cabin = "Yes" if Cabin == 0 else "No"
d_Age = str(Age) + " years old"
d_Gender = "Female" if Gender == 0 else "Male"
d_P_Embarkation = "Cherbourg" if P_Embarkation == 0 else "Queenstown" if P_Embarkation == 1 else "Southampton"
d_Name_Title = "Mr." if Name_Title == 0 else "Miss." if Name_Title == 1 else "Mrs." if Name_Title == 2 else "Master." if Name_Title == 3 else "Other Title"
d_Trav_Aln = "No" if Trav_Aln == 0 else "Yes"
d_Travel_Relative = str(Travel_Relative) + " relative(s)" if Travel_Relative > 1 else str(Travel_Relative) + " relative"
data = {"Pclass": d_Pclass,
"Cabin": d_Cabin,
"Age": d_Age,
"Gender": d_Gender,
"P_Embarkation": d_P_Embarkation,
"Name_Title": d_Name_Title,
"Trav_Aln": d_Trav_Aln,
"Travel_Relative": d_Travel_Relative}
return render_template('result_pred.html', pred_val = response, data=data, resp_main = resp_main, resp_sec = resp_sec)
@app.route('/prediction_result_thirdclass')
def random_thirdclass():
#Model Features
Pclass = 3
Cabin = random.randint(0, 1)
Age = random.randint(1, 75)
Gender = random.randint(0, 1)
P_Embarkation = random.randint(0, 2)
Trav_Aln = random.randint(0, 1)
if Gender == 0:
if Age > 21:
Name_Title = random.choice([1, 2])
else:
Name_Title = random.choice([1])
elif Gender == 1:
Name_Title = random.choice([0])
if Age < 16:
Trav_Aln = 0
else:
Trav_Aln = random.randint(0, 1)
if Trav_Aln == 1:
Travel_Relative = 0
else:
Travel_Relative = random.randint(1, 6)
### Model Prediction
    features = np.array([Pclass, Cabin, Age, Gender, P_Embarkation, Name_Title, Trav_Aln, Travel_Relative]).reshape(1,8)
"""Training - mitosis detection"""
import argparse
from datetime import datetime
import json
import math
import os
import pickle
import shutil
import sys
import numpy as np
import tensorflow as tf
import tensorboard as tb
import resnet
import resnet50
# data
def get_image(filename, patch_size):
"""Get image from filename.
Args:
filename: String filename of an image.
patch_size: Integer length to which the square image will be
resized.
Returns:
TensorFlow tensor containing the decoded and resized image with
type float32 and values in [0, 1).
"""
image_string = tf.read_file(filename)
# shape (h,w,c), uint8 in [0, 255]:
image = tf.image.decode_png(image_string, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32) # float32 [0, 1)
# TODO: remove this
#image = tf.image.resize_images(image, [patch_size, patch_size]) # float32 [0, 1)
#with tf.control_dependencies(
# [tf.assert_type(image, tf.float32, image.dtype),
# tf.verify_tensor_all_finite(image, "image tensor contains NaN or INF values"]):
return image
def get_label(filename):
"""Get label from filename.
Args:
filename: String in format "**/train|val/mitosis|normal/name.{ext}",
where the label is either "mitosis" or "normal".
Returns:
TensorFlow float binary label equal to 1 for mitosis or 0 for
normal.
"""
# note file name format:
# lab is a single digit, case and region are two digits with padding if needed
splits = tf.string_split([filename], "/")
label_str = splits.values[-2]
# check that label string is valid
is_valid = tf.logical_or(tf.equal(label_str, 'normal'), tf.equal(label_str, 'mitosis'))
assert_op = tf.Assert(is_valid, [label_str])
with tf.control_dependencies([assert_op]): # test for correct label extraction
#label = tf.to_int32(tf.equal(label_str, 'mitosis'))
label = tf.to_float(tf.equal(label_str, 'mitosis')) # required because model produces float
return label
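# For example (illustrative paths; only the parent folder name matters):
#   get_label("data/train/mitosis/1_03_07_42.png") -> 1.0
#   get_label("data/val/normal/2_11_01_7.png")     -> 0.0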
def preprocess(filename, patch_size):
"""Get image and label from filename.
Args:
filename: String filename of an image.
patch_size: Integer length to which the square image will be
resized, if necessary.
Returns:
Tuple of a float32 image Tensor with shape (h,w,c) and values in
[0, 1), a binary label, and a filename.
"""
# return image_resized, label
label = get_label(filename)
#label = tf.expand_dims(label, -1) # make each scalar label a vector of length 1 to match model
image = get_image(filename, patch_size) # float32 in [0, 1)
return image, label, filename
def normalize(image, model_name):
"""Normalize an image tensor.
Note: due to broadcasting, this works with a single image, or a batch
of images.
Args:
image: A Tensor of shape (...,h,w,c) with values in [0, 1].
model_name: String indicating the model to use.
Returns:
A normalized image Tensor of shape (...,h,w,c).
"""
# NOTE: don't use in-place updates to avoid side-effects
if model_name in ("vgg", "vgg19", "resnet"):
means = np.array([103.939, 116.779, 123.68]).astype(np.float32)
image = image[..., ::-1] # rbg -> bgr
image = image * 255 # float32 in [0, 255]
image = image - means # mean centering using imagenet means
else:
# normalize to [-1, 1]
#image = image / 255
image = image - 0.5
image = image * 2
return image
def unnormalize(image, model_name):
"""Unnormalize an image tensor.
Note: due to broadcasting, this works with a single image, or a batch
of images.
Args:
image: A Tensor of shape (...,h,w,c) with normalized values.
model_name: String indicating the model to use.
Returns:
An unnormalized image Tensor of shape (...,h,w,c) with values in
[0, 1].
"""
# NOTE: don't use in-place updates to avoid side-effects
if model_name in ("vgg", "vgg19", "resnet"):
means = np.array([103.939, 116.779, 123.68]).astype(np.float32)
image = image + means # mean centering using imagenet means
image = image / 255 # float32 in [0, 1]
image = image[..., ::-1] # bgr -> rgb
else:
image = image / 2
image = image + 0.5
return image
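# Sanity-check sketch: normalize and unnormalize are inverses up to float error
# (this assumes plain NumPy arrays, which broadcast through the same ops):
#   img = np.random.rand(64, 64, 3).astype(np.float32)
#   assert np.allclose(unnormalize(normalize(img, "resnet"), "resnet"), img, atol=1e-5)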
def augment(image, patch_size, seed=None):
"""Apply random data augmentation to the given image.
Args:
image: A Tensor of shape (h,w,c) with values in [0, 1].
patch_size: The patch size to which to randomly crop the image.
seed: An integer used to create a random seed.
Returns:
A data-augmented image with values in [0, 1].
"""
# NOTE: these values currently come from the Google pathology paper:
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, et al.
# Detecting Cancer Metastases on Gigapixel Pathology Images. arXiv.org. 2017.
# TODO: convert these hardcoded values into hyperparameters!!
# NOTE: if the seed is None, these ops will be seeded with a completely random seed, rather than
# a deterministic one based on the graph seed. This appears to only happen within the map
# functions of the Dataset API, based on the `test_num_parallel_calls` and
# `test_image_random_op_seeds` tests. For now, we will pass in a seed from the user and use it
# at the op level.
# NOTE: Additionally, if the Dataset.map() function that calls this function is using
# `num_parallel_calls` > 1, the results will be non-reproducible.
# TODO: https://github.com/tensorflow/tensorflow/issues/13932
# NOTE: ouch! It turns out that a reinitializable iterator for a Dataset will cause any ops with
# random seeds, such as these, to be reset, and thus each epoch will be evaluated exactly the
# same. The desired behavior would be to seed these ops once at the very beginning, so that an
# entire training run can be deterministic, but not with the exact same random augmentation during
# each epoch. Oh TensorFlow...
shape = tf.shape(image) # (h, w, c)
# random zoom
# TODO: possibly re-enable random zooms enabled via flag
#lb = shape[0] # lower bound on the resize is the current size of the image
#ub = lb + tf.to_int32(tf.to_float(lb)*0.25) # upper bound is 25% larger
#new_size = tf.random_uniform([2], minval=lb, maxval=ub, dtype=tf.int32, seed=seed)
#image = tf.image.resize_images(image, new_size) # random resize
#image = tf.random_crop(image, shape, seed=seed) # random cropping back to original size
# mirror padding if needed
size = int(math.ceil((patch_size + 30) * (math.cos(math.pi/4) + math.sin(math.pi/4))))
#pad_h = tf.maximum(0, size - shape[0])
#pad_w = tf.maximum(0, size - shape[1])
#pad_h_before = tf.to_int32(tf.floor(pad_h / 2))
#pad_w_before = tf.to_int32(tf.floor(pad_w / 2))
#pad_h_after = pad_h - pad_h_before
#pad_w_after = pad_w - pad_w_before
#paddings = tf.reshape(
# tf.stack([pad_h_before, pad_h_after, pad_w_before, pad_w_after, 0, 0], 0),
# [3, 2]) # h, w, z before/after paddings
pad = tf.to_int32(tf.ceil(tf.maximum(0, size - shape[0]) / 2))
paddings = tf.reshape(tf.stack([pad, pad, pad, pad, 0, 0], 0), [3, 2]) # h, w, z before/after
image = tf.pad(image, paddings, mode="REFLECT")
# random rotation
angle = tf.random_uniform([], minval=0, maxval=2*np.pi, seed=seed)
image = tf.contrib.image.rotate(image, angle, "BILINEAR")
# crop to bounding box to allow for random translation crop, if the input image is large enough
# note: translation distance: 7 µm = 30 pixels = max allowable euclidean pred distance from
# actual mitosis
# note: the allowable region is a circle with radius 30, but we are cropping to a square, so we
# can't crop from a square with side length of patch_size+30 or we run the risk of moving the
# mitosis to a spot in the corner of the resulting image, which would be outside of the circle
# radius, and thus we would be incorrect to label that image as positive. We also want to impose
# some amount of buffer into our learned model, so we place an upper bound of `c` pixels on the
# distance. We sample a distance along the height axis, compute a valid distance along the width
# axis that is upper bounded by a Euclidean translation distance of `c` in the worst case, crop
# the center of the image to this height and width, and then perform a random crop, yielding a
# patch for which the center is at most `c` pixels from the true center in terms of Euclidean
# distance.
# NOTE: In the dataset, all normal samples must be > 60 pixels from the center of a mitotic figure
# to avoid random crops that end up incorrectly within a mitotic region.
# c = 25 = sqrt(a**2 + b**2) = 6.25 µm
c = 25 # TODO: set this as a hyperparameter
a = tf.random_uniform([], minval=0, maxval=c, dtype=tf.int32, seed=seed)
b = tf.to_int32(tf.floor(tf.sqrt(tf.to_float(c**2 - a**2))))
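  # e.g. if a = 15 then b = floor(sqrt(25**2 - 15**2)) = 20, and
  # sqrt(15**2 + 20**2) = 25 = c, so the sampled (a, b) offset never moves the
  # center by more than c pixels in Euclidean distance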
crop_h = tf.minimum(shape[0], patch_size + a)
crop_w = tf.minimum(shape[1], patch_size + b)
image = tf.image.resize_image_with_crop_or_pad(image, crop_h, crop_w)
# random central crop == random translation augmentation
image = tf.random_crop(image, [patch_size, patch_size, 3], seed=seed)
image = tf.image.random_flip_up_down(image, seed=seed)
image = tf.image.random_flip_left_right(image, seed=seed)
image = tf.image.random_brightness(image, 64/255, seed=seed)
image = tf.image.random_contrast(image, 0.25, 1, seed=seed)
image = tf.image.random_saturation(image, 0.75, 1, seed=seed)
image = tf.image.random_hue(image, 0.04, seed=seed)
image = tf.clip_by_value(image, 0, 1)
return image
def create_augmented_batch(image, batch_size, patch_size):
"""Create a batch of augmented versions of the given image.
This will sample `batch_size/4` augmented images deterministically,
and yield four rotated variants for each augmented image (0, 90, 180,
270 degrees).
Args:
image: A Tensor of shape (h,w,c).
batch_size: Number of augmented versions to generate.
patch_size: The patch size to which to randomly crop the image.
Returns:
A Tensor of shape (batch_size,h,w,c) containing a batch of
data-augmented versions of the given image.
"""
assert batch_size % 4 == 0 or batch_size == 1, "batch_size must be 1 or divisible by 4"
# TODO rewrite this function to just draw `batch_size` examples from the `augment` function
def rots_batch(image):
rot0 = image
rot90 = tf.image.rot90(image)
rot180 = tf.image.rot90(image, k=2)
rot270 = tf.image.rot90(image, k=3)
rots = tf.stack([rot0, rot90, rot180, rot270])
return rots
if batch_size >= 4:
image_crop = tf.image.resize_image_with_crop_or_pad(image, patch_size, patch_size)
images = rots_batch(image_crop)
for i in range(round(batch_size/4)-1):
aug_image = augment(image, patch_size, i)
aug_image_rots = rots_batch(aug_image)
images = tf.concat([images, aug_image_rots], axis=0)
else:
images = tf.expand_dims(image, 0)
return images
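# For example, with batch_size=8 the returned tensor stacks the four rotations of
# the center crop plus the four rotations of one augmented sample, giving shape
# (8, patch_size, patch_size, 3).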
def marginalize(x):
"""Marginalize over injected noise at test time.
This implements noise marginalization by averaging over a batch of
values. Typically, this would be used with logits for a batch of
augmented versions of a single image, or for the associated batch
of labels. This is only performed at test time when
`tf.keras.backend.learning_phase() == 0`.
Args:
x: A Tensor of shape (n,...).
Returns:
A Tensor of shape (1, ...) containing the average over the batch
dimension.
"""
avg_x = tf.reduce_mean(x, axis=0, keepdims=True, name="avg_x")
x = tf.cond(tf.logical_not(tf.keras.backend.learning_phase()), lambda: avg_x, lambda: x)
return x
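# Usage sketch at eval time (the model call below is assumed, not from this file):
#   images = create_augmented_batch(image, 32, patch_size)  # (32, h, w, c)
#   logits = model(images)                                  # (32, 1)
#   marg_logits = marginalize(logits)  # (1, 1) when learning_phase == 0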
def process_dataset(dataset, model_name, patch_size, augmentation, marginalization, marg_batch_size,
threads, seed=None):
"""Process a Dataset.
Args:
dataset: Dataset of filenames.
model_name: String indicating the model to use.
patch_size: Integer length to which the square patches will be
resized.
augmentation: Boolean for whether or not to apply random augmentation
to the images.
marginalization: Boolean for whether or not to use noise
marginalization when evaluating the validation set. If True, then
each image in the validation set will be expanded to a batch of
augmented versions of that image, and predicted probabilities for
each batch will be averaged to yield a single noise-marginalized
prediction for each image. Note: if this is True, then
`marg_batch_size` must be divisible by 4, or equal to 1 for a special
debugging case of no augmentation.
marg_batch_size: Integer training batch size.
threads: Integer number of threads for dataset buffering.
seed: Integer random seed.
Returns:
A labeled Dataset of augmented, normalized images, possibly with
marginalization.
"""
dataset = dataset.map(lambda filename: preprocess(filename, patch_size),
num_parallel_calls=threads)
# augment (typically at training time)
if augmentation:
dataset = dataset.map(
lambda image, label, filename: (augment(image, patch_size, seed), label, filename),
num_parallel_calls=threads)
else:
# we are now generating larger original images to allow for random rotations & translations
# during augmentation, and thus if we don't apply augmentation, we need to ensure that the
# images are center cropped to the correct size.
dataset = dataset.map(lambda image, label, filename:
(tf.image.resize_image_with_crop_or_pad(image, patch_size, patch_size), label, filename),
num_parallel_calls=threads)
# TODO: should this be in an `elif` block before the above `else` block? in particular, the
# patch sizes will be messed up
# marginalize (typically at eval time)
if marginalization:
dataset = dataset.map(lambda image, label, filename:
(create_augmented_batch(image, marg_batch_size, patch_size),
tf.tile(tf.expand_dims(label, -1), [marg_batch_size]),
tf.tile(tf.expand_dims(filename, -1), [marg_batch_size])),
num_parallel_calls=threads)
# normalize
dataset = dataset.map(lambda image, label, filename:
(normalize(image, model_name), label, filename), num_parallel_calls=threads)
return dataset
def create_dataset(path, model_name, patch_size, batch_size, shuffle, augmentation, marginalization,
oversampling, threads, prefetch_batches, seed=None):
"""Create a dataset.
Args:
path: String path to the generated image patches. This should
contain folders for each class.
model_name: String indicating the model to use.
patch_size: Integer length to which the square patches will be
resized.
batch_size: Integer training batch size.
shuffle: Boolean for whether or not to shuffle filenames.
augmentation: Boolean for whether or not to apply random augmentation
to the images.
marginalization: Boolean for whether or not to use noise
marginalization when evaluating the validation set. If True, then
each image in the validation set will be expanded to a batch of
augmented versions of that image, and predicted probabilities for
each batch will be averaged to yield a single noise-marginalized
prediction for each image. Note: if this is True, then
`batch_size` must be divisible by 4, or equal to 1 for a special
debugging case of no augmentation.
oversampling: Boolean for whether or not to oversample the minority
mitosis class via class-aware sampling. Not compatible with
marginalization.
threads: Integer number of threads for dataset buffering.
prefetch_batches: Integer number of batches to prefetch.
seed: Integer random seed.
Returns:
A Dataset object.
"""
# read & process images
if oversampling:
# oversample the minority mitosis class via class-aware sampling, in which we sample the mitosis
# and normal samples separately in order to yield class-balanced mini-batches.
mitosis_dataset = tf.data.Dataset.list_files(os.path.join(path, "mitosis", "*.png"))
normal_dataset = tf.data.Dataset.list_files(os.path.join(path, "normal", "*.png"))
# zipping will stop once the normal dataset is empty
mitosis_dataset = mitosis_dataset.repeat(-1).shuffle(int(1e6))
normal_dataset = normal_dataset.shuffle(int(1e6))
mitosis_dataset = process_dataset(mitosis_dataset, model_name, patch_size, augmentation, False,
batch_size, threads, seed)
normal_dataset = process_dataset(normal_dataset, model_name, patch_size, augmentation, False,
batch_size, threads, seed)
# zip together the datasets, then flatten and batch so that each mini-batch contains an even
# number of mitosis and normal samples
# NOTE: the number of elements in the zipped dataset is limited to the lesser of the mitosis and
# normal datasets, and since the mitosis dataset is set to repeat indefinitely, this zipped
# dataset will be limited to the number of normal samples
dataset = tf.data.Dataset.zip((mitosis_dataset, normal_dataset))
dataset = dataset.flat_map(lambda mitosis, normal:
tf.data.Dataset.from_tensors(mitosis).concatenate(tf.data.Dataset.from_tensors(normal)))
dataset = dataset.batch(batch_size)
# note that batch norm could be affected by very small final batches, but right now this would
# also affect evaluation tasks, so we will wait to enable this until we move to tf Estimators
#dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
else:
dataset = tf.data.Dataset.list_files(os.path.join(path, "*", "*.png"))
if shuffle:
dataset = dataset.shuffle(int(1e7))
dataset = process_dataset(dataset, model_name, patch_size, augmentation, marginalization,
batch_size, threads, seed)
# batch if necessary
if not marginalization:
dataset = dataset.batch(batch_size)
# note that batch norm could be affected by very small final batches, but right now this would
# also affect evaluation tasks, so we will wait to enable this until we move to tf Estimators
#dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
# prefetch
dataset = dataset.prefetch(prefetch_batches)
return dataset
# model
def create_model(model_name, input_shape, images):
"""Create a model.
Args:
model_name: String indicating the model to use in ("vgg", "vgg19",
"resnet", "logreg").
input_shape: 3-Tuple containing the shape of a single image.
images: An image Tensor of shape (n,h,w,c).
Returns:
An unfrozen Keras Model in which `images` is the input tensor, and
another Model object representing the base model when using
pretrained models.
"""
if model_name == "logreg":
# logistic regression classifier
model_base = None
inputs = tf.keras.layers.Input(shape=input_shape, tensor=images)
x = tf.keras.layers.Flatten()(inputs)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "vgg":
# create a model by replacing the classifier of a VGG16 model with a new classifier specific
# to the breast cancer problem
# recommend fine-tuning last 4 layers
#with tf.device("/cpu"):
model_base = tf.keras.applications.VGG16(
include_top=False, input_shape=input_shape, input_tensor=images)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
#x = tf.keras.layers.GlobalAveragePooling2D()(x)
#x = tf.keras.layers.Dropout(0.5)(x)
#x = tf.keras.layers.Dropout(0.1)(x)
#x = tf.keras.layers.Dense(256, activation='relu', name='fc1')(x)
#x = tf.keras.layers.Dense(256, kernel_initializer="he_normal",
# kernel_regularizer=keras.regularizers.l2(l2))(x)
#x = tf.keras.layers.Dropout(0.5)(x)
#x = tf.keras.layers.Dropout(0.1)(x)
#x = tf.keras.layers.Dense(256, activation='relu', name='fc2')(x)
#x = tf.keras.layers.Dense(256, kernel_initializer="he_normal",
# kernel_regularizer=keras.regularizers.l2(l2))(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "vgg_new":
# train a new vgg16-like model from scratch on inputs in [-1, 1].
#with tf.device("/cpu"):
model_base = tf.keras.applications.VGG16(
include_top=False, input_shape=input_shape, input_tensor=images, weights=None)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
#x = tf.keras.layers.GlobalAveragePooling2D()(x)
#x = tf.keras.layers.Dropout(0.5)(x)
#x = tf.keras.layers.Dropout(0.1)(x)
#x = tf.keras.layers.Dense(256, activation='relu', name='fc1')(x)
#x = tf.keras.layers.Dense(256, kernel_initializer="he_normal",
# kernel_regularizer=tf.keras.regularizers.l2(l2))(x)
#x = tf.keras.layers.Dropout(0.5)(x)
#x = tf.keras.layers.Dropout(0.1)(x)
#x = tf.keras.layers.Dense(256, activation='relu', name='fc2')(x)
#x = tf.keras.layers.Dense(256, kernel_initializer="he_normal",
# kernel_regularizer=tf.keras.regularizers.l2(l2))(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "vgg19":
# create a model by replacing the classifier of a VGG19 model with a new classifier specific
# to the breast cancer problem
# recommend fine-tuning last 4 layers
#with tf.device("/cpu"):
#inputs = tf.keras.layers.Input(shape=input_shape)
model_base = tf.keras.applications.VGG19(
include_top=False, input_shape=input_shape, input_tensor=images)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "resnet":
# create a model by replacing the classifier of a ResNet50 model with a new classifier
# specific to the breast cancer problem
# recommend fine-tuning last 11 (stage 5 block c), 21 (stage 5 blocks b & c), or 33 (stage
# 5 blocks a,b,c) layers
#with tf.device("/cpu"):
# NOTE: there is an issue in keras with using batch norm with model templating, i.e.,
# defining a model with generic inputs and then calling it on a tensor. the issue stems from
# batch norm not being well defined for shared settings, but it makes it quite annoying in
# this context. to "fix" it, we define it by directly passing in the `images` tensor
# https://github.com/fchollet/keras/issues/2827
model_base = resnet50.ResNet50(include_top=False, input_shape=input_shape, input_tensor=images)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
#x = tf.keras.layers.GlobalAveragePooling2D()(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "resnet_new":
# train a new resnet50-like model from scratch on inputs in [-1, 1].
#with tf.device("/cpu"):
# NOTE: there is an issue in keras with using batch norm with model templating, i.e.,
# defining a model with generic inputs and then calling it on a tensor. the issue stems from
# batch norm not being well defined for shared settings, but it makes it quite annoying in
# this context. to "fix" it, we define it by directly passing in the `images` tensor
# https://github.com/fchollet/keras/issues/2827
model_base = resnet50.ResNet50(
include_top=False, input_shape=input_shape, input_tensor=images, weights=None)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
#x = tf.keras.layers.GlobalAveragePooling2D()(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "resnet_custom":
model_base = None
model_tower = resnet.ResNet(images, input_shape)
else:
raise Exception("model name unknown: {}".format(model_name))
# TODO: add this when it's necessary, and move to a separate function
## Multi-GPU exploitation via a linear combination of GPU loss functions.
#ins = []
#outs = []
#for i in range(num_gpus):
# with tf.device("/gpu:{}".format(i)):
# x = tf.keras.layers.Input(shape=input_shape) # split of batch
# out = resnet50(x) # run split on shared model
# ins.append(x)
# outs.append(out)
#model = tf.keras.Model(inputs=ins, outputs=outs) # multi-GPU, data-parallel model
model = model_tower
# unfreeze all model layers.
for layer in model.layers[1:]: # don't include input layer
layer.trainable = True
return model, model_base
# based on `keras.utils.multi_gpu_model`
def multi_gpu_model(model, gpus):
"""Replicates a model on different GPUs.
Specifically, this function implements single-machine
multi-GPU data parallelism. It works in the following way:
- Divide the model's input(s) into multiple sub-batches.
- Apply a model copy on each sub-batch. Every model copy
is executed on a dedicated GPU.
- Concatenate the results (on CPU) into one big batch.
E.g. if your `batch_size` is 64 and you use `gpus=2`,
then we will divide the input into 2 sub-batches of 32 samples,
process each sub-batch on one GPU, then return the full
batch of 64 processed samples.
This induces quasi-linear speedup on up to 8 GPUs.
This function is only available with the TensorFlow backend
for the time being.
# Arguments
model: A Keras model instance. To avoid OOM errors,
this model could have been built on CPU, for instance
(see usage example below).
gpus: Integer >= 2 or list of integers, number of GPUs or
list of GPU IDs on which to create model replicas.
# Returns
A Keras `Model` instance which can be used just like the initial
`model` argument, but which distributes its workload on multiple GPUs.
"""
if isinstance(gpus, (list, tuple)):
num_gpus = len(gpus)
target_gpu_ids = gpus
else:
num_gpus = gpus
target_gpu_ids = range(num_gpus)
def get_slice(data, i, parts):
shape = tf.shape(data)
batch_size = shape[:1]
input_shape = shape[1:]
step = batch_size // parts
if i == num_gpus - 1:
size = batch_size - step * i
else:
size = step
size = tf.concat([size, input_shape], axis=0)
stride = tf.concat([step, input_shape * 0], axis=0)
start = stride * i
return tf.slice(data, start, size)
all_outputs = []
for i in range(len(model.outputs)):
all_outputs.append([])
# Place a copy of the model on each GPU,
# each getting a slice of the inputs.
for i, gpu_id in enumerate(target_gpu_ids):
with tf.device('/cpu:0'):
inputs = []
# Retrieve a slice of the input on the CPU
for x in model.inputs:
input_shape = tuple(x.get_shape().as_list())[1:]
slice_i = tf.keras.layers.Lambda(
get_slice, output_shape=input_shape, arguments={'i': i, 'parts': num_gpus})(x)
inputs.append(slice_i)
with tf.device('/gpu:%d' % gpu_id):
with tf.name_scope('replica_%d' % gpu_id):
# Apply model on slice (creating a model replica on the target device).
outputs = model(inputs)
if not isinstance(outputs, list):
outputs = [outputs]
# Save the outputs for merging back together later.
for o in range(len(outputs)):
all_outputs[o].append(outputs[o])
# Merge outputs on CPU.
with tf.device('/cpu:0'):
merged = []
for name, outputs in zip(model.output_names, all_outputs):
merged.append(tf.keras.layers.concatenate(outputs, axis=0, name=name))
return tf.keras.Model(model.inputs, merged)
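# Illustrative usage (sketch, assuming at least 2 visible GPUs): wrap the single-tower
# model so each mini-batch is split across devices and outputs are concatenated on CPU.
#   parallel_model = multi_gpu_model(model_tower, gpus=2)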
def compute_data_loss(labels, logits):
"""Compute the mean logistic loss.
Args:
labels: A Tensor of shape (n, 1) containing a batch of labels.
logits: A Tensor of shape (n, 1) containing a batch of pre-sigmoid
prediction values.
Returns:
A scalar Tensor representing the mean logistic loss.
"""
# TODO: this is a binary classification problem so optimizing a loss derived from a Bernoulli
# distribution is appropriate. however, would the dynamics of the training algorithm be more
# stable if we treated this as a multi-class classification problem and derived a loss from a
# Multinomial distribution with two classes (and a single trial)? it would be
# over-parameterized, but then again, the deep net itself is already heavily parameterized.
# Bernoulli-derived loss
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.reshape(labels, [-1, 1]), logits=logits))
# Multinomial-derived loss
#labels = tf.one_hot(indices=labels, depth=2, on_value=1, off_value=0)
#loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits))
#loss = tf.reduce_mean(
# tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, features=logits))
return loss
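# Worked example (illustrative): for label y=1 and logit z=0, the per-example loss is
# -log(sigmoid(0)) = log(2) ~= 0.6931, and y=0 with z=0 gives the same value by symmetry,
# so a model emitting all-zero logits incurs a mean loss of log(2) regardless of labels.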
def compute_l2_reg_loss(model, include_frozen=False, reg_final=True, reg_biases=False):
"""Compute L2 loss of trainable model weights.
This places a Gaussian prior with mean 0 std 1 on each of the model
parameters.
Args:
model: A Keras Model object.
include_frozen: Boolean for whether or not to ignore frozen layers.
reg_final: Boolean for whether or not to regularize the final
logits-producing layer.
reg_biases: Boolean for whether or not to regularize biases.
Returns:
The L2 regularization loss of all trainable (i.e., unfrozen) model
weights, unless `include_frozen` is True, in which case all weights
are used.
"""
weights = []
if reg_final:
end = None
else: # don't regularize the final function that produces logits
end = -1 if not model.layers[-1].name.startswith("flatten") else -2
for layer in model.layers[:end]:
if layer.trainable or include_frozen:
if hasattr(layer, 'kernel'): # conv, dense
weights.append(layer.kernel)
elif hasattr(layer, 'gamma'): # batch norm scale
weights.append(1.0 - layer.gamma) # Gaussian prior centered at 1 for batch norm gamma value
if reg_biases:
# TODO: generally, we don't regularize the biases, but could we determine a probabilistic
# motivation to do this?
if hasattr(layer, 'bias'):
weights.append(layer.bias)
elif hasattr(layer, 'beta'):
weights.append(layer.beta)
l2_loss = tf.add_n([tf.nn.l2_loss(w) for w in weights])
return l2_loss
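# Note (interpretation, not in the original): tf.nn.l2_loss(w) computes sum(w**2) / 2, so
# scaling the total by a coefficient `l2` corresponds, up to an additive constant, to a
# Gaussian prior on each weight with precision `l2` (std = 1/sqrt(l2)); the "mean 0 std 1"
# phrasing above matches the special case l2 = 1.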
def compute_metrics(loss, labels, preds, probs, num_thresholds):
"""Compute metrics.
This creates ops that compute metrics in a streaming fashion.
Args:
loss: A Tensor representing the current batch mean loss.
labels: A Tensor of shape (n, 1) containing a batch of labels.
preds: A Tensor of shape (n, 1) containing a batch of binary
prediction values.
probs: A Tensor of shape (n, 1) containing a batch of probabilistic
prediction values.
num_thresholds: An integer indicating the number of thresholds to
use to compute PR curves.
Returns:
A tuple of mean loss, accuracy, positive predictive value
(precision), sensitivity (recall), F1, PR curve data, F1 list
based on the PR curve data, a grouped metrics update op, and a
group metrics reset op.
"""
# TODO: think about converting this to a class
mean_loss, mean_loss_update_op, mean_loss_reset_op = create_resettable_metric(tf.metrics.mean,
'mean_loss', values=loss)
acc, acc_update_op, acc_reset_op = create_resettable_metric(tf.metrics.accuracy,
'acc', labels=labels, predictions=preds)
ppv, ppv_update_op, ppv_reset_op = create_resettable_metric(tf.metrics.precision,
'ppv', labels=labels, predictions=preds)
sens, sens_update_op, sens_reset_op = create_resettable_metric(tf.metrics.recall,
'sens', labels=labels, predictions=preds)
f1 = 2 * (ppv * sens) / (ppv + sens)
pr, pr_update_op, pr_reset_op = create_resettable_metric(
tf.contrib.metrics.precision_recall_at_equal_thresholds,
'pr', labels=tf.cast(labels, dtype=tf.bool), predictions=probs, num_thresholds=num_thresholds)
f1s = 2 * (pr.precision * pr.recall) / (pr.precision + pr.recall)
# combine all reset & update ops
metric_update_ops = tf.group(
mean_loss_update_op, acc_update_op, ppv_update_op, sens_update_op, pr_update_op)
metric_reset_ops = tf.group(
mean_loss_reset_op, acc_reset_op, ppv_reset_op, sens_reset_op, pr_reset_op)
return mean_loss, acc, ppv, sens, f1, pr, f1s, metric_update_ops, metric_reset_ops
#return mean_loss, acc, ppv, sens, f1, metric_update_ops, metric_reset_ops
# utils
def create_resettable_metric(metric, scope, **metric_kwargs): # prob safer to only allow kwargs
"""Create a resettable metric.
Args:
metric: A tf.metrics metric function.
scope: A String scope name to enclose the metric variables within.
metric_kwargs: Kwargs for the metric.
Returns:
The metric op, the metric update op, and a metric reset op.
"""
# started with an implementation from https://github.com/tensorflow/tensorflow/issues/4814
with tf.variable_scope(scope) as scope:
metric_op, update_op = metric(**metric_kwargs)
scope_name = tf.contrib.framework.get_name_scope() # in case nested name/variable scopes
local_vars = tf.contrib.framework.get_variables(scope_name,
collection=tf.GraphKeys.LOCAL_VARIABLES) # get all local variables in this scope
reset_op = tf.variables_initializer(local_vars)
return metric_op, update_op, reset_op
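# Illustrative usage (sketch): a streaming accuracy that can be zeroed between epochs.
#   acc, acc_update_op, acc_reset_op = create_resettable_metric(
#       tf.metrics.accuracy, 'acc', labels=labels, predictions=preds)
#   sess.run(acc_reset_op)   # reset the local counter variables at the start of an epoch
#   sess.run(acc_update_op)  # accumulate counts for each mini-batch
#   sess.run(acc)            # read the running accuracy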
def initialize_variables(sess):
"""Initialize variables for training.
This initializes all tensor variables in the graph.
Args:
sess: A TensorFlow Session.
"""
# NOTE: Keras keeps track of the variables that are initialized, and any call to
# `tf.keras.backend.get_session()`, which is even used internally, will include logic to
# initialize variables. There is a situation in which resuming from a previous checkpoint and
# then saving the model after the first epoch will result in part of the model being
# reinitialized. The problem is that calling `tf.keras.backend.get_session()` here is too soon
# to initialize any variables, the resume branch skips any variable initialization, and then the
# `model.save` code path ends up calling `tf.keras.backend.get_session()`, thus causing part of
# the model to be reinitialized. Specifically, the model base is fine because it is initialized
# when the pretrained weights are added in, but the new dense classifier will not be marked as
# initialized by Keras. The non-resume branch will initialize any variables not initialized by
# Keras yet, and thus will avoid this issue. It could be possible to use
# `tf.keras.backend.manual_variable_initialization(True)` and then manually initialize
# all variables, but this would cause any pretrained weights to be removed. Instead, we should
# initialize all variables first with the equivalent of the logic in
# `tf.keras.backend.get_session()`, and then call resume.
# NOTE: the global variables initializer will erase the pretrained weights, so we instead only
# initialize the other variables
# NOTE: reproduced from the old tf.keras.backend._initialize_variables() function
# EDIT: this was updated in the master branch in commit
# https://github.com/fchollet/keras/commit/9166733c3c144739868fe0c30d57b861b4947b44
# TODO: given the change in master, reevaluate whether or not this is actually necessary anymore
variables = tf.global_variables()
uninitialized_variables = []
for v in variables:
if not hasattr(v, '_keras_initialized') or not v._keras_initialized:
uninitialized_variables.append(v)
v._keras_initialized = True
global_init_op = tf.variables_initializer(uninitialized_variables)
local_init_op = tf.local_variables_initializer()
sess.run([global_init_op, local_init_op])
# training
def train(train_path, val_path, exp_path, model_name, model_weights, patch_size, train_batch_size,
val_batch_size, clf_epochs, finetune_epochs, clf_lr, finetune_lr, finetune_momentum,
finetune_layers, l2, reg_biases, reg_final, augmentation, marginalization, oversampling,
num_gpus, threads, prefetch_batches, log_interval, checkpoint, resume, seed):
"""Train a model.
Args:
train_path: String path to the generated training image patches.
This should contain folders for each class.
val_path: String path to the generated validation image patches.
This should contain folders for each class.
exp_path: String path in which to store the model checkpoints, logs,
etc. for this experiment.
model_name: String indicating the model to use.
model_weights: Optional string path to an HDF5 file containing the
initial weights of the model. If None, then pretrained imagenet
weights will be used.
patch_size: Integer length to which the square patches will be
resized.
train_batch_size: Integer training batch size.
val_batch_size: Integer validation batch size.
clf_epochs: Integer number of epochs for which to train the new
classifier layers.
finetune_epochs: Integer number of epochs for which to fine-tune the
model.
clf_lr: Float learning rate for training the new classifier layers.
finetune_lr: Float learning rate for fine-tuning the model.
finetune_momentum: Float momentum rate for fine-tuning the model.
finetune_layers: Integer number of layers at the end of the
pretrained portion of the model to fine-tune. The new classifier
layers will still be trained during fine-tuning as well.
l2: Float L2 global regularization value.
reg_biases: Boolean for whether or not to regularize biases.
reg_final: Boolean for whether or not to regularize the final
logits-producing layer.
augmentation: Boolean for whether or not to apply random
augmentation to the images.
marginalization: Boolean for whether or not to use noise
marginalization when evaluating the validation set. If True, then
each image in the validation set will be expanded to a batch of
augmented versions of that image, and predicted probabilities for
each batch will be averaged to yield a single noise-marginalized
prediction for each image. Note: if this is True, then
`val_batch_size` must be divisible by 4, or equal to 1 for a
special debugging case of no augmentation.
oversampling: Boolean for whether or not to oversample the minority
mitosis class via class-aware sampling.
num_gpus: Integer number of GPUs to use for data parallelism.
threads: Integer number of threads for dataset buffering.
prefetch_batches: Integer number of batches to prefetch.
log_interval: Integer number of steps between logging during
training.
checkpoint: Boolean flag for whether or not to save a checkpoint
after each epoch.
resume: Boolean flag for whether or not to resume training from a
checkpoint.
seed: Integer random seed.
"""
# TODO: break this out into:
# * data gen func
# * inference func
# * loss func
# * metrics func
# * logging func
# * train func
# set random seed
# NOTE: At the moment, this is fairly useless because if the augmentation ops are seeded, they will
# be evaluated in the exact same deterministic manner on every epoch, which is not desired.
# Additionally, the multithreading needed to process the data will cause non-deterministic
# results. The one benefit is that the classification layers will be created deterministically.
np.random.seed(seed)
tf.set_random_seed(seed)
# create session, force tf.Keras to use it
config = tf.ConfigProto(allow_soft_placement=True)#, log_device_placement=True)
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
# debugger
#from tensorflow.python import debug as tf_debug
#sess = tf_debug.LocalCLIDebugWrapperSession(sess)
# data
with tf.name_scope("data"):
# NOTE: seed issues to be fixed in tf
train_dataset = create_dataset(train_path, model_name, patch_size, train_batch_size, True,
augmentation, False, oversampling, threads, prefetch_batches) #, seed)
val_dataset = create_dataset(val_path, model_name, patch_size, val_batch_size, False,
False, marginalization, False, threads, prefetch_batches) #, seed)
# note that batch norm could be affected by very small final batches, but right now the fix,
# which requires this change as well, would also affect evaluation tasks, so we will wait to
# enable that (and this change too) until we move to tf Estimators
#output_shapes = (tf.TensorShape([None, patch_size, patch_size, 3]),
# tf.TensorShape([None]),
# tf.TensorShape([None]))
iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
train_dataset.output_shapes)
#output_shapes)
train_init_op = iterator.make_initializer(train_dataset)
val_init_op = iterator.make_initializer(val_dataset)
images, labels, filenames = iterator.get_next()
input_shape = (patch_size, patch_size, 3)
# models
with tf.name_scope("model"):
# replicate model on each GPU to allow for data parallelism
if num_gpus > 1:
with tf.device("/cpu:0"):
model_tower, model_base = create_model(model_name, input_shape, images)
#model_tower, model_base = create_model(model_name, input_shape, images)
model = multi_gpu_model(model_tower, num_gpus)
else:
model_tower, model_base = create_model(model_name, input_shape, images)
model = model_tower
if model_weights is not None:
model_tower.load_weights(model_weights)
# compute logits and predictions, possibly with marginalization
# NOTE: tf prefers to feed logits into a combined sigmoid and logistic loss function for
# numerical stability
if marginalization:
logits = marginalize(model.output) # will marginalize at test time
labels = tf.cond(tf.keras.backend.learning_phase(), lambda: labels, lambda: labels[0:1])
else:
logits = model.output
# for Bernoulli-derived loss
probs = tf.nn.sigmoid(logits, name="probs")
preds = tf.round(probs, name="preds") # implicit threshold at 0.5
# for Multinomial-derived loss
#probs = tf.nn.softmax(logits, name="probs") # possible improved numerical stability
#preds = tf.argmax(probs, axis=1, name="preds")
# loss
with tf.name_scope("loss"):
with tf.control_dependencies([tf.assert_equal(tf.shape(labels)[0], tf.shape(logits)[0])]):
data_loss = compute_data_loss(labels, logits)
reg_loss = compute_l2_reg_loss(
model_tower, include_frozen=True, reg_final=reg_final, reg_biases=reg_biases)
loss = data_loss + l2*reg_loss
# TODO: enable this and test it
# use l2 reg during training, but not during validation. Otherwise, more fine-tuning will
# lead to an apparent lower validation loss, even though it may just be due to more layers
# that can be adjusted in order to lower the regularization portion of the loss.
#loss = tf.cond(tf.keras.backend.learning_phase(), lambda: data_loss + l2*reg_loss, lambda: data_loss)
# optim
# TODO: extract this into a function with tests
with tf.name_scope("optim"):
global_step_op = tf.train.get_or_create_global_step()
global_epoch_op = tf.Variable(0, trainable=False, name="global_epoch", dtype=tf.int32)
global_epoch_increment_op = tf.assign_add(global_epoch_op, 1, name="global_epoch_increment")
# TODO: rework the `finetune_layers` param to include starting from the beg/end
# classifier
# - freeze all pre-trained model layers.
if model_base:
for layer in model_base.layers:
layer.trainable = False
var_list = model_tower.trainable_weights
else:
var_list = None # i.e., use all available variables if we are not using transfer learning
# add any weight regularization to the base loss for unfrozen layers:
clf_reg_loss = compute_l2_reg_loss(model_tower, reg_final=reg_final, reg_biases=reg_biases)
clf_loss = data_loss + l2*clf_reg_loss
clf_opt = tf.train.AdamOptimizer(clf_lr)
clf_grads_and_vars = clf_opt.compute_gradients(clf_loss, var_list=var_list)
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
clf_model_update_ops = model_tower.updates
with tf.control_dependencies(clf_model_update_ops):
clf_train_op = clf_opt.apply_gradients(clf_grads_and_vars, global_step=global_step_op)
# finetuning
# - unfreeze a portion of the pre-trained model layers.
# note, could make this arbitrary, but for now, fine-tune some number of layers at the *end* of
# the pretrained portion of the model
if model_base:
if finetune_layers != 0:
for layer in model_base.layers[-finetune_layers:]:
layer.trainable = True
var_list = model_tower.trainable_weights
else:
var_list = None # i.e., use all available variables if we are not using transfer learning
# add any weight regularization to the base loss for unfrozen layers:
finetune_reg_loss = compute_l2_reg_loss(model_tower, reg_final=reg_final, reg_biases=reg_biases)
finetune_loss = data_loss + l2*finetune_reg_loss
# TODO: enable this, or use `tf.train.piecewise_constant` with `global_epoch`
#lr = tf.train.exponential_decay(
# finetune_lr, global_step_op,
# decay_steps=decay_steps, decay_rate=decay_rate,
# staircase=True)
finetune_opt = tf.train.MomentumOptimizer(finetune_lr, finetune_momentum, use_nesterov=True)
finetune_grads_and_vars = finetune_opt.compute_gradients(finetune_loss, var_list=var_list)
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
finetune_model_update_ops = model_tower.updates
with tf.control_dependencies(finetune_model_update_ops):
finetune_train_op = finetune_opt.apply_gradients(
finetune_grads_and_vars, global_step=global_step_op)
# metrics
with tf.name_scope("metrics"):
num_thresholds = 11
mean_loss, acc, ppv, sens, f1, pr, f1s, metric_update_ops, metric_reset_ops = compute_metrics(
loss, labels, preds, probs, num_thresholds)
f1_max = tf.reduce_max(f1s)
thresh_max = pr.thresholds[tf.argmax(f1s)]
# tensorboard summaries
# TODO: extract this into a function
# NOTE: tensorflow is annoying when it comes to name scopes, so sometimes the name needs to be
# hardcoded as a prefix instead of a proper name scope if that name was used as a name scope
# earlier. otherwise, a numeric suffix will be appended to the name.
# general minibatch summaries
with tf.name_scope("summary"):
# data
actual_batch_size = tf.shape(images)[0]
percent_pos = tf.reduce_mean(labels) # positive labels are 1
pos_mask = tf.cast(labels, tf.bool)
neg_mask = tf.logical_not(pos_mask)
mitosis_images = tf.boolean_mask(images, pos_mask)
normal_images = tf.boolean_mask(images, neg_mask)
mitosis_filenames = tf.boolean_mask(filenames, pos_mask)
normal_filenames = tf.boolean_mask(filenames, neg_mask)
num_preds = tf.shape(preds)[0]
# false-positive & false-negative cases
pos_preds_mask = tf.cast(tf.squeeze(preds, axis=1), tf.bool)
#pos_preds_mask = tf.cast(preds, tf.bool)
neg_preds_mask = tf.logical_not(pos_preds_mask)
fp_mask = tf.logical_and(pos_preds_mask, neg_mask)
fn_mask = tf.logical_and(neg_preds_mask, pos_mask)
fp_images = tf.boolean_mask(images, fp_mask)
fn_images = tf.boolean_mask(images, fn_mask)
fp_filenames = tf.boolean_mask(filenames, fp_mask)
fn_filenames = tf.boolean_mask(filenames, fn_mask)
with tf.name_scope("images"):
tf.summary.image("mitosis", unnormalize(mitosis_images, model_name), 1,
collections=["minibatch", "minibatch_val"])
tf.summary.image("normal", unnormalize(normal_images, model_name), 1,
collections=["minibatch", "minibatch_val"])
tf.summary.image("false-positive", unnormalize(fp_images, model_name), 1,
collections=["minibatch", "minibatch_val"])
tf.summary.image("false-negative", unnormalize(fn_images, model_name), 1,
collections=["minibatch", "minibatch_val"])
with tf.name_scope("data/filenames"):
tf.summary.text("mitosis", mitosis_filenames, collections=["minibatch", "minibatch_val"])
tf.summary.text("normal", normal_filenames, collections=["minibatch", "minibatch_val"])
tf.summary.text("false-positive", fp_filenames, collections=["minibatch", "minibatch_val"])
tf.summary.text("false-negative", fn_filenames, collections=["minibatch", "minibatch_val"])
tf.summary.histogram("data/images", images, collections=["minibatch", "minibatch_val"])
tf.summary.histogram("data/labels", labels, collections=["minibatch", "minibatch_val"])
for layer in model_tower.layers:
for weight in layer.weights:
tf.summary.histogram(weight.name, weight, collections=["minibatch", "minibatch_val"])
if hasattr(layer, 'output'):
layer_name = "model/{}/out".format(layer.name)
tf.summary.histogram(layer_name, layer.output, collections=["minibatch", "minibatch_val"])
tf.summary.histogram("model/probs", probs, collections=["minibatch", "minibatch_val"])
tf.summary.histogram("model/preds", preds, collections=["minibatch", "minibatch_val"])
with tf.name_scope("minibatch"):
tf.summary.scalar("loss", loss, collections=["minibatch"])
tf.summary.scalar("batch_size", actual_batch_size, collections=["minibatch", "minibatch_val"])
tf.summary.scalar("num_preds", num_preds, collections=["minibatch", "minibatch_val"])
tf.summary.scalar("percent_positive", percent_pos, collections=["minibatch"])
tf.summary.scalar("learning_phase", tf.to_int32(tf.keras.backend.learning_phase()),
collections=["minibatch", "minibatch_val"])
# TODO: gradient histograms
# TODO: first layer convolution kernels as images
minibatch_summaries = tf.summary.merge_all("minibatch")
minibatch_val_summaries = tf.summary.merge_all("minibatch_val")
# epoch summaries
with tf.name_scope("epoch"):
tf.summary.scalar("loss", mean_loss, collections=["epoch"])
tf.summary.scalar("acc", acc, collections=["epoch"])
tf.summary.scalar("ppv", ppv, collections=["epoch"])
tf.summary.scalar("sens", sens, collections=["epoch"])
tf.summary.scalar("f1", f1, collections=["epoch"])
tf.summary.scalar("f1_max", f1_max, collections=["epoch"])
tf.summary.scalar("thresh_max", thresh_max, collections=["epoch"])
tb.summary.pr_curve_raw_data_op(
name='pr_curve',
true_positive_counts=pr.tp,
false_positive_counts=pr.fp,
true_negative_counts=pr.tn,
false_negative_counts=pr.fn,
precision=pr.precision,
recall=pr.recall,
num_thresholds=num_thresholds,
display_name='pr curve',
description="PR curve for {num_thresholds} thresholds.".format(num_thresholds=num_thresholds),
collections=["epoch"])
epoch_summaries = tf.summary.merge_all("epoch")
# use train and val writers so that plots can be on same graph
train_writer = tf.summary.FileWriter(os.path.join(exp_path, "train"), tf.get_default_graph())
val_writer = tf.summary.FileWriter(os.path.join(exp_path, "val"))
# save ops
checkpoint_filename = os.path.join(exp_path, "model.ckpt")
saver = tf.train.Saver()
# initialize stuff
initialize_variables(sess)
if resume:
saver.restore(sess, checkpoint_filename)
# TODO: extract this into a function with tests
# training loop for new classifier layers and fine-tuning
for train_op, epochs in [(clf_train_op, clf_epochs), (finetune_train_op, finetune_epochs)]:
global_epoch_start = sess.run(global_epoch_op)
for _ in range(global_epoch_start, global_epoch_start+epochs): # allow for resuming of training
global_epoch = sess.run(global_epoch_op)
# training
sess.run(train_init_op)
while True:
global_step = sess.run(global_step_op)
try:
if log_interval > 0 and global_step % log_interval == 0:
# train, update metrics, & log stuff
_, _, loss_val, summary_str = sess.run([train_op, metric_update_ops, loss,
minibatch_summaries], feed_dict={tf.keras.backend.learning_phase(): 1})
mean_loss_val, f1_val = sess.run([mean_loss, f1])
train_writer.add_summary(summary_str, global_step)
print("train", global_epoch, global_step, loss_val, mean_loss_val, f1_val)
else:
# train & update metrics
_, _ = sess.run(
[train_op, metric_update_ops], feed_dict={tf.keras.backend.learning_phase(): 1})
except tf.errors.OutOfRangeError:
break
# log average training metrics for epoch & reset
op_values = sess.run([f1, f1_max, thresh_max, ppv, sens, acc, mean_loss, epoch_summaries])
(f1_val, f1_max_val, thresh_max_val, ppv_val, sens_val, acc_val, mean_loss_val,
summary_str) = op_values
print("---epoch {global_epoch}, train f1 (@ 0.5): {f1_val}, train max f1 "\
"(@ {thresh_max_val}): {f1_max_val}, train ppv: {ppv_val}, train sens: {sens_val}, "\
"train acc: {acc_val}, train avg loss: {mean_loss_val}"\
.format(global_epoch=global_epoch, f1_val=f1_val, acc_val=acc_val,
mean_loss_val=mean_loss_val, thresh_max_val=thresh_max_val,
f1_max_val=f1_max_val, ppv_val=ppv_val, sens_val=sens_val))
train_writer.add_summary(summary_str, global_epoch)
sess.run(metric_reset_ops)
# validation
sess.run(val_init_op)
vi = 0 # validation step
while True:
try:
# evaluate & update metrics
if log_interval > 0 and vi % log_interval == 0:
_, loss_val, summary_str = sess.run(
[metric_update_ops, loss, minibatch_val_summaries],
feed_dict={tf.keras.backend.learning_phase(): 0})
mean_loss_val, f1_val = sess.run([mean_loss, f1])
print("val", global_epoch, vi, loss_val, mean_loss_val, f1_val)
val_writer.add_summary(summary_str, vi)
else:
_ = sess.run(metric_update_ops, feed_dict={tf.keras.backend.learning_phase(): 0})
vi += 1
except tf.errors.OutOfRangeError:
break
# log average validation metrics for epoch & reset
op_values = sess.run([f1, f1_max, thresh_max, ppv, sens, acc, mean_loss, epoch_summaries])
(f1_val, f1_max_val, thresh_max_val, ppv_val, sens_val, acc_val, mean_loss_val,
summary_str) = op_values
print("---epoch {global_epoch}, val f1 (@ 0.5): {f1_val}, val max f1 (@ {thresh_max_val}): "\
"{f1_max_val}, val ppv: {ppv_val}, val sens: {sens_val}, val acc: {acc_val}, "\
"val avg loss: {mean_loss_val}"\
.format(global_epoch=global_epoch, f1_val=f1_val,
thresh_max_val=thresh_max_val, f1_max_val=f1_max_val, ppv_val=ppv_val,
sens_val=sens_val, acc_val=acc_val, mean_loss_val=mean_loss_val))
val_writer.add_summary(summary_str, global_epoch)
sess.run(metric_reset_ops)
sess.run(global_epoch_increment_op) # global_epoch += 1
# save model
if checkpoint:
keras_filename = os.path.join(exp_path, "checkpoints",
"{f1_max_val:.5}_f1max_{f1_val:.5}_f1_{mean_loss_val:.5}_loss_{global_epoch}_"\
"epoch_model.hdf5"\
.format(f1_max_val=f1_max_val, f1_val=f1_val, mean_loss_val=mean_loss_val,
global_epoch=global_epoch))
model_tower.save(keras_filename, include_optimizer=False) # keras model
saver.save(sess, checkpoint_filename) # full TF graph
print("Saved model file to {}".format(keras_filename))
val_writer.flush()
#train_writer.flush()
def main(argv=None):
"""Command line interface for this script. Can optionally pass in a
list of strings to simulate command line usage.
"""
# parse args
parser = argparse.ArgumentParser()
parser.add_argument("--patches_path", default=os.path.join("data", "mitoses", "patches"),
help="path to the generated image patches containing `train` & `val` folders "\
"(default: %(default)s)")
parser.add_argument("--exp_parent_path", default=os.path.join("experiments", "mitoses"),
help="parent path in which to store experiment folders (default: %(default)s)")
parser.add_argument("--exp_name", default=None,
help="path within the experiment parent path in which to store the model checkpoints, "\
"logs, etc. for this experiment; an existing path can be used to resume training "\
"(default: %%y-%%m-%%d_%%H:%%M:%%S_{model})")
parser.add_argument("--exp_name_suffix", default=None,
help="suffix to add to experiment name (default: all parameters concatenated together)")
parser.add_argument("--exp_full_path", default=None,
help="full path in which to store the experiment. either this or the --exp_parent_path, "\
"--exp_name, --exp_name_suffix flags as a group can be used. typically, this would "\
"be used to resume an existing experiment (default: %(default)s)")
parser.add_argument("--model", default="vgg",
choices=["logreg", "vgg", "vgg_new", "vgg19", "resnet", "resnet_new", "resnet_custom"],
help="name of the model to use in ['logreg', 'vgg', 'vgg_new', 'vgg19', 'resnet', "\
"'resnet_new', 'resnet_custom'] (default: %(default)s)")
parser.add_argument("--model_weights", default=None,
help="optional hdf5 file containing the initial weights of the model. if not supplied, the "\
"model will start with pretrained weights from imagenet. (default: %(default)s)")
parser.add_argument("--patch_size", type=int, default=64,
help="integer length to which the square patches will be resized (default: %(default)s)")
parser.add_argument("--train_batch_size", type=int, default=32,
help="training batch size (default: %(default)s)")
parser.add_argument("--val_batch_size", type=int, default=32,
help="validation batch size (default: %(default)s)")
parser.add_argument("--clf_epochs", type=int, default=1,
help="number of epochs for which to train the new classifier layers "\
"(default: %(default)s)")
parser.add_argument("--finetune_epochs", type=int, default=0,
help="number of epochs for which to fine-tune the unfrozen layers (default: %(default)s)")
parser.add_argument("--clf_lr", type=float, default=1e-3,
help="learning rate for training the new classifier layers (default: %(default)s)")
parser.add_argument("--finetune_lr", type=float, default=1e-4,
help="learning rate for fine-tuning the unfrozen layers (default: %(default)s)")
parser.add_argument("--finetune_momentum", type=float, default=0.9,
help="momentum rate for fine-tuning the unfrozen layers (default: %(default)s)")
parser.add_argument("--finetune_layers", type=int, default=0,
help="number of layers at the end of the pretrained portion of the model to fine-tune "\
"(note: the new classifier layers will still be trained during fine-tuning as well) "\
"(default: %(default)s)")
parser.add_argument("--l2", type=float, default=1e-3,
help="amount of l2 weight regularization (default: %(default)s)")
parser.add_argument("--reg_biases", default=False, action="store_true",
help="whether or not to regularize biases. (default: %(default)s)")
parser.add_argument("--skip_reg_final", dest="reg_final", action="store_false",
help="whether or not to skip regularization of the logits-producing layer "\
"(default: %(default)s)")
parser.set_defaults(reg_final=True)
augment_parser = parser.add_mutually_exclusive_group(required=False)
augment_parser.add_argument("--augment", dest="augment", action="store_true",
help="apply random augmentation to the training images (default: True)")
augment_parser.add_argument("--no_augment", dest="augment", action="store_false",
help="do not apply random augmentation to the training images (default: False)")
parser.set_defaults(augment=True)
parser.add_argument("--marginalize", default=False, action="store_true",
help="use noise marginalization when evaluating the validation set. if this is set, then "\
"the validation batch_size must be divisible by 4, or equal to 1 for no augmentation "\
"(default: %(default)s)")
parser.add_argument("--oversample", default=False, action="store_true",
help="oversample the minority mitosis class during training via class-aware sampling "\
"(default: %(default)s)")
parser.add_argument("--num_gpus", type=int, default=1,
help="num_gpus: Integer number of GPUs to use for data parallelism. (default: %(default)s)")
parser.add_argument("--threads", type=int, default=5,
help="number of threads for dataset parallel processing; note: this will cause "\
"non-reproducibility (default: %(default)s)")
# TODO: update this to default to `None` to take advantage of auto prefetch buffer size tuning
# https://github.com/tensorflow/tensorflow/commit/d355f4e2644b68ea643f573c564936ec23b93787
parser.add_argument("--prefetch_batches", type=int, default=100,
help="number of batches to prefetch (default: %(default)s)")
parser.add_argument("--log_interval", type=int, default=100,
help="number of steps between logging during training (default: %(default)s)")
checkpoint_parser = parser.add_mutually_exclusive_group(required=False)
checkpoint_parser.add_argument("--checkpoint", dest="checkpoint", action="store_true",
help="save a checkpoint after each epoch (default: True)")
checkpoint_parser.add_argument("--no_checkpoint", dest="checkpoint", action="store_false",
help="do not save a checkpoint after each epoch (default: False)")
parser.set_defaults(checkpoint=True)
parser.add_argument("--resume", default=False, action="store_true",
help="resume training from a checkpoint (default: %(default)s)")
parser.add_argument("--seed", type=int, help="random seed (default: %(default)s)")
args = parser.parse_args(argv)
# set train/val paths
train_path = os.path.join(args.patches_path, "train")
val_path = os.path.join(args.patches_path, "val")
if args.exp_full_path is None:
if args.exp_name is None:
date = datetime.strftime(datetime.today(), "%y%m%d_%H%M%S")
args.exp_name = date + "_" + args.model
if args.exp_name_suffix is None:
args.exp_name_suffix = "patch_size_{args.patch_size}_batch_size_{args.train_batch_size}_"\
"clf_epochs_{args.clf_epochs}_ft_epochs_{args.finetune_epochs}_"\
"clf_lr_{args.clf_lr}_ft_lr_{args.finetune_lr}_"\
"ft_mom_{args.finetune_momentum}_ft_layers_{args.finetune_layers}_"\
"l2_{args.l2}_rb_{args.reg_biases}_aug_{args.augment}_"\
"marg_{args.marginalize}_over_{args.oversample}".format(args=args)
full_exp_name = args.exp_name + "_" + args.exp_name_suffix
args.exp_full_path = os.path.join(args.exp_parent_path, full_exp_name)
# make an experiment folder
if not os.path.exists(args.exp_full_path):
os.makedirs(os.path.join(args.exp_full_path, "checkpoints"))
print("experiment directory: {}".format(args.exp_full_path))
# create a random seed if needed
if args.seed is None:
args.seed = np.random.randint(1e9)
# save args to a file in the experiment folder, appending if it exists
with open(os.path.join(args.exp_full_path, 'args.txt'), 'a') as f:
json.dump(args.__dict__, f)
print("", file=f)
# can be read in later with
#with open('args.txt', 'r') as f:
# args = json.load(f)
# save command line invocation to txt file for ease of rerunning the exact experiment
with open(os.path.join(args.exp_full_path, 'invoke.txt'), 'a') as f:
# NOTE: since we sometimes call this `main` function via the hyperparam search script, we can't
# always use `sys.argv` because it would always refer to the outer invocation, i.e., the
# invocation of the hyperparam search script.
if argv is not None: # called from hpo script
fname = os.path.basename(__file__)
f.write("python3 {fname} ".format(fname=fname) + " ".join(argv) + "\n")
else: # called directly
f.write("python3 " + " ".join(sys.argv) + "\n")
# copy this script to the experiment folder
shutil.copy2(os.path.realpath(__file__), args.exp_full_path)
# train!
train(train_path=train_path, val_path=val_path, exp_path=args.exp_full_path,
model_name=args.model, model_weights=args.model_weights, patch_size=args.patch_size,
train_batch_size=args.train_batch_size, val_batch_size=args.val_batch_size,
clf_epochs=args.clf_epochs, finetune_epochs=args.finetune_epochs, clf_lr=args.clf_lr,
finetune_lr=args.finetune_lr, finetune_momentum=args.finetune_momentum,
finetune_layers=args.finetune_layers, l2=args.l2, reg_biases=args.reg_biases,
reg_final=args.reg_final, augmentation=args.augment, marginalization=args.marginalize,
oversampling=args.oversample, num_gpus=args.num_gpus, threads=args.threads,
prefetch_batches=args.prefetch_batches, log_interval=args.log_interval,
checkpoint=args.checkpoint, resume=args.resume, seed=args.seed)
if __name__ == "__main__":
main()
# ---
# tests
# TODO: eventually move these to a separate file.
# `py.test train_mitoses.py`
# TODO: use this fixture when we move these tests to a test module
#import pytest
#
#@pytest.fixture(autouse=True)
#def reset():
# """Ensure that the TensorFlow graph/session are clean."""
# tf.reset_default_graph()
# tf.keras.backend.clear_session()
# yield # run test
def reset():
"""Ensure that the TensorFlow graph/session are clean."""
tf.reset_default_graph()
tf.keras.backend.clear_session()
# data
def test_get_image(tmpdir):
# NOTE: pytest will provide a temp directory automatically:
# https://docs.pytest.org/en/latest/tmpdir.html
from PIL import Image
reset()
# create png image
filename = os.path.join(str(tmpdir), "x.png")
x = np.random.randint(0, 255, dtype=np.uint8, size=(64,64,3))
Image.fromarray(x).save(filename)
image_op = get_image(filename, 64)
sess = tf.keras.backend.get_session()
image = sess.run(image_op)
assert image.shape == (64, 64, 3)
assert image.dtype == np.float32
assert np.min(image) >= 0
assert np.max(image) < 1
assert np.allclose(x.astype(np.float32) / 255, image)
assert np.allclose((x / 255).astype(np.float32), image)
def test_get_label():
import pytest
# mitosis
reset()
filename = "train/mitosis/1_03_05_713_348.jpg"
label_op = get_label(filename)
sess = tf.keras.backend.get_session()
label = sess.run(label_op)
assert label == 1
# normal
reset()
filename = "train/normal/1_03_05_713_348.jpg"
label_op = get_label(filename)
sess = tf.keras.backend.get_session()
label = sess.run(label_op)
assert label == 0
# wrong label name
with pytest.raises(tf.errors.InvalidArgumentError):
reset()
filename = "train/unknown/1_03_05_713_348.jpg"
label_op = get_label(filename)
sess = tf.keras.backend.get_session()
label = sess.run(label_op)
def test_preprocess(tmpdir):
# NOTE: pytest will provide a temp directory automatically:
# https://docs.pytest.org/en/latest/tmpdir.html
from PIL import Image
reset()
# create png image
folder = os.path.join(str(tmpdir), "this/train/mitosis")
os.makedirs(folder)
filename_orig = os.path.join(folder, "x.png")
x = np.random.randint(0, 255, dtype=np.uint8, size=(64,64,3))
Image.fromarray(x).save(filename_orig)
image_op, label_op, filename_op = preprocess(tf.constant(filename_orig), 64)
sess = tf.keras.backend.get_session()
image, label, filename = sess.run([image_op, label_op, filename_op])
assert image.shape == (64, 64, 3)
assert image.dtype == np.float32
assert np.min(image) >= 0
assert np.max(image) < 1
assert label == 1.0
assert filename.decode("utf-8") == filename_orig
def test_normalize_unnormalize():
reset()
sess = tf.keras.backend.get_session()
input_shape = (64, 64, 3)
x_np = np.random.rand(*input_shape).astype(np.float32) # uniform sampling in [0, 1)
x_batch_np = np.random.rand(2, *input_shape).astype(np.float32) # uniform sampling in [0, 1)
# imagenet preprocessing
model_name = "vgg"
means = np.array([103.939, 116.779, 123.68]).astype(np.float32)
x_norm_correct_np = x_np[..., ::-1] * 255 - means
x_batch_norm_correct_np = x_batch_np[..., ::-1] * 255 - means
assert x_np.dtype == np.float32
assert x_np.dtype == x_batch_np.dtype == x_norm_correct_np.dtype == x_batch_norm_correct_np.dtype
# single example
def test(x_norm, x_unnorm):
"""Test normalized and unnormalized versions of x."""
# NOTE: closes over x_np & x_norm_correct_np
assert x_norm.dtype == x_norm_correct_np.dtype
assert x_unnorm.dtype == x_np.dtype
assert np.allclose(x_norm, x_norm_correct_np)
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../LassoVariants/AlternateLasso')
# sys.path.append('../../LassoVariants/AlternateLasso')
import pickle
import numpy as np
from AlternateLinearModel import AlternateLogisticLasso
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
import pandas as pd
from copy import deepcopy
from sklearn.metrics import roc_auc_score
from multiprocessing import Pool
SEED = 42
def apply_alternate_LL(X, y, feature_names=[], rho=0.1, tol=1e-4, save='test.npy'):
mdl = AlternateLogisticLasso(rho=rho, tol=tol, verbose=True, save=save)
mdl = mdl.fit(X, y, feature_names)
#print(mdl.a_, mdl.lasso_.coef_)
return mdl
def parse_mdl_predictor(mdl):
if len(mdl.alternates_) == 0:
for i, coef in enumerate(mdl.a_):
yield i, i, 0, 0, coef
else:
for i, best_features in enumerate(mdl.alternates_):
fname, coef = best_features[0], best_features[1]
yield fname, i, 0, 0, coef
for j, semi in enumerate(best_features[2]):
sname, sscore, scoef = semi[0], semi[1], semi[2]
yield sname, i, j+1, sscore-mdl.obj_, scoef
yield 'intercept', -1, 0, 0, mdl.b_
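# Illustrative sketch of the yielded rows (matching the columns used downstream):
# (index, dimension, order, score, coef), where order 0 is the feature selected by the
# lasso, order >= 1 are its alternates scored by their objective gap, and a final
# ('intercept', -1, 0, 0, b) row carries the bias term.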
METHODS = ['la', 'logistic', 'rf', 'svm']
# METHODS = ['la', 'logistic', 'rf']
def get_classifier(classifier, kwargs={}):
mdl = None
if classifier == 'la' or classifier == 'laobest':
mdl = AlternateLogisticLasso(**kwargs)
elif classifier == 'logistic':
mdl = LogisticRegression(penalty='l1', solver='liblinear')
elif classifier == 'rf':
mdl = RandomForestClassifier(n_estimators=100)
elif classifier == 'svm':
mdl = SVC(kernel='rbf', degree=3, probability=True, random_state=SEED, gamma='auto')
else:
pass
return mdl
def make_other_clfs_each(X, c, clusters, classifier, output):
y = np.array([1 if cl == c else 0 for cl in clusters])
mdl = get_classifier(classifier)
mdl = mdl.fit(X, y)
print('Best auc', output, c, classifier, roc_auc_score(y, mdl.predict_proba(X)[:,1]))
return (c, deepcopy(mdl))
def make_other_clfs_parallel(X, clusters, output, classifier='logistic', cores=1): # logistic, svm, random forest
global SEED
pool = Pool(processes=cores)
print(cores, classifier)
# X = X[0:100,:]
# clusters = clusters[0:100]
cluster_list = sorted(list(set(clusters)))
for i in range(0, len(cluster_list), cores):
print(str(i)+'-'+str(min(len(cluster_list), i+cores))+' clusters')
sys.stdout.flush()
result = pool.starmap_async(make_other_clfs_each, [(deepcopy(X), c, clusters, classifier, output) for c in cluster_list[i:min(len(cluster_list), i+cores)]])
clfs = {}
for (c, mdl) in result.get():
clfs[c] = mdl
with open(output+'_'+classifier+'.npy', 'wb') as f:
pickle.dump(clfs, f)
pool.close()
pool.join()
del pool
def make_other_clfs(X, clusters, output, classifier='logistic', cores=1, saved=False): # logistic, svm, random forest
global SEED
if cores > 1:
make_other_clfs_parallel(X, clusters, output, classifier, cores)
return
clfs = {}
ofname = output+'_'+classifier+'.npy'
if (not saved) or (not os.path.exists(ofname)):
for c in set(clusters):
y = np.array([1 if cl == c else 0 for cl in clusters])
mdl = get_classifier(classifier)
mdl = mdl.fit(X, y)
print('Best auc', output, c, classifier, roc_auc_score(y, mdl.predict_proba(X)[:,1]))
clfs[c] = deepcopy(mdl)
with open(ofname, 'wb') as f:
pickle.dump(clfs, f)
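# Illustrative usage (sketch; the output prefix is hypothetical): fit one one-vs-rest
# classifier per cluster label and pickle the dict of fitted models to
# '<output>_<classifier>.npy'.
#   make_other_clfs(X, clusters, output='results/run1', classifier='logistic', cores=4)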
def read_other_clfs(output, classifier='logistic'):
if os.path.exists(output+'_'+classifier+'.npy'):
with open(output+'_'+classifier+'.npy', 'rb') as f:
return pickle.load(f)
return None
def read_lasso_clf(header):
if os.path.exists(header+'.npy'):
with open(header+'.npy', 'rb') as f:
return pickle.load(f)
return None
def make_lasso_matrix_each(X, c, clusters, feature_names):
print('make_lasso_each', c, X.shape)
kwargs = {'rho':1e-5, 'tol':1e-4, 'check':0.90, 'max_alternates':-1, 'maxitr':1000}
mdl = get_classifier('la', kwargs)
y = np.array([True if cl == c else False for cl in clusters])
mdl = mdl.fit(X, y, featurename=feature_names)
# mdl.predict(X)
temp = pd.DataFrame([list(x) for x in parse_mdl_predictor(mdl)], columns=['index', 'dimension', 'order', 'score', 'coef'])
temp = temp.assign(gene=[feature_names[int(x)] if x != 'intercept' else x for x in temp.loc[:,'index']])
temp = temp.assign(celltype=c)
print(c, mdl, temp)
return (c, deepcopy(mdl), temp)
def make_lasso_matrix_parallel(X, clusters, header, feature_names=[], cores=1):
print(cores, 'la')
print(X.shape, clusters[0:10], len(set(clusters)))
pmatrix = None
clfs = {}
pool = Pool(processes=cores)
cluster_list = sorted(list(set(clusters)))
for i in range(0, len(cluster_list), cores):
print(str(i)+'-'+str(min(len(cluster_list), i+cores))+' clusters')
sys.stdout.flush()
result = pool.starmap_async(make_lasso_matrix_each, [(deepcopy(X), c, clusters, feature_names) for c in cluster_list[i:min(len(cluster_list), i+cores)]])
for (c, mdl, temp) in result.get():
if pmatrix is None:
pmatrix = temp
else:
pmatrix = pd.concat([pmatrix, temp], axis=0, ignore_index=True)
clfs[c] = mdl
with open(header+'.npy', 'wb') as f:
pickle.dump(clfs, f)
if pmatrix is not None:
pmatrix.to_csv(header+'.csv')
pool.close()
pool.join()
del pool
def make_lasso_matrix(X, clusters, header, feature_names=[], cores=1, saved=False):
if cores > 1:
make_lasso_matrix_parallel(X, clusters, header, feature_names, cores)
return
pmatrix = None
clfs = {}
kwargs = {'rho':1e-5, 'tol':1e-4, 'check':0.90, 'max_alternates':-1, 'maxitr':1000}
ofname = header+'.npy'
if (not saved) or (not os.path.exists(ofname)):
for c in sorted(list(set(clusters))):
mdl = get_classifier('la', kwargs)
y = np.array([True if cl == c else False for cl in clusters])
mdl = mdl.fit(X, y, featurename=feature_names)
mdl.predict(X)
# mdl = apply_alternate_LL(X, y, feature_names, save='')
temp = pd.DataFrame([list(x) for x in parse_mdl_predictor(mdl)], columns=['index', 'dimension', 'order', 'score', 'coef'])
temp = temp.assign(gene=[feature_names[int(x)] if x != 'intercept' else x for x in temp.loc[:,'index']])
temp = temp.assign(celltype=c)
if pmatrix is None:
pmatrix = temp
else:
pmatrix = pd.concat([pmatrix, temp], axis=0, ignore_index=True)
clfs[c] = deepcopy(mdl)
with open(ofname, 'wb') as f:
pickle.dump(clfs, f)
if pmatrix is not None:
pmatrix.to_csv(header+'.csv')
if __name__ == "__main__":
seed = 0
num = 1000
dim = 2
dim_extra = 2
np.random.seed(seed)
X = np.random.randn(num, dim + dim_extra)
for i in range(dim_extra):
        X[:, dim + i] = X[:, 0] + 0.5 * np.random.randn(num)
import numpy as np
import scipy.optimize
import scipy.linalg
from scipy.stats import unitary_group as UG
###############
### Matrix Operations
###############
def dag(A):
return 1.*np.conjugate(np.transpose(A))
def dot(A,B):
return np.trace(dag(A)@B)
def norm(A):
return np.sqrt(np.abs(dot(A,A)))
def kprod(A,B):
return np.kron(A,B)
def ksum(A,B):
return np.kron(A,one) + np.kron(one,B)
def eig(A):
vals, vecs = np.linalg.eigh(A)
vecs = np.transpose(vecs) ## so vecs[0] is an eigenvector
return 1.*vals, 1.*vecs
def couter(psi):
return 1.*np.outer(psi, np.conjugate(psi))
###############
### Multipartite Matrix Operations
###############
## basis dictionaries
d22 = {'00':0, '01':1, '10':2, '11':3}
d23 = {'00':0, '01':1, '02':2, '10':3, '11':4, '12':5}
d33 = {'00':0, '01':1, '02':2, '10':3, '11':4, '12':5, '20':6, '21':7, '22':8}
d222 = {'000':0, '001':1, '010':2, '011':3, '100':4, '101':5, '110':6, '111':7}
d223 = {'000':0, '001':1, '002':2, '010':3, '011':4, '012':5, '100':6, '101':7, '102':8, '110':9, '111':10, '112':11}
d2222 = {format(i,'04b'):i for i in range(16)}
dxx = {'22':d22, '23':d23, '222':d222, '223':d223, '33':d33, '2222':d2222}
## dictionary lookup from n=(nA,nB,...)
def ndict(n):
return dxx[''.join([str(nn) for nn in n])]
## generate list of basis index labels for system of type n=(nA,nB,...), possibly holding some index values fixed
## sums over all index values which are 'x' in the hold argument
## "mind" = "multipartite indices"
def mind(n=[2,2], hold='xxxxx'):
ss = [''.join([str(i) for i in range(nn)]) for nn in n]
for i in range(len(n)):
if not hold[i] == 'x':
ss[i] = hold[i]
ijk= [x for x in ss[0]]
for s in ss[1:]:
ijk = [x+y for x in ijk for y in s]
return tuple(ijk)
## "rind" = "rho indices"
def rind(n=[2,2], hold='xxxxx'):
dd = ndict(n)
return np.array([dd[idx] for idx in mind(n,hold)], dtype=int)
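## Illustrative example (added; not part of the original module): for a
## two-qubit system the helpers enumerate basis labels and flat indices, e.g.
##     mind(n=[2, 2])            -> ('00', '01', '10', '11')
##     mind(n=[2, 2], hold='0x') -> ('00', '01')
##     rind(n=[2, 2], hold='0x') -> array([0, 1])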
## compute reduced density matrices given n=(nA,nB,...) and rho
def REDUCE(rho, n):
## check dims match
if len(rho)==np.prod(n):
if len(n)==1:
red = [1.*rho]
if len(n)>1:
red = [np.zeros((nn,nn), dtype=complex) for nn in n]
## iterate over subspaces
for m in range(len(n)):
## iterate over reduced density matrix elements
for i in range(n[m]):
for j in range(n[m]):
## indices to sum over
hold = len(n)*'x'
hold = hold[:m]+str(i)+hold[m+1:]
mi, ri = mind(n,hold), rind(n,hold)
mj, rj = mind(n,hold.replace(str(i),str(j))), rind(n,hold.replace(str(i),str(j)))
## fill rho
red[m][i,j] = np.sum([rho[ri[k],rj[k]] for k in range(len(ri))], axis=0, dtype=complex)
## return
return tuple([1.*rr for rr in red])
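## Worked example (added sketch): the Bell state (|00> + |11>)/sqrt(2) reduces
## to maximally mixed marginals on both subsystems:
##     psi = np.array([1, 0, 0, 1], dtype=complex) / np.sqrt(2)
##     rA, rB = REDUCE(couter(psi), n=[2, 2])   # each equals np.eye(2)/2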
###############
### Generate Arbitrary 2 or 3 dimensional rank-1 coarse-graining
###############
## function used in parameterizing SU3
def FF(v,w,x,y,z):
v,w,x,y,z = 1.*v,1.*w,1.*x,1.*y,1.*z
return -np.cos(v)*np.cos(w)*np.cos(x)*np.exp(1j*y) - np.sin(w)*np.sin(x)*np.exp(-1j*z)
## arbitrary SU matrix parameterized by real vector x in (0,1)
def SU(x):
if len(x)==3:
return SU2(x)
if len(x)==8:
return SU3(x)
## SU2 parameterized by real vector x
def SU2(x=np.zeros(3)):
## identity is at x = np.array([0.,0.,0.])
## periodic as each x=x+1, so use x in (0,1)
th = 2.*np.pi*x
su = np.array(
[
[ np.cos(th[0])*np.exp( 1j*th[1]), np.sin(th[0])*np.exp( 1j*th[2])],
[-np.sin(th[0])*np.exp(-1j*th[2]), np.cos(th[0])*np.exp(-1j*th[1])],
], dtype=complex)
return 1.*su
## SU3 parameterized by real vector x
def SU3(x=np.array([.5,.5,.5,0.,0.,0.,0.,0.])):
## https://arxiv.org/abs/1303.5904
## identity is at x = np.array([.5,.5,.5,0.,0.,0.,0.,0.])
## periodic as each x=x+1, so use x in (0,1)
x = 2.*np.pi*x
pi = np.pi
ph31, th31, th32, ph32, ch32, th21, ch21, ph21 = 1.*x
su = np.zeros((3,3), dtype=complex)
## top row
su[0,0] = FF(th31,0,0,ph31+pi,0)
su[0,1] = FF(th31-pi/2.,th32,pi,ph32,0)
su[0,2] = FF(th31-pi/2.,th32-pi/2.,pi,ch32,0)
## middle row
su[1,0] = FF(th31-pi/2.,pi,th21,ph21,0)
su[1,1] = FF(th31,th32,th21,-ph31+ph32+ph21,ch32+ch21)
su[1,2] = FF(th31,th32-pi/2.,th21,-ph31+ch32+ph21,ph32+ch21)
## bottom row
su[2,0] = FF(th31-pi/2.,pi,th21-pi/2.,ch21,0)
su[2,1] = FF(th31,th32,th21-pi/2,-ph31+ph32+ch21,ch32+ph21)
su[2,2] = FF(th31,th32-pi/2,th21-pi/2,-ph31+ch32+ch21,ph32+ph21)
## return
return 1.*su
## make a set of projectors from the rows of a unitary matrix (eig() above returns eigenvectors as rows)
def PROJ(U):
proj = np.array([couter(U[i]) for i in range(len(U))], dtype=complex)
return 1.*proj
## combine two sets of projectors by tensor product
def PROJPROD(PA,PB):
return 1.*np.array([kprod(pa,pb) for pa in PA for pb in PB], dtype=complex)
## combine a list of sets of projectors by tensor product
def PROJMP(PX):
proj = PX[0]
for j in range(1,len(PX)):
proj = PROJPROD(proj,PX[j])
return 1.*proj
## product projectors parameterized by real vector x
## n=(nA,nB,...) dictates how dimensions split into product
def PROJN(x=np.zeros(6), n=[2,2], factors_out=False):
n = np.array(n, dtype=int)
    if len(x)==np.sum(n**2-1):
# Copyright (c) 2015, <NAME>
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
from . import Inertial, WaveformModes, SpinWeights, DataNames
from . import h, hdot, sigma, news, psi0, psi1, psi2, psi3, psi4
from .waveform_base import WaveformBase, waveform_alterations
import sys
import warnings
import pprint
import numbers
import math
import numpy as np
from scipy import interpolate
import quaternion
import spherical_functions as sf
import spinsfast
def process_transformation_kwargs(ell_max, **kwargs):
# Build the supertranslation and spacetime_translation arrays
supertranslation = np.zeros((4,), dtype=complex) # For now; may be resized below
ell_max_supertranslation = 1 # For now; may be increased below
if "supertranslation" in kwargs:
supertranslation = np.array(kwargs.pop("supertranslation"), dtype=complex)
if supertranslation.dtype != "complex" and supertranslation.size > 0:
# I don't actually think this can ever happen...
raise TypeError(
"\nInput argument `supertranslation` should be a complex array with size>0.\n"
"Got a {} array of shape {}.".format(supertranslation.dtype, supertranslation.shape)
)
# Make sure the array has size at least 4, by padding with zeros
if supertranslation.size <= 4:
supertranslation = np.lib.pad(
supertranslation, (0, 4 - supertranslation.size), "constant", constant_values=(0.0,)
)
# Check that the shape is a possible array of scalar modes with complete (ell,m) data
ell_max_supertranslation = int(np.sqrt(len(supertranslation))) - 1
if (ell_max_supertranslation + 1) ** 2 != len(supertranslation):
raise ValueError(
"\nInput supertranslation parameter must contain modes from ell=0 up to some ell_max, "
"including\nall relevant m modes in standard order (see `spherical_functions` "
"documentation for details).\nThus, it must be an array with length given by a "
"perfect square; its length is {}".format(len(supertranslation))
)
# Check that the resulting supertranslation will be real
for ell in range(ell_max_supertranslation + 1):
for m in range(ell + 1):
i_pos = sf.LM_index(ell, m, 0)
i_neg = sf.LM_index(ell, -m, 0)
a = supertranslation[i_pos]
b = supertranslation[i_neg]
if abs(a - (-1.0) ** m * b.conjugate()) > 3e-16 + 1e-15 * abs(b):
raise ValueError(
f"\nsupertranslation[{i_pos}]={a} # (ell,m)=({ell},{m})\n"
+ "supertranslation[{}]={} # (ell,m)=({},{})\n".format(i_neg, b, ell, -m)
+ "Will result in an imaginary supertranslation."
)
spacetime_translation = np.zeros((4,), dtype=float)
spacetime_translation[0] = sf.constant_from_ell_0_mode(supertranslation[0]).real
spacetime_translation[1:4] = -sf.vector_from_ell_1_modes(supertranslation[1:4]).real
if "spacetime_translation" in kwargs:
st_trans = np.array(kwargs.pop("spacetime_translation"), dtype=float)
if st_trans.shape != (4,) or st_trans.dtype != "float":
raise TypeError(
"\nInput argument `spacetime_translation` should be a float array of shape (4,).\n"
"Got a {} array of shape {}.".format(st_trans.dtype, st_trans.shape)
)
spacetime_translation = st_trans[:]
supertranslation[0] = sf.constant_as_ell_0_mode(spacetime_translation[0])
supertranslation[1:4] = sf.vector_as_ell_1_modes(-spacetime_translation[1:4])
if "space_translation" in kwargs:
s_trans = np.array(kwargs.pop("space_translation"), dtype=float)
if s_trans.shape != (3,) or s_trans.dtype != "float":
raise TypeError(
"\nInput argument `space_translation` should be an array of floats of shape (3,).\n"
"Got a {} array of shape {}.".format(s_trans.dtype, s_trans.shape)
)
spacetime_translation[1:4] = s_trans[:]
supertranslation[1:4] = sf.vector_as_ell_1_modes(-spacetime_translation[1:4])
if "time_translation" in kwargs:
t_trans = kwargs.pop("time_translation")
if not isinstance(t_trans, float):
raise TypeError("\nInput argument `time_translation` should be a single float.\n" "Got {}.".format(t_trans))
spacetime_translation[0] = t_trans
supertranslation[0] = sf.constant_as_ell_0_mode(spacetime_translation[0])
# Decide on the number of points to use in each direction. A nontrivial supertranslation will introduce
# power in higher modes, so for best accuracy, we need to account for that. But we'll make it a firm
# requirement to have enough points to capture the original waveform, at least
w_ell_max = ell_max
ell_max = w_ell_max + ell_max_supertranslation
n_theta = kwargs.pop("n_theta", 2 * ell_max + 1)
n_phi = kwargs.pop("n_phi", 2 * ell_max + 1)
if n_theta < 2 * ell_max + 1 and abs(supertranslation[1:]).max() > 0.0:
warning = (
f"n_theta={n_theta} is small; because of the supertranslation, "
+ f"it will lose accuracy for anything less than 2*ell+1={ell_max}"
)
warnings.warn(warning)
if n_theta < 2 * w_ell_max + 1:
raise ValueError(f"n_theta={n_theta} is too small; " + "must be at least 2*ell+1={}".format(2 * w_ell_max + 1))
if n_phi < 2 * ell_max + 1 and abs(supertranslation[1:]).max() > 0.0:
warning = (
f"n_phi={n_phi} is small; because of the supertranslation, "
+ f"it will lose accuracy for anything less than 2*ell+1={ell_max}"
)
warnings.warn(warning)
if n_phi < 2 * w_ell_max + 1:
raise ValueError(f"n_phi={n_phi} is too small; " + "must be at least 2*ell+1={}".format(2 * w_ell_max + 1))
# Get the rotor for the frame rotation
frame_rotation = np.quaternion(*np.array(kwargs.pop("frame_rotation", [1, 0, 0, 0]), dtype=float))
if frame_rotation.abs() < 3e-16:
raise ValueError(f"frame_rotation={frame_rotation} should be a unit quaternion")
frame_rotation = frame_rotation.normalized()
# Get the boost velocity vector
boost_velocity = np.array(kwargs.pop("boost_velocity", [0.0] * 3), dtype=float)
    beta = np.linalg.norm(boost_velocity)
import unittest
import sys
import bottlechest as bn
import numpy as np
import scipy.sparse as sp
class TestContingency(unittest.TestCase):
def test_1d_int(self):
data = np.array([0, 1, 1, 2, 1])
bb = [0, 1, 1, 0, 0]
for b in [bb, np.array(bb, dtype=np.int8), np.array(bb, dtype=float)]:
counts, nans = bn.contingency(data, b, 2, 1)
            np.testing.assert_almost_equal(counts, [[1, 1, 1], [0, 2, 0]])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 8 14:55:00 2020
@author: rdamseh
"""
import numpy as np
from tqdm import tqdm
class CreateCylinderMappings:
'''
    This class creates 3D maps based on oriented cylinders built at each graph edge
'''
def __init__(self, g, to_return=['binary',
'velocity',
'so2',
'hct',
'gradient',
'propagation']):
self.g=g
self.GetImSize()
# set the needed outputs
self.tags={'binary':1,
'velocity':0,
'so2':0,
'hct':0,
'gradient':0,
'propagation':0}
for i in to_return:
self.tags[i]=1
def GetImSize(self):
# shift graph geometry to start from zero coordinates
# and
# set min radius to 2.0
min_rad=2.0
pos=np.array(self.g.GetNodesPos())
pos=pos-np.min(pos, axis=0)[None, :]
rad=np.array(self.g.GetRadii())
rad[rad<min_rad]=min_rad
maxr=np.max(rad)
for i, p, r in zip(self.g.GetNodes(), pos, rad):
self.g.node[i]['pos']=p+maxr
self.g.node[i]['r']=r
# get image size to be constructed
real_s = np.max(pos, axis=0) # real image size
new_s=real_s
new_s=tuple((np.ceil(new_s+(2*maxr))).astype(int)) # image size after padding
print('Image size: '+str(new_s))
self.real_s = real_s
self.new_s = new_s
self.niter = self.g.number_of_edges()
def cylinder(self, direction, radius, length):
'''
        Create an image cylinder
'''
r=length+2*radius
r=int(r)
#print('r value', r)
xrange, yrange, zrange = np.meshgrid(np.arange(-r, r+1),
np.arange(-r, r+1),
np.arange(-r, r+1), indexing='ij')
size=np.shape(xrange)
direction=direction.astype(float)
va=np.sqrt((direction**2).sum())
vnorm=direction/va
p=np.array([xrange.ravel(), yrange.ravel(), zrange.ravel()]).T
p=p.astype(float)
amp=np.sqrt(np.sum(p**2, axis=1))
amp[amp<1]=1
cos=np.abs(np.sum(p*vnorm, axis=1)/amp)
cos[cos>1]=1
sin=np.sqrt(1-cos**2)
shape0=(amp*sin)<radius # radius constrain
shape1=(amp*cos<length) # length constrain
a1=amp*cos-length
a2=amp*sin
shape2=(((a1**2+a2**2)**0.5)<(radius)) # rounded end constrain
shape=shape0*(shape2+shape1)
shape=np.reshape(shape, xrange.shape)
c0 = np.where(shape)
dot=np.sum(p*vnorm, axis=1)
dot=((dot-dot.min())/(dot.max()-dot.min()))
shape=shape*dot.reshape(shape.shape)
return c0, size
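    # Added note: a lattice point p lies inside the cylinder when its axial
    # component satisfies |p|*cos(theta) < length and its perpendicular
    # distance |p|*sin(theta) < radius; the rounded-end test keeps points
    # within `radius` of the cap centre so consecutive edge cylinders join smoothly.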
def get_cylinder_infos(self, g, radius_scaling=None):
info=dict()
if self.tags['binary']:
e=g.GetEdges()
pos1=np.array([g.node[i[0]]['pos'] for i in e])
pos2=np.array([g.node[i[1]]['pos'] for i in e])
radius1=np.array([g.node[i[0]]['r'] for i in e])
radius2=np.array([g.node[i[1]]['r'] for i in e])
radius=(radius1+radius2)/2.0# radius
if radius_scaling is not None:
radius*=radius_scaling
info['pos1']=pos1
info['pos2']=pos2
info['radius']=radius
vec=pos2-pos1
vec_amp=np.sqrt(np.sum(vec**2, axis=1))# norm
vec_amp[vec_amp==0]=1.0 # avoid divide by zero
vec_norm=vec/vec_amp[:, None]
            # for edges of length < 2, set the length to 2 to avoid disconnected maps
vec_amp[vec_amp<2.0]=2.0
info['vec_amp']=vec_amp
info['vec_norm']=vec_norm
if self.tags['so2']:
so21=np.array([g.node[i[0]]['so2'] for i in e])
so22=np.array([g.node[i[1]]['so2'] for i in e])
info['so21']=so21
info['so22']=so22
if self.tags['hct']:
types=np.array([g.node[i[0]]['type'] for i in e])
if types.max()==3: types-=1 # types should be 0-->Art., 1-->Vein, 2-->Capp
info['types']=types
if self.tags['velocity']:
velocity=np.array([g.node[i[0]]['velocity'] for i in e])
            dx=np.array([g.node[i[0]]['dx'] for i in e])
import os
import sys
import zipfile
import requests
import shutil
from shapely.geometry.polygon import Polygon
from shapely.ops import transform
import pyproj
from osgeo import gdal
from pandas import DataFrame
import pandas as pd
import numpy as np
from osgeo import gdal
import pickle
from tqdm import tqdm
import click
from threading import Thread
from multiprocessing.dummy import Pool as ThreadPool
import json
import subprocess
# pixel area only depends on latitude (not longitude)
# we re-project WGS84 to cylindrical equal area
def pixel_area(pix_deg):
project = lambda x, y: pyproj.transform(pyproj.Proj(init='epsg:4326'), pyproj.Proj(proj='cea'), x, y)
offset = pix_deg / 2
lts = np.arange(90-offset, -90, -pix_deg)
area = np.empty_like(lts)
lon = 0
for y, lat in enumerate(lts):
pixel1 = Polygon([(lon - offset, lat + offset), (lon + offset, lat + offset), (lon + offset, lat - offset), (lon - offset, lat - offset)])
pixel2 = transform(project, pixel1)
area[y] = pixel2.area
return area
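# Added usage sketch (hypothetical, not in the original script): summing the
# per-latitude areas over all longitude columns should roughly recover the
# Earth's surface area (~5.1e14 m^2):
#     area = pixel_area(pix_deg=1.0)     # one entry per 1-degree latitude band
#     total = area.sum() * (360 / 1.0)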
def get_flow_dir(row):
if not os.path.exists(f'tiles/dir/3s/{row.tile}'):
tqdm.write(f'Downloading {row.tile}...')
r = requests.get(row.url + row.tile)
with open(f'tiles/dir/3s/{row.tile}', 'wb') as f:
f.write(r.content)
try:
with zipfile.ZipFile(f'tiles/dir/3s/{row.tile}', 'r') as z:
z.extractall(path = 'tmp/')
flow_dir = gdal.Open(f'tmp/{row.tile[:-9]}/{row.tile[:-9]}/w001001.adf')
geo = flow_dir.GetGeoTransform()
ySize, xSize = flow_dir.RasterYSize, flow_dir.RasterXSize
flow_dir = flow_dir.ReadAsArray()
shutil.rmtree(f'tmp/{row.tile[:-9]}')
# data is padded into a 6000x6000 array (some tiles may be smaller):
        array_5x5 = np.ones((6000, 6000), dtype='uint8')
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 14 14:40:04 2017
@author: r.dewinter
"""
from testFunctions.TBTD import TBTD
from testFunctions.SRD import SRD
from testFunctions.WB import WB
from testFunctions.DBD import DBD
from testFunctions.SPD import SPD
from testFunctions.CSI import CSI
from testFunctions.WP import WP
from testFunctions.OSY import OSY
from testFunctions.CTP1 import CTP1
from testFunctions.CEXP import CEXP
from testFunctions.C3DTLZ4 import C3DTLZ4
from testFunctions.TNK import TNK
from testFunctions.SRN import SRN
from testFunctions.BNH import BNH
from CONSTRAINED_SMSEGO import CONSTRAINED_SMSEGO
import time
import numpy as np
## Real world like problems
problemCall = CSI
rngMin = np.array([0.5, 0.45, 0.5, 0.5, 0.875, 0.4, 0.4])
rngMax = np.array([1.5, 1.35, 1.5, 1.5, 2.625, 1.2, 1.2])
initEval = 30
maxEval = 200
smooth = 2
nVar = 7
runNo = 11
ref = np.array([42,4.5,13])
nconstraints = 10
epsilonInit=0.01
epsilonMax=0.02
s = time.time()
CONSTRAINED_SMSEGO(problemCall, rngMin, rngMax, ref, nconstraints, initEval, maxEval, smooth, runNo, epsilonInit, epsilonMax)
print(time.time()-s)
problemCall = WB
rngMin = np.array([0.125, 0.1, 0.1, 0.125])
rngMax = np.array([5, 10, 10, 5])
initEval = 30
maxEval = 200
smooth = 2
nVar = 4
runNo = 3
ref = np.array([350,0.1])
nconstraints = 5
epsilonInit=0.01
epsilonMax=0.02
s = time.time()
CONSTRAINED_SMSEGO(problemCall, rngMin, rngMax, ref, nconstraints, initEval, maxEval, smooth, runNo, epsilonInit, epsilonMax)
print(time.time()-s)
problemCall = TBTD
rngMin = np.array([1,0.0005,0.0005])
rngMax = np.array([3,0.05,0.05])
initEval = 30
maxEval = 200
smooth = 2
runNo = 5
ref = np.array([0.1,100000])
nconstraints = 3
epsilonInit=0.01
epsilonMax=0.02
s = time.time()
CONSTRAINED_SMSEGO(problemCall, rngMin, rngMax, ref, nconstraints, initEval, maxEval, smooth, runNo, epsilonInit, epsilonMax)
print(time.time()-s)
problemCall = DBD
rngMin = np.array([55, 75, 1000, 2])
rngMax = np.array([80, 110, 3000, 20])
initEval = 30
maxEval = 200
smooth = 2
nVar = 4
runNo = 3
ref = np.array([5,50])
nconstraints = 5
epsilonInit=0.01
epsilonMax=0.02
s = time.time()
CONSTRAINED_SMSEGO(problemCall, rngMin, rngMax, ref, nconstraints, initEval, maxEval, smooth, runNo, epsilonInit, epsilonMax)
print(time.time()-s)
problemCall = SPD
rngMin = np.array([150, 25, 12, 8, 14, 0.63])
rngMax = np.array([274.32, 32.31, 22, 11.71, 18, 0.75])
initEval = 30
maxEval = 200
smooth = 2
nVar = 6
runNo = 5
ref = np.array([16,19000,-260000])
nconstraints=9
epsilonInit=0.01
epsilonMax=0.02
s = time.time()
CONSTRAINED_SMSEGO(problemCall, rngMin, rngMax, ref, nconstraints, initEval, maxEval, smooth, runNo, epsilonInit, epsilonMax)
print(time.time()-s)
problemCall = WP
rngMin = np.array([0.01, 0.01, 0.01])
rngMax = np.array([0.45, 0.1, 0.1])
initEval = 30
maxEval = 200
smooth = 2
nVar = 3
runNo = 3
ref = np.array([83000, 1350, 2.85, 15989825, 25000])
nconstraints = 7
epsilonInit=0.01
epsilonMax=0.02
s = time.time()
CONSTRAINED_SMSEGO(problemCall, rngMin, rngMax, ref, nconstraints, initEval, maxEval, smooth, runNo, epsilonInit, epsilonMax)
print(time.time()-s)
##########################theoreticala problems
problemCall = BNH
rngMin = np.array([0,0])
rngMax = np.array([5,3])
initEval = 30
maxEval = 200
smooth = 2
runNo = 2
ref = np.array([140,50])
nconstraints = 2
epsilonInit=0.01
epsilonMax=0.02
s = time.time()
CONSTRAINED_SMSEGO(problemCall, rngMin, rngMax, ref, nconstraints, initEval, maxEval, smooth, runNo, epsilonInit, epsilonMax)
print(time.time()-s)
problemCall = SRN
rngMin = np.array([-20,-20])
rngMax = np.array([20, 20])
initEval = 30
maxEval = 200
smooth = 2
runNo = 8
ref = np.array([301,72])
nconstraints = 2
epsilonInit=0.01
epsilonMax=0.02
s = time.time()
CONSTRAINED_SMSEGO(problemCall, rngMin, rngMax, ref, nconstraints, initEval, maxEval, smooth, runNo, epsilonInit, epsilonMax)
print(time.time()-s)
problemCall = TNK
rngMin = np.array([1e-5,1e-5])
rngMax = np.array([np.pi, np.pi])
__author__ = '<NAME>'
from sklearn.datasets import make_classification
from sklearn.cross_validation import train_test_split
import subprocess
import numpy as np
import scipy
from quantify import CCforDouble
from quantification import Quantification
from time import sleep
class SVMperf():
def __init__(self,x_train,y_train,x_test,y_test):
self.train = 'train.txt'
self.test = 'test.txt'
self.x_train = x_train
self.y_train = y_train
self.x_test = x_test
self.y_test = y_test
# For automatic
self.getRepresentation(x_train,y_train,self.train)
self.getRepresentation(x_test,y_test,self.test)
sleep(1)
self.model = self.fitSVMperf(self.train)#'model.txt'#
self.predictions = self.predictSVMperf(self.test,self.model)
def getRepresentation(self, x, y, name = None):
if name!=None:
file = open(str(name), 'w')
else:
file = open('name.txt', 'w')
print(len(y))
# type ndarray
        if isinstance(x, np.ndarray):
for i in range(len(y)):
if y[i] == 1:
file.write('1 ')
for m in range(len(x[i])):
if x[i][m]!=0:
file.write(str(m+1)+':'+str(x[i][m])+' ')
file.write('\n')
else:
file.write('-1 ')
for m in range(len(x[i])):
if x[i][m]!=0:
file.write(str(m+1)+':'+str(x[i][m])+' ')
file.write('\n')
file.close()
# type csr_matrix
        elif scipy.sparse.issparse(x):
for i in range(len(y)):
if y[i] == 1:
file.write('1 ')
_x = x.getrow(i).toarray()[0]
for j in range(len(_x)):
if _x[j]!=0:
file.write(str(j+1)+':'+str(_x[j])+' ')
file.write('\n')
else:
file.write('-1 ')
_x = x.getrow(i).toarray()[0]
for j in range(len(_x)):
if _x[j]!=0:
file.write(str(j+1)+':'+str(_x[j])+' ')
file.write('\n')
file.close()
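    # Added note: the writer above emits sparse libSVM-style lines, e.g.
    #   "1 3:0.5 7:1.2" -- a +/-1 label followed by 1-based index:value pairs
    # for the nonzero features, which is the format svm_perf_learn expects.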
def fitSVMperf(self, trainData, model = 'model.txt'):
subprocess.Popen(["svm_kld/svm-perf-original/svm_perf_learn","-c","20",trainData,model], stdout=subprocess.PIPE)
sleep(1)
return model
def predictSVMperf(self, testData, model, predictions = 'predictions.txt'):
self.description = subprocess.Popen(["svm_kld/svm-perf-original/svm_perf_classify",testData,model,predictions], stdout=subprocess.PIPE)
sleep(1)
return predictions
def getDescriptionSVM(self):
return self.description.communicate()[0]
def getPredictions(self):
q = []
f = open(self.predictions,'r')
for line in f:
            if float(line) >= 0.62:
q.append(1)
else:
q.append(0)
f.close()
return np.array(q)
    def getKLD(self,p, q):
        p = np.asarray(p, dtype=np.float)
        q = np.asarray(q, dtype=np.float)
        # Completed line (assumption): standard discrete KL divergence D(p||q),
        # skipping bins where p is zero
        return np.sum(np.where(p != 0, p * np.log(p / q), 0))
import numpy as np
from sklearn import preprocessing
from datetime import datetime
def _corr(C):
    R=np.empty_like(C)
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function)
import numpy as np
import scipy.stats as scistats
import scipy.linalg as sl
from enterprise import constants as const
from enterprise.signals import signal_base
try:
import cPickle as pickle
except:
import pickle
from enterprise.pulsar import Pulsar
from enterprise import constants as const
from PTMCMCSampler.PTMCMCSampler import PTSampler as ptmcmc
from .sampler import JumpProposal, get_parameter_groups
class HyperModel(object):
"""
Class to define hyper-model that is the concatenation of all models.
"""
def __init__(self, models, log_weights=None):
self.models = models
self.num_models = len(self.models)
self.log_weights = log_weights
#########
self.param_names, ind = np.unique(np.concatenate([p.param_names
for p in self.models.values()]),
return_index=True)
self.param_names = self.param_names[np.argsort(ind)]
self.param_names = np.append(self.param_names, 'nmodel').tolist()
#########
#########
self.params = [p for p in self.models[0].params] # start of param list
uniq_params = [str(p) for p in self.models[0].params] # which params are unique
for model in self.models.values():
# find differences between next model and concatenation of previous
param_diffs = np.setdiff1d([str(p) for p in model.params], uniq_params)
mask = np.array([str(p) in param_diffs for p in model.params])
# concatenate for next loop iteration
uniq_params = np.union1d([str(p) for p in model.params], uniq_params)
# extend list of unique parameters
self.params.extend([pp for pp in np.array(model.params)[mask]])
#########
#########
# get signal collections
self.snames = dict.fromkeys(np.unique(sum(sum([[[qq.signal_name for qq in pp._signals]
for pp in self.models[mm]._signalcollections]
for mm in self.models], []), [])))
for key in self.snames: self.snames[key] = []
for mm in self.models:
for sc in self.models[mm]._signalcollections:
for signal in sc._signals:
self.snames[signal.signal_name].extend(signal.params)
for key in self.snames: self.snames[key] = list(set(self.snames[key]))
for key in self.snames:
uniq_params, ind = np.unique([p.name for p in self.snames[key]],
return_index=True)
uniq_params = uniq_params[np.argsort(ind)].tolist()
all_params = [p.name for p in self.snames[key]]
self.snames[key] = np.array(self.snames[key])[[all_params.index(q)
for q in uniq_params]].tolist()
#########
def get_lnlikelihood(self, x):
# find model index variable
idx = list(self.param_names).index('nmodel')
nmodel = int(np.rint(x[idx]))
# find parameters of active model
q = []
for par in self.models[nmodel].param_names:
idx = self.param_names.index(par)
q.append(x[idx])
# only active parameters enter likelihood
active_lnlike = self.models[nmodel].get_lnlikelihood(q)
if self.log_weights is not None:
active_lnlike += self.log_weights[nmodel]
return active_lnlike
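    # Note (added): this implements product-space sampling -- the integer-valued
    # `nmodel` coordinate selects the active model, and only that model's
    # parameters enter the likelihood, so the chain's dwell time in each model
    # estimates the posterior odds.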
def get_lnprior(self, x):
# find model index variable
idx = list(self.param_names).index('nmodel')
nmodel = int(np.rint(x[idx]))
if nmodel not in self.models.keys():
return -np.inf
else:
lnP = 0
for p in self.models.values():
q = []
for par in p.param_names:
idx = self.param_names.index(par)
q.append(x[idx])
lnP += p.get_lnprior(np.array(q))
return lnP
def get_parameter_groups(self):
groups = []
for p in self.models.values():
groups.extend(get_parameter_groups(p))
        list(np.unique(groups))
        # Completed lines (assumption, mirroring enterprise_extensions): sample
        # the hyper-model index `nmodel` in its own group
        groups.extend([[len(self.param_names) - 1]])
        return groups
import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GymEnv
from garage.tf.q_functions import ContinuousMLPQFunction
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
class TestContinuousMLPQFunction(TfGraphTestCase):
@pytest.mark.parametrize('hidden_sizes', [(1, ), (2, ), (3, ), (1, 1),
(2, 2)])
def test_q_vals(self, hidden_sizes):
env = GymEnv(DummyBoxEnv())
obs_dim = env.spec.observation_space.flat_dim
act_dim = env.spec.action_space.flat_dim
qf = ContinuousMLPQFunction(env_spec=env.spec,
action_merge_layer=0,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=None,
hidden_w_init=tf.ones_initializer(),
output_w_init=tf.ones_initializer())
obs = np.full(obs_dim, 1).flatten()
act = np.full(act_dim, 1).flatten()
expected_output = np.full((1, ),
(obs_dim + act_dim) * np.prod(hidden_sizes))
outputs = qf.get_qval([obs], [act])
assert np.array_equal(outputs[0], expected_output)
outputs = qf.get_qval([obs, obs, obs], [act, act, act])
for output in outputs:
assert np.array_equal(output, expected_output)
@pytest.mark.parametrize('obs_dim, action_dim', [
((1, ), (1, )),
((2, ), (2, )),
((1, 1), (1, )),
((2, 2), (2, )),
])
def test_output_shape(self, obs_dim, action_dim):
env = GymEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
qf = ContinuousMLPQFunction(env_spec=env.spec)
env.reset()
obs = env.step(1).observation
obs = obs.flatten()
act = np.full(action_dim, 0.5).flatten()
outputs = qf.get_qval([obs], [act])
assert outputs.shape == (1, 1)
@pytest.mark.parametrize('obs_dim, action_dim', [
((1, ), (1, )),
((2, ), (2, )),
((1, 1), (1, )),
((2, 2), (2, )),
])
def test_build(self, obs_dim, action_dim):
env = GymEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
qf = ContinuousMLPQFunction(env_spec=env.spec,
action_merge_layer=0,
hidden_sizes=(1, ),
hidden_nonlinearity=None,
hidden_w_init=tf.ones_initializer(),
output_w_init=tf.ones_initializer())
obs = np.full(obs_dim, 1).flatten()
act = np.full(action_dim, 1).flatten()
output1 = qf.get_qval([obs], [act])
input_var1 = tf.compat.v1.placeholder(tf.float32,
shape=(None, obs.shape[0]))
input_var2 = tf.compat.v1.placeholder(tf.float32,
shape=(None, act.shape[0]))
q_vals = qf.build(input_var1, input_var2, 'another')
output2 = self.sess.run(q_vals,
feed_dict={
input_var1: [obs],
input_var2: [act]
})
expected_output = np.full((1, ),
np.prod(obs_dim) + np.prod(action_dim))
assert np.array_equal(output1, output2)
assert np.array_equal(output2[0], expected_output)
@pytest.mark.parametrize('obs_dim, action_dim', [
((1, ), (1, )),
((2, ), (2, )),
((1, 1), (1, )),
((2, 2), (2, )),
])
def test_is_pickleable(self, obs_dim, action_dim):
env = GymEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
qf = ContinuousMLPQFunction(env_spec=env.spec)
env.reset()
obs = env.step(1).observation
obs = obs.flatten()
act = np.full(action_dim, 0.5).flatten()
with tf.compat.v1.variable_scope('ContinuousMLPQFunction', reuse=True):
bias = tf.compat.v1.get_variable('mlp_concat/hidden_0/bias')
# assign it to all one
bias.load(tf.ones_like(bias).eval())
output1 = qf.get_qval([obs], [act])
h_data = pickle.dumps(qf)
with tf.compat.v1.Session(graph=tf.Graph()):
qf_pickled = pickle.loads(h_data)
output2 = qf_pickled.get_qval([obs], [act])
            assert np.array_equal(output1, output2)
import numpy as np
import gmpy2
from gmpy2 import mpfr, mpc
import flamp
def to_fp(A):
return np.array(A, float)
def to_cpx(A):
return np.array(A, complex)
### linalg
def test_qr_real():
n = 5
A = np.random.rand(n, n)
AA = mpfr(1) * A
Q, R = flamp.qr(AA)
assert Q.shape == (n, n) and R.shape == (n, n)
assert np.allclose(to_fp(Q.T @ Q), np.eye(n))
assert np.allclose(to_fp(Q @ R), A)
assert np.all(np.tril(R, -1) == 0)
## special case: size 0 matrix
AA = flamp.zeros((4, 0))
Q, R = flamp.qr(AA)
assert np.allclose(to_fp(Q), np.eye(4))
assert R.shape == (4, 0)
def test_qr_complex():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
AA = mpfr(1) * A
Q, R = flamp.qr(AA)
assert Q.shape == (n, n) and R.shape == (n, n)
assert np.allclose(to_cpx(Q.T.conj() @ Q), np.eye(n))
assert np.allclose(to_cpx(Q @ R), A)
assert np.all(np.tril(R, -1) == 0)
def test_inverse_real():
n = 5
A = np.random.rand(n, n)
AA = mpfr(1) * A
Ainv = flamp.inverse(AA)
assert A.shape == (n, n)
assert np.allclose(to_fp(Ainv @ A), np.eye(n))
def test_inverse_complex():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
AA = mpfr(1) * A
Ainv = flamp.inverse(AA)
assert A.shape == (n, n)
assert np.allclose(to_cpx(Ainv @ A), np.eye(n))
def test_lu_solve_real():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n)
AA = mpfr(1) * A
x = flamp.lu_solve(AA, b)
assert x.shape == (n,)
assert np.allclose(to_fp(A @ x), b)
def test_lu_solve_real_block():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n, 3)
AA = mpfr(1) * A
x = flamp.lu_solve(AA, b)
assert x.shape == (n, 3)
assert np.allclose(to_fp(A @ x), b)
def test_lu_solve_complex():
n = 5
A, b = np.random.rand(n, n) + 1j * np.random.rand(n, n), np.random.rand(n)
AA = mpfr(1) * A
x = flamp.lu_solve(AA, b)
assert x.shape == (n,)
assert np.allclose(to_cpx(A @ x), b)
def test_lu():
n = 5
A = np.random.rand(n, n) + 1j * np.random.rand(n, n)
AA = mpfr(1) * A
P, L, U = flamp.lu(AA)
assert np.allclose(to_cpx(P @ AA), to_cpx(L @ U))
def test_cholesky_solve_real():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n)
A = A.T @ A
AA = mpfr(1) * A
x = flamp.cholesky_solve(AA, b)
assert x.shape == (n,)
assert np.allclose(to_fp(A @ x), b)
def test_cholesky_solve_real_block():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n, 3)
A = A.T @ A
AA = mpfr(1) * A
x = flamp.cholesky_solve(AA, b)
assert x.shape == (n, 3)
assert np.allclose(to_fp(A @ x), b)
def test_qr_solve_real():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n)
AA = mpfr(1) * A
x = flamp.qr_solve(AA, b)
assert x.shape == (n,)
assert np.allclose(to_fp(A @ x), b)
def test_qr_solve_real_block():
n = 5
A, b = np.random.rand(n, n), np.random.rand(n, 3)
AA = mpfr(1) * A
x = flamp.qr_solve(AA, b)
assert x.shape == (n, 3)
assert np.allclose(to_fp(A @ x), b)
def test_solve_real_overdet():
n = 5
A, b = np.random.rand(n + 2, n), np.random.rand(n + 2, 3)
AA = mpfr(1) * A
x = flamp.qr_solve(AA, b)
x2 = flamp.lu_solve(AA, b)
assert x.shape == (n, 3)
assert x2.shape == (n, 3)
assert np.allclose(to_fp(x), to_fp(x2))
def test_det():
n = 5
E = np.random.rand(n) # random eigenvalues
U = mpfr(1) * np.random.rand(n, n)
Uinv = flamp.inverse(U)
A = U @ np.diag(E) @ Uinv
det = flamp.det(A)
assert np.allclose(to_fp(det), np.prod(E))
### eigen
def test_eig_real():
    A = mpfr(1) * np.arange(9)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 27 08:44:52 2021
@author: gianni
"""
from scipy import constants,optimize
import numpy as np
import matplotlib.pyplot as plt
import os
from astropy.io import fits
import h5py
this_folder = os.path.dirname(os.path.abspath(__file__))
R_Sun = 6.955e8
L_Sun = 3.828e26
Rydberg_J = constants.physical_constants['Rydberg constant times hc in J'][0] #J
ionisation_potential = {'C':11.26030*constants.eV, 'O':13.61806*constants.eV} #J
class RadiationSpectrum():
def flux(self,wavelength,**kwargs):
#W/m2/m
raise NotImplementedError
class DraineISF(RadiationSpectrum):
#interstellar radiation field, original from Draine (1978),
#here in the form of Lee (1984)
#(https://ui.adsabs.harvard.edu/abs/1984ApJ...282..172L/abstract)
lambda_min = 91.2*constants.nano
lambda_max = 200*constants.nano
lambda_grid = np.linspace(lambda_min,lambda_max,1000)
def __init__(self,scaling=(lambda wavelength: 1)):
self.scaling = scaling
def flux(self,wavelength):
#for the power law, the wavelenght has to be in nm
#photons/m2/s/m:
photon_flux= 3.2e13*((wavelength/constants.nano)**-3\
- 1.61e2*(wavelength/constants.nano)**-4\
+ 6.41e3*(wavelength/constants.nano)**-5)\
* constants.centi**-2*constants.nano**-1
photon_energy = constants.h*constants.c/wavelength
flux = photon_flux*photon_energy
valid_region = (wavelength>=self.lambda_min) & (wavelength<=self.lambda_max)
flux = np.where(valid_region,flux,0)
return flux*self.scaling(wavelength=wavelength)
class HabingField(RadiationSpectrum):
def __init__(self,scaling=(lambda wavelength: 1)):
self.scaling = scaling
data_filepath = os.path.join(this_folder,'habing_field.txt')
data = np.loadtxt(data_filepath)
self.lambda_grid = data[:,0]*constants.nano
photon_energy = constants.h*constants.c/self.lambda_grid
self.flux_grid = data[:,1]/constants.centi**2/constants.nano * photon_energy #W/m2/m
def flux(self,wavelength):
return np.interp(x=wavelength,xp=self.lambda_grid,fp=self.flux_grid,
left=0,right=0) * self.scaling(wavelength=wavelength)
class StellarAtmosphere(RadiationSpectrum):
def plot_model(self,label=None):
fig,ax = plt.subplots()
ax.plot(self.lambda_grid/constants.nano,self.modelflux,'.-',label=label)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('lambda [nm]')
ax.set_ylabel('flux at {:g} au [W/m2/m]'.format(self.ref_distance/constants.au))
if label is not None:
ax.legend(loc='best')
return ax
def flux(self,wavelength,distance):
return np.interp(wavelength,self.lambda_grid,self.modelflux,left=0,right=0)\
* (self.ref_distance/distance)**2
def luminosity(self):
flux_at_ref_distance = self.flux(wavelength=self.lambda_grid,
distance=self.ref_distance)
return np.trapz(flux_at_ref_distance,self.lambda_grid)\
* 4*np.pi*self.ref_distance**2
def _scale_spectrum(self,scaling):
self.modelflux *= scaling(wavelength=self.lambda_grid)
def write_modelflux_to_file(self,filepath,distance):
flux = self.flux(wavelength=self.lambda_grid,distance=distance)
np.savez(filepath,wavelength=self.lambda_grid,flux=flux)
class ATLASModelAtmosphere(StellarAtmosphere):
Teff_low_grid = np.arange(3000,12999,250)
Teff_high_grid = np.arange(13000,50001,1000)
Teff_grid = np.concatenate((Teff_low_grid,Teff_high_grid))
metallicity_grid = np.array((-2.5,-2.0,-1.5,-1.0,-0.5,0.0,0.2,0.5))
logg_grid = np.arange(0,5.1,0.5)
model_folder = os.path.join(this_folder,'ck04models')
max_RJ_wavelength = 3*constants.milli
@staticmethod
def assert_within_grid(value,grid):
assert np.min(grid) <= value <= np.max(grid)
@staticmethod
def get_closest_grid_value(value,grid):
index = np.argmin(np.abs(grid-value))
return grid[index]
def __init__(self,Teff,metallicity,logg,Rstar=None,obs_luminosity=None,
calibration_spec=None,verbose=False,scaling=None):
'''There are three ways to set the luminosity of the star:
1) define Rstar
2) define obs_luminosity, so that the model flux will be scaled
3) define calibration_spec (i.e. a spectrum, to which the model spectrum
will be scaled to)'''
self.assert_within_grid(value=Teff,grid=self.Teff_grid)
self.assert_within_grid(value=metallicity,grid=self.metallicity_grid)
self.assert_within_grid(value=logg,grid=self.logg_grid)
self.verbose = verbose
self.read_model(metallicity=metallicity,Teff=Teff,logg=logg)
self.extrapolate_RJ()
if Rstar is not None:
assert obs_luminosity is None and calibration_spec is None
self.ref_distance = Rstar
elif obs_luminosity is not None:
assert Rstar is None and calibration_spec is None
self.obs_luminosity = obs_luminosity
if self.verbose:
print('Rstar not specified, going to scale with luminosity')
self.calibrate_with_luminosity()
elif calibration_spec is not None:
assert Rstar is None and obs_luminosity is None
self.calibration_spec = calibration_spec
if self.verbose:
print('going to calibrate with provided spectrum')
self.calibrate_with_spectrum()
else:
raise ValueError('unable to define absolute flux and/or reference distance')
#now that modelflux is calibrated, I can apply the scaling:
if scaling is not None:
self._scale_spectrum(scaling=scaling)
def read_model(self,metallicity,Teff,logg):
self.metallicity = self.get_closest_grid_value(
value=metallicity,grid=self.metallicity_grid)
self.Teff = self.get_closest_grid_value(value=Teff,grid=self.Teff_grid)
self.logg = self.get_closest_grid_value(value=logg,grid=self.logg_grid)
if self.verbose:
print('input metallicity = {:g}, grid metallicity = {:g}'\
.format(metallicity,self.metallicity))
print('input Teff = {:g} K, grid Teff = {:g} K'.format(Teff,self.Teff))
print('input logg = {:g}, grid logg = {:g}'.format(logg,self.logg))
self.metallicity_str = 'ck'
if self.metallicity < 0:
sign_str = 'm'
else:
sign_str = 'p'
self.metallicity_str += '{:s}{:02d}'.format(
sign_str,np.abs(int(10*self.metallicity)))
if self.verbose:
print('metallicity ID: {:s}'.format(self.metallicity_str))
#this string is the key to access the flux for the specified log(g);
#for example for log(g)=4, it would be "g40"; for log(g)=4.5 it would be "g45":
logg_string = ('g%.1f'%self.logg).replace('.','')
filename = self.metallicity_str+'_{:d}.fits'.format(int(self.Teff))
if self.verbose:
print('filename: {:s}'.format(filename))
filepath = os.path.join(self.model_folder,self.metallicity_str,filename)
hdulist = fits.open(filepath)
modeldata = hdulist[1].data
hdulist.close()
self.lambda_grid = modeldata['WAVELENGTH'].astype(np.float64)*constants.angstrom
#flux in [W/m2/m] at the stellar surface:
self.modelflux = modeldata[logg_string].astype(np.float64)\
*constants.erg/constants.centi**2/constants.angstrom
def extrapolate_RJ(self):
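        # Added note: in the Rayleigh-Jeans limit B_lambda ~ T/lambda^4, so the
        # tail is anchored at the last model point via C = lambda_max^4 * F(lambda_max)
        # and extended as F(lambda) = C / lambda^4 out to max_RJ_wavelength.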
max_wavelength = self.lambda_grid[-1]
prop_constant = max_wavelength**4*self.modelflux[-1]
RJ_wavelength = np.logspace(np.log10(max_wavelength*1.05),
np.log10(self.max_RJ_wavelength),100)
RJ_flux = prop_constant/RJ_wavelength**4
self.original_lambda_grid = self.lambda_grid.copy()
self.lambda_grid = np.concatenate((self.lambda_grid,RJ_wavelength))
self.modelflux = np.concatenate((self.modelflux,RJ_flux))
def calibrate_with_luminosity(self):
self.ref_distance = 1*constants.au
uncalibrated_luminosity = self.luminosity()
self.modelflux *= self.obs_luminosity/uncalibrated_luminosity
assert np.isclose(self.obs_luminosity,self.luminosity(),rtol=1e-6,atol=0)
def calibrate_with_spectrum(self):
cal_wave = self.calibration_spec['wave']
cal_flux = self.calibration_spec['flux']
self.ref_distance = self.calibration_spec['ref_distance']
try:
cal_errors = self.calibration_spec['error']
except KeyError:
            cal_errors = np.ones_like(cal_flux)
import numpy as np
from gym import spaces
class EpsilonWrapper(object):
def __init__(self, env, attrs=('distance_threshold', 'rotation_threshold'), compute_reward_with_internal=None):
"""Attrs is list of attributes (strings like "distance_threshold"). Only valid ones are used. """
self.env = env
if hasattr(self.env, 'mode'):
assert self.env.mode == 0
if compute_reward_with_internal is not None:
self.compute_reward_with_internal = compute_reward_with_internal
obs = self.env.reset()
self.internal_len = obs['achieved_goal'].shape[0]
self.attrs = []
self.defaults = []
for attr in attrs:
if hasattr(self.env, attr):
self.attrs.append(attr)
self.defaults.append(getattr(self.env, attr))
self.defaults = np.array(self.defaults)
self.observation_space = spaces.Dict(dict(
desired_goal=spaces.Box(-np.inf, np.inf, shape=(self.internal_len + len(self.attrs),), dtype='float32'),
achieved_goal=spaces.Box(-np.inf, np.inf, shape=(self.internal_len + len(self.attrs),), dtype='float32'),
observation=spaces.Box(-np.inf, np.inf, shape=(obs['observation'].shape[0],), dtype='float32'),
))
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
return getattr(self.env, attr)
def compute_reward(self, achieved_goal, goal, info):
internal_achieved = achieved_goal[:self.internal_len]
internal_goal = goal[:self.internal_len]
if self.compute_reward_with_internal:
for attr, eps in zip(self.attrs, self.defaults):
setattr(self.env, attr, eps)
internal_reward = self.env.compute_reward(internal_achieved, internal_goal, info)
return internal_reward
# Otherwise, use epsilon in the goal to determine external reward
for attr, eps in zip(self.attrs, goal[self.internal_len:]):
setattr(self.env, attr, eps)
reward = self.env.compute_reward(internal_achieved, internal_goal, info)
return reward
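    # Note (added): the tail of each goal vector carries the threshold
    # attributes themselves, so downstream goal relabelling (e.g. HER-style)
    # can tighten or loosen the success tolerance on a per-transition basis.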
def step(self, action):
obs, reward, done, info = self.env.step(action)
obs['achieved_goal'] = np.concatenate([obs['achieved_goal'], self.defaults])
obs['desired_goal'] = np.concatenate([obs['desired_goal'], self.defaults])
reward = self.compute_reward(obs['achieved_goal'], obs['desired_goal'], info)
return obs, reward, done, info
def _get_obs(self):
"""just adds a large epsilon (content doesn't super matter)"""
obs = self.env._get_obs()
obs['achieved_goal'] = np.concatenate([obs['achieved_goal'], self.defaults])
obs['desired_goal'] = np.concatenate([obs['desired_goal'], self.defaults])
return obs
def _sample_goal(self):
"""just adds a large epsilon (content doesn't super matter)"""
goal = self.env._sample_goal()
goal = np.concatenate([goal, self.defaults])
return goal
def reset(self):
obs = self.env.reset()
obs['achieved_goal'] = np.concatenate([obs['achieved_goal'], self.defaults])
obs['desired_goal'] = np.concatenate([obs['desired_goal'], self.defaults])
return obs
class OldEpsilonWrapper(object):
def __init__(self, env, epsilon, compute_reward_with_internal=None):
"""Epsilon is float or np.array, specifying default epsilon"""
self.env = env
if hasattr(self.env, 'mode'):
assert self.env.mode == 0
if compute_reward_with_internal is not None:
self.compute_reward_with_internal = compute_reward_with_internal
obs = self.env.reset()
self.default_epsilon = np.ones_like(obs['desired_goal']) * epsilon
self.observation_space = spaces.Dict(dict(
desired_goal=spaces.Box(-np.inf, np.inf, shape=(obs['achieved_goal'].shape[0]*2,), dtype='float32'),
achieved_goal=spaces.Box(-np.inf, np.inf, shape=(obs['achieved_goal'].shape[0]*2,), dtype='float32'),
observation=spaces.Box(-np.inf, np.inf, shape=(obs['observation'].shape[0],), dtype='float32'),
))
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
return getattr(self.env, attr)
def compute_reward(self, achieved_goal, goal, info):
internal_len = len(achieved_goal) // 2
internal_achieved = achieved_goal[:internal_len]
internal_goal = goal[:internal_len]
if self.compute_reward_with_internal:
internal_reward = self.env.compute_reward(internal_achieved, internal_goal, info)
return internal_reward
# Otherwise, use epsilon to determine external reward
epsilon = goal[internal_len:]
success = np.all(np.abs(internal_achieved - internal_goal) < epsilon)
return success - 1.
def step(self, action):
obs, reward, done, info = self.env.step(action)
obs['achieved_goal'] = np.concatenate([obs['achieved_goal'], self.default_epsilon])
obs['desired_goal'] = np.concatenate([obs['desired_goal'], self.default_epsilon])
reward = self.compute_reward(obs['achieved_goal'], obs['desired_goal'], info)
return obs, reward, done, info
def _get_obs(self):
"""just adds a large epsilon (content doesn't super matter)"""
obs = self.env._get_obs()
obs['achieved_goal'] = np.concatenate([obs['achieved_goal'], self.default_epsilon])
obs['desired_goal'] = np.concatenate([obs['desired_goal'], self.default_epsilon])
return obs
def _sample_goal(self):
"""just adds a large epsilon (content doesn't super matter)"""
goal = self.env._sample_goal()
goal = np.concatenate([goal, self.default_epsilon])
return goal
def reset(self):
obs = self.env.reset()
obs['achieved_goal'] = np.concatenate([obs['achieved_goal'], self.default_epsilon])
        obs['desired_goal'] = np.concatenate([obs['desired_goal'], self.default_epsilon])
        return obs
# coding=utf-8
import numpy as np
import cv2
def SemanticLabelResize(dst_size, class_amount):
'''
this function return a lambda function which can be use to resize the semantic label
dst_size: [height, width]
class_amount: the class amount include the background
resize: the function which receive a source label and return the resized label
the source label should be in format: [batch_size, height, width, 1] or [batch_size, height, width]
the resized label is in format: [batch_size] + dst_size (dims:3)
'''
    assert isinstance(dst_size, (list, tuple))
def resize(label):
if label.shape[-1] == 1:
            one_hot = np.eye(class_amount, dtype=np.float32)
# -*- coding: utf-8 -*-
"""
This script contains the transformations between world and different sensors.
"""
# Credit to https://github.com/MukhlasAdib/CARLA-2DBBox/blob/master/carla_vehicle_annotator.py
# Author: <NAME> <<EMAIL>>
# License: MIT
import numpy as np
from matplotlib import cm
from opencda.opencda_carla import Transform
VIRIDIS = np.array(cm.get_cmap('viridis').colors)
VID_RANGE = np.linspace(0.0, 1.0, VIRIDIS.shape[0])
def get_camera_intrinsic(sensor):
"""
Retrieve the camera intrinsic matrix
Args:
-sensor (carla.sensor.camera.rgb): The CARLA sensor object.
Returns:
-matrix_k (np.ndarray): The 2D intrinsic matrix.
"""
VIEW_WIDTH = int(sensor.attributes['image_size_x'])
VIEW_HEIGHT = int(sensor.attributes['image_size_y'])
VIEW_FOV = int(float(sensor.attributes['fov']))
matrix_k = np.identity(3)
matrix_k[0, 2] = VIEW_WIDTH / 2.0
matrix_k[1, 2] = VIEW_HEIGHT / 2.0
matrix_k[0, 0] = matrix_k[1, 1] = VIEW_WIDTH / (2.0 * np.tan(VIEW_FOV * np.pi / 360.0))
return matrix_k
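# Added note: the focal length above follows the pinhole relation
#   f = image_width / (2 * tan(FOV / 2)),
# with the principal point placed at the image centre.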
def create_bb_points(vehicle):
"""
Extract the eight vertices of the bounding box from the vehicle.
Args:
-vehicle (carla.Vehicle or ObstacleVehicle): The object vehicle.
Returns:
- bbx(np.ndarray): 3d bounding box.
"""
bbx = np.zeros((8, 4))
extent = vehicle.bounding_box.extent
bbx[0, :] = np.array([extent.x, extent.y, -extent.z, 1])
bbx[1, :] = np.array([-extent.x, extent.y, -extent.z, 1])
bbx[2, :] = np.array([-extent.x, -extent.y, -extent.z, 1])
bbx[3, :] = np.array([extent.x, -extent.y, -extent.z, 1])
bbx[4, :] = np.array([extent.x, extent.y, extent.z, 1])
bbx[5, :] = np.array([-extent.x, extent.y, extent.z, 1])
bbx[6, :] = np.array([-extent.x, -extent.y, extent.z, 1])
bbx[7, :] = np.array([extent.x, -extent.y, extent.z, 1])
return bbx
def x_to_world_transformation(transform):
"""
Get the transformation matrix from x(it can be vehicle or sensor) coordinates to world coordinate.
Args:
-transform (carla.Transform): The transform that contains location and rotation.
Returns:
-matrix (np.ndarray): The transformation matrix
"""
rotation = transform.rotation
location = transform.location
# used for rotation matrix
c_y = np.cos(np.radians(rotation.yaw))
s_y = np.sin(np.radians(rotation.yaw))
c_r = np.cos(np.radians(rotation.roll))
s_r = np.sin(np.radians(rotation.roll))
c_p = np.cos(np.radians(rotation.pitch))
s_p = np.sin(np.radians(rotation.pitch))
matrix = np.identity(4)
# translation matrix
matrix[0, 3] = location.x
matrix[1, 3] = location.y
matrix[2, 3] = location.z
# rotation matrix
matrix[0, 0] = c_p * c_y
matrix[0, 1] = c_y * s_p * s_r - s_y * c_r
matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r
matrix[1, 0] = s_y * c_p
matrix[1, 1] = s_y * s_p * s_r + c_y * c_r
matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r
matrix[2, 0] = s_p
matrix[2, 1] = -c_p * s_r
matrix[2, 2] = c_p * c_r
return matrix
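# Added note: the rotation block is a yaw-pitch-roll composition
# R = R_z(yaw) @ R_y(pitch) @ R_x(roll), with the sign on the pitch terms
# following CARLA's UE4-style (left-handed) convention; the last column holds
# the translation.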
def bbx_to_world(cords, vehicle):
"""
Convert bounding box coordinate at vehicle reference to world reference.
Args:
-cords (np.ndarray): Bounding box coordinates with 8 vertices, shape (n, 4).
-vehicle (carla.vehicle or ObstacleVehicle): vehicle object.
Returns:
-bb_world_cords (np.ndarray): Bounding box coordinates under word reference.
"""
bb_transform = Transform(vehicle.bounding_box.location)
# bounding box to vehicle transformation matrix
bb_vehicle_matrix = x_to_world_transformation(bb_transform)
# vehicle to world transformation matrix
vehicle_world_matrix = x_to_world_transformation(vehicle.get_transform())
# bounding box to world transformation matrix
bb_world_matrix = np.dot(vehicle_world_matrix, bb_vehicle_matrix)
# 8 vertices are relative to bbx center, thus multiply with bbx_2_world to get the world coords.
bb_world_cords = np.dot(bb_world_matrix, np.transpose(cords))
return bb_world_cords
def world_to_sensor(cords, sensor_transform):
"""
Transform coordinate from world reference to sensor reference.
Args:
-cords (np.ndarray): Coordinates under world reference, shape:(4, n).
-sensor_transform (carla.Transform): sensor position in the world, shape:(3, 1).
Returns:
-sensor_cords(np.ndarray): Coordinates in sensor reference.
"""
sensor_world_matrix = x_to_world_transformation(sensor_transform)
world_sensor_matrix = np.linalg.inv(sensor_world_matrix)
sensor_cords = np.dot(world_sensor_matrix, cords)
return sensor_cords
def sensor_to_world(cords, sensor_transform):
"""
Project
Args:
-cords (np.ndarray): Coordinates under sensor reference.
-sensor_transform (carla.Transform): sensor position in the world
Returns:
-world_cords (np.ndarray): Coordinates projected to world space.
"""
sensor_world_matrix = x_to_world_transformation(sensor_transform)
world_cords = np.dot(sensor_world_matrix, cords)
return world_cords
def vehicle_to_sensor(cords, vehicle, sensor_transform):
"""
Transform coordinates from vehicle reference to sensor reference
Args:
-cords (np.ndarray): Coordinates under vehicle reference, shape (n, 4).
-vehicle (carla.vehicle or ObstacleVehicle): vehicle object.
-sensor_transform (carla.Transform): sensor position in the world, shape(3, 1).
Returns:
-(np.ndarray): Coordinates in sensor reference, shape(4, n).
"""
world_cord = bbx_to_world(cords, vehicle)
sensor_cord = world_to_sensor(world_cord, sensor_transform)
return sensor_cord
def get_bounding_box(vehicle, camera, sensor_transform):
"""
Get vehicle bounding box and project to sensor image.
Args:
-vehicle (carla.vehicle or ObstacleVehicle): vehicle object.
-camera (carla.sensor.camera.rgb): The CARLA sensor object.
-sensor_transform (carla.Transform): sensor position in the world
Returns:
-camera_bbox (np.ndarray): Bounding box coordinates in sensor image.
"""
camera_k_matrix = get_camera_intrinsic(camera)
# bb_cords is relative to bbx center(approximate the vehicle center)
bb_cords = create_bb_points(vehicle)
# bbx coordinates in sensor coordinate system. shape: (3, 8)
cords_x_y_z = vehicle_to_sensor(bb_cords, vehicle, sensor_transform)[:3, :]
# refer to https://github.com/carla-simulator/carla/issues/553
cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :].reshape(1, 8),
-cords_x_y_z[2, :].reshape(1, 8),
cords_x_y_z[0, :].reshape(1, 8)])
# bounding box in sensor image. Shape:(8, 3)
bbox = np.transpose(np.dot(camera_k_matrix, cords_y_minus_z_x))
new_x = (bbox[:, 0] / bbox[:, 2]).reshape(8, 1)
new_y = (bbox[:, 1] / bbox[:, 2]).reshape(8, 1)
new_z = bbox[:, 2].reshape(8, 1)
camera_bbox = np.concatenate([new_x, new_y, new_z], axis=1)
return camera_bbox
def p3d_to_p2d_bb(p3d_bb):
"""
Draw 2D bounding box (4 vertices) from 3D bounding box (8 vertices) in image.
2D bounding box is represented by two corner points
Args:
-p3d_bb (np.array): The objective 3D bounding box.
Returns:
"""
min_x = np.amin(p3d_bb[:, 0])
min_y = np.amin(p3d_bb[:, 1])
max_x = np.amax(p3d_bb[:, 0])
max_y = np.amax(p3d_bb[:, 1])
p2d_bb = np.array([[min_x, min_y], [max_x, max_y]])
return p2d_bb
def get_2d_bb(vehicle, sensor, senosr_transform):
"""
Summarize 2D bounding box creation
Args:
-vehicle (carla.vehicle or ObstacleVehicle): vehicle object.
-sensor (carla.sensor.camera.rgb): The CARLA sensor object.
-senosr_transform (carla.Transform): sensor position in the world
Returns:
-p2d_bb (np.ndarray): 2d bounding box in camera image.
"""
p3d_bb = get_bounding_box(vehicle, sensor, senosr_transform)
p2d_bb = p3d_to_p2d_bb(p3d_bb)
return p2d_bb
def project_lidar_to_camera(lidar, camera, point_cloud, rgb_image):
"""
Project lidar to camera space.
Args:
-lidar (carla.Sensor): Lidar sensor.
-camera (carla.Sensor): Camera seonsor.
-point_cloud (np.ndarray): cloud points, (x, y, z, intensity).
-rgb_image (np.ndarray): rgb image from camera.
Returns:
-rgb_image (np.ndarray): new rgb image with lidar points projected.
-points_2d (np.ndarray): point clouds projected to camera space.
"""
# Lidar intensity array of shape (p_cloud_size,) but, for now, let's
# focus on the 3D points.
intensity = np.array(point_cloud[:, 3])
# Point cloud in lidar sensor space array of shape (3, p_cloud_size).
local_lidar_points = np.array(point_cloud[:, :3]).T
# Add an extra 1.0 at the end of each 3d point so it becomes of
# shape (4, p_cloud_size) and it can be multiplied by a (4, 4) matrix.
local_lidar_points = np.r_[
local_lidar_points, [np.ones(local_lidar_points.shape[1])]]
# This (4, 4) matrix transforms the points from lidar space to world space.
lidar_2_world = x_to_world_transformation(lidar.get_transform())
# transform lidar points from lidar space to world space
world_points = np.dot(lidar_2_world, local_lidar_points)
# project world points to camera space
sensor_points = world_to_sensor(world_points, camera.get_transform())
# (x, y ,z) -> (y, -z, x)
point_in_camera_coords = np.array([
sensor_points[1],
sensor_points[2] * -1,
sensor_points[0]])
# retrieve camera intrinsic
K = get_camera_intrinsic(camera)
# project the 3d points in camera space to image space
points_2d = np.dot(K, point_in_camera_coords)
# normalize x,y,z
points_2d = np.array([
points_2d[0, :] / points_2d[2, :],
points_2d[1, :] / points_2d[2, :],
points_2d[2, :]])
image_w = int(camera.attributes['image_size_x'])
image_h = int(camera.attributes['image_size_y'])
# remove points out the camera scope
points_2d = points_2d.T
intensity = intensity.T
points_in_canvas_mask = \
(points_2d[:, 0] > 0.0) & (points_2d[:, 0] < image_w) & \
(points_2d[:, 1] > 0.0) & (points_2d[:, 1] < image_h) & \
(points_2d[:, 2] > 0.0)
new_points_2d = points_2d[points_in_canvas_mask]
new_intensity = intensity[points_in_canvas_mask]
# Extract the screen coords (uv) as integers.
u_coord = new_points_2d[:, 0].astype(np.int)
v_coord = new_points_2d[:, 1].astype(np.int)
# Since at the time of the creation of this script, the intensity function
# is returning high values, these are adjusted to be nicely visualized.
new_intensity = 4 * new_intensity - 3
color_map = np.array([
np.interp(new_intensity, VID_RANGE, VIRIDIS[:, 0]) * 255.0,
        np.interp(new_intensity, VID_RANGE, VIRIDIS[:, 1]) * 255.0,
        np.interp(new_intensity, VID_RANGE, VIRIDIS[:, 2]) * 255.0])
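    # A minimal sketch of how the projected points could be drawn (the original
    # drawing code is truncated here; the (3, N) color_map layout is an
    # assumption of this sketch):
    #
    #   for i in range(len(u_coord)):
    #       rgb_image[v_coord[i], u_coord[i]] = color_map[:, i]
    #   return rgb_image, new_points_2d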
# -*- coding: utf-8 -*-
'''
This modules contains functions necessary for applying OWL or group OWL
to the parameters
1. reg_params_init
2. apply_growl
3. apply_owl_prox
4. update_mask
5. measure_compression
6. adjust_learning_rate
7. preprocess_hparams
8. set_param_share
'''
from __future__ import division, print_function, absolute_import
import sys
sys.path.append('./owl_projection')
import tensorflow as tf
import numpy as np
from projectedOWL import proxOWL
from numpy.linalg import norm
from math import sqrt
from utils_nn import get_weight_placeholders, get_mask_placeholders
from flags import FLAGS, HParams
import re
import os
def reg_params_init(sess, hps):
'''
    This function initializes the regularization parameters.
Args:
sess: the predefined computation graph.
hps: hyperparameters collection
Returns:
layer_owl_params: a list, each element is an array containing the weights
of the corresponding layer.
'''
weight_placeholder = get_weight_placeholders()
reg_applied_layers = hps.reg_applied_layers
layer_owl_params = []
for idx, triple in enumerate(weight_placeholder):
print('layer {}'.format(idx))
# if the layer is not regularized, then append []
if not reg_applied_layers[idx]:
layer_owl_params.append([])
continue
#Regularization parameters
reg_params = hps.reg_params
lambda_1 = np.float32(reg_params[idx][0])
lambda_2 = np.float32(reg_params[idx][1])
        if (lambda_1 < 0) or (lambda_2 < 0):
raise Exception('regularization parameters must be non-negative')
#GrOWL weights should be applied to the rows of the (reshaped) weight matrix
param_i, placeholder_i, assign_op_i = triple
param_shape = sess.run(tf.shape(param_i))
if np.size(param_i.get_shape().as_list()) == 2:
row_num = param_shape[0]
elif np.size(param_i.get_shape().as_list()) == 4:
row_num = param_shape[2]
        # np.linspace requires an integer num, so cast the transition index
        transition_ind = int(np.floor(row_num*FLAGS.PLD_transition))
param_index = np.linspace(start=transition_ind-1, stop=0, num=transition_ind)
print(' row num: {}, transition_ind: {}, largest reg: {}'.format(row_num, transition_ind, lambda_1 + lambda_2 * transition_ind))
if row_num > transition_ind:
param_index = np.append(param_index, np.zeros([1, int(row_num-transition_ind)]))
layer_owl_params.append(lambda_1 + lambda_2 * param_index)
print("length of weight_placeholder:{0}".format(len(weight_placeholder)))
assert len(layer_owl_params) == len(weight_placeholder)
assert len(layer_owl_params) == len(hps.reg_applied_layers)
return layer_owl_params, hps
def apply_group_lasso(W, weights):
#Prox op
W_norm = norm(W, axis=1)
new_W_norm = np.maximum(W_norm - weights[0], 0)
new_W = np.zeros_like(W)
for i in range(W.shape[0]):
if W_norm[i] < np.finfo(np.float32).eps:
new_W[i,:] = 0 * W[i,:]
else:
new_W[i,:] = new_W_norm[i] * W[i,:] / W_norm[i]
return new_W
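# Quick sanity check (illustrative, not part of the module): with
# weights = [1.0], each row norm is soft-thresholded by 1.
#
#   W = np.array([[3.0, 4.0], [0.3, 0.4]])   # row norms 5.0 and 0.5
#   apply_group_lasso(W, np.array([1.0]))
#   # -> [[2.4, 3.2], [0.0, 0.0]]  (row norms shrink to 4.0 and 0.0)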
def apply_growl(W, weights):
# Prox op
W_norm = norm(W, axis=1)
new_W_norm=proxOWL(W_norm, weights)
new_W = np.zeros_like(W)
for i in range(W.shape[0]):
if W_norm[i] < np.finfo(np.float32).eps:
new_W[i,:] = 0 * W[i,:]
else:
new_W[i,:] = new_W_norm[i] * W[i,:] / W_norm[i]
return new_W
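# Note: with the parameterization used in reg_params_init (weights =
# lambda_1 + lambda_2 * param_index), setting lambda_2 = 0 makes all OWL
# weights equal, and proxOWL then reduces to the uniform soft-thresholding
# of row norms performed by apply_group_lasso above.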
def apply_reg_prox(sess, learning_rate_val, layer_reg_params, hps):
'''
Updates the weights parameter of each layer
Args:
        sess: the computation graph
        learning_rate_val: the predefined learning rate
        layer_reg_params: owl parameters, initially created by reg_params_init
        hps: hyperparameters collection
Returns:
None
'''
# get weights of the network
weight_placeholders = get_weight_placeholders()
# prox_lr_val = min(learning_rate_val, 0.001)
prox_lr_val = learning_rate_val
for idx, triple in enumerate(weight_placeholders):
#Don't apply owl/growl if told not to
if not hps.reg_applied_layers[idx]:
continue
param_i, placeholder_i, assign_op_i = triple
param_val = sess.run(param_i)
dim_i = np.size(param_val.shape)
if dim_i == 2:
if FLAGS.use_growl:
prox_param_val = apply_growl(param_val, prox_lr_val * layer_reg_params[idx])
else:
prox_param_val = apply_group_lasso(param_val, prox_lr_val * layer_reg_params[idx])
elif dim_i == 4:
# For convolutional layer, we need to first reshape 4D tensor to 2D matrix
reduced_param_val = reshape_2D_4D(param_val, target_shape=None,
reshape_type=2, reshape_order='F')
if FLAGS.use_growl:
reduced_prox_param_val = apply_growl(reduced_param_val, prox_lr_val * layer_reg_params[idx])
else:
reduced_prox_param_val = apply_group_lasso(reduced_param_val, prox_lr_val * layer_reg_params[idx])
#Now reshape the 2D matrix back to 4D tensor
prox_param_val = reshape_2D_4D(reduced_prox_param_val, target_shape=param_val.shape,
reshape_type=1, reshape_order='F')
# assign the new weights to param_i using the assign_op_i
sess.run(assign_op_i, feed_dict={placeholder_i:prox_param_val})
def update_mask(sess, threshold, hps, res_dict, step):
'''
update the mask during the training process to prevent drifting from zero
Args:
sess: the computation graph
        threshold: the pruning threshold; this may help avoid the floating
                   point error that can occur during the masking process
hps: hyperparameters
res_dict: results dictionary
step: current step
Returns:
        num_zero_layers: number of zero-valued layers
        layer_ID: indices of the (nearly) zero-valued layers
'''
    mask_placeholders = get_mask_placeholders()
weight_placeholders = get_weight_placeholders()
    # count the zero-valued layers to avoid meaningless results
num_zero_layers = 0
layer_ID = []
    assert len(mask_placeholders) == len(weight_placeholders)
    for idx, mask_triple in enumerate(mask_placeholders):
#Don't apply owl/growl if told not to
if not hps.reg_applied_layers[idx]:
continue
        mask_i, mask_placeholder_i, mask_assign_op_i = mask_triple
param_i, param_placeholder_i, param_assign_op_i = weight_placeholders[idx]
dim_i = param_i.get_shape().as_list()
#Recover the masked weights to zeros if they drifted
param_val = sess.run(param_i)
mask = sess.run(mask_i)
param_val_masked = param_val * mask
#If apply to convolutional layer, compute the reshaped matrix
if np.size(dim_i) == 4:
param_val_masked_reshaped = reshape_2D_4D(param_val_masked, target_shape=None,
reshape_type=2, reshape_order='F')
mask_reshaped = reshape_2D_4D(mask, target_shape=None,
reshape_type=2, reshape_order='F')
#prune params and update the mask
row_norm = norm(param_val_masked_reshaped, axis=1)
row_size = param_val_masked_reshaped.shape[1]
print('layer:{}, largest row norm: {:6f}, median row norm: {:.6f}, min row norm: {:.6f}'.format(idx, np.max(row_norm), np.median(row_norm), np.min(row_norm)))
zero_row_idx = np.where(row_norm <=threshold)
print(' masked neurons: {}; total neurons: {}'.format(np.size(zero_row_idx), np.size(row_norm)))
param_val_masked_reshaped[zero_row_idx[0], :] = 0
mask_reshaped[zero_row_idx[0], :] = 0
#back to 4D
param_val_masked = reshape_2D_4D(param_val_masked_reshaped, target_shape=tuple(dim_i),
reshape_type=1, reshape_order='F')
mask = reshape_2D_4D(mask_reshaped, target_shape=tuple(dim_i),
reshape_type=1, reshape_order='F')
elif np.size(dim_i) == 2:
row_norm = norm(param_val_masked, axis=1)
row_size = param_val_masked.shape[1]
print('layer:{}, largest row norm: {:6f}, median row norm: {:.6f}, min row norm: {:.6f}'.format(idx, np.max(row_norm), np.median(row_norm), np.min(row_norm)))
zero_row_idx = np.where(row_norm <=threshold)
print(' masked rows: {}; total rows: {}'.format(np.size(zero_row_idx), np.size(row_norm)))
param_val_masked[zero_row_idx[0], :] = 0
mask[zero_row_idx[0], :] = 0
#Update the mask and weight matrix
        sess.run(mask_assign_op_i, feed_dict={mask_placeholder_i: mask})
sess.run(param_assign_op_i, feed_dict={param_placeholder_i:param_val_masked})
nonzero_rows = np.size(row_norm) - np.size(zero_row_idx[0])
layer_nonzero_params = nonzero_rows * row_size
print(" total:{0}, nonzeros:{1}".format(np.size(param_val_masked),
layer_nonzero_params))
################################
#Record the zero valued layers
if np.size(row_norm) - np.size(zero_row_idx[0]) <= 3:
num_zero_layers += 1
layer_ID += [idx]
return num_zero_layers, layer_ID
def measure_compression(sess, res_dict, step, training, hps, num_cluster_arr=[]):
'''
Monitor the compression ratio
'''
    mask_placeholders = get_mask_placeholders()
weight_placeholders = get_weight_placeholders()
num_nonzero_row_arr = []
num_total_row_arr = []
num_row_size_arr = []
num_nonzero_params = 0
num_unique_params = 0
num_total_params = 0
    for idx, mask_triple in enumerate(mask_placeholders):
        mask_i, mask_placeholder_i, mask_assign_op_i = mask_triple
param_i, param_placeholder_i, param_assign_op_i = weight_placeholders[idx]
dim_i = param_i.get_shape().as_list()
param_val = sess.run(param_i)
mask = sess.run(mask_i)
param_val_masked = param_val * mask
if np.size(dim_i) == 4:
param_val_masked_reshaped = reshape_2D_4D(param_val_masked, target_shape=None, reshape_type=2, reshape_order='F')
row_norm = norm(param_val_masked_reshaped, axis=1)
num_nonzero_params += np.count_nonzero(row_norm) * np.shape(param_val_masked_reshaped)[1]
num_unique_params += np.size(np.unique(param_val_masked_reshaped))
num_total_params += np.prod(dim_i)
num_nonzero_row_arr.append(np.count_nonzero(row_norm))
num_total_row_arr.append(np.size(row_norm))
num_row_size_arr.append(np.shape(param_val_masked_reshaped)[1])
elif np.size(dim_i) == 2:
row_norm = norm(param_val_masked, axis=1)
num_nonzero_params += np.count_nonzero(row_norm) * dim_i[1]
num_unique_params += np.size(np.unique(param_val_masked))
num_total_params += np.prod(dim_i)
            num_nonzero_row_arr.append(np.count_nonzero(row_norm))
import numpy as np
from ..base_channel import Channel
from tramp.utils.integration import gaussian_measure_2d_full, gaussian_measure_2d
from tramp.utils.misc import norm_cdf, phi_0, phi_1, phi_2, sigmoid, leaky_relu
from scipy.integrate import quad
class LeakyReluChannel(Channel):
def __init__(self, slope):
self.slope = slope
self.repr_init()
def sample(self, Z):
X = leaky_relu(Z, self.slope)
return X
def math(self):
return r"$\textrm{l-relu}$"
def second_moment(self, tau_z):
return 0.5 * (1 + self.slope**2) * tau_z
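        # For zero-mean z with E[z^2] = tau_z, z is positive half the time
        # (where x = z) and negative half the time (where x = slope * z),
        # hence E[x^2] = 0.5 * tau_z + 0.5 * slope**2 * tau_z.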
def compute_forward_posterior(self, az, bz, ax, bx):
# estimate x from x = leaky_relu(z)
a_pos = az + ax
a_neg = az + (self.slope**2) * ax
b_pos = bz + bx
b_neg = - bz - self.slope * bx
x_pos = b_pos / np.sqrt(a_pos)
x_neg = b_neg / np.sqrt(a_neg)
delta = phi_0(x_pos) - phi_0(x_neg) + 0.5 * np.log(a_neg / a_pos)
sigma_pos = sigmoid(+delta)
sigma_neg = sigmoid(-delta)
r_pos = phi_1(x_pos) / np.sqrt(a_pos)
r_neg = - self.slope * phi_1(x_neg) / np.sqrt(a_neg)
v_pos = phi_2(x_pos) / a_pos
v_neg = (self.slope**2) * phi_2(x_neg) / a_neg
rx = sigma_pos * r_pos + sigma_neg * r_neg
Dx = (r_pos - r_neg)**2
v = sigma_pos * sigma_neg * Dx + sigma_pos * v_pos + sigma_neg * v_neg
vx = np.mean(v)
return rx, vx
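        # Note: sigma_pos / sigma_neg act as posterior responsibilities of the
        # z > 0 and z < 0 branches; rx is their weighted mean, and vx adds the
        # between-branch term sigma_pos * sigma_neg * (r_pos - r_neg)**2 to
        # the within-branch variances (law of total variance).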
def compute_backward_posterior(self, az, bz, ax, bx):
# estimate z from x = leaky_relu(z)
a_pos = az + ax
a_neg = az + (self.slope**2) * ax
b_pos = bz + bx
b_neg = - bz - self.slope * bx
x_pos = b_pos / np.sqrt(a_pos)
x_neg = b_neg / np.sqrt(a_neg)
delta = phi_0(x_pos) - phi_0(x_neg) + 0.5 * np.log(a_neg / a_pos)
sigma_pos = sigmoid(+delta)
sigma_neg = sigmoid(-delta)
r_pos = phi_1(x_pos) / np.sqrt(a_pos)
r_neg = - phi_1(x_neg) / np.sqrt(a_neg)
v_pos = phi_2(x_pos) / a_pos
v_neg = phi_2(x_neg) / a_neg
rz = sigma_pos * r_pos + sigma_neg * r_neg
Dz = (r_pos - r_neg)**2
v = sigma_pos * sigma_neg * Dz + sigma_pos * v_pos + sigma_neg * v_neg
vz = np.mean(v)
return rz, vz
def beliefs_measure(self, az, ax, tau_z, f):
u_eff = np.maximum(0, az * tau_z - 1)
a_pos = az + ax
a_neg = az + (self.slope**2) * ax
def f_pos(bz, bx):
b_pos = bz + bx
x_pos = b_pos / np.sqrt(a_pos)
return norm_cdf(x_pos) * f(bz, bx)
def f_neg(bz, bx):
b_neg = - bz - self.slope * bx
x_neg = b_neg / np.sqrt(a_neg)
return norm_cdf(x_neg) * f(bz, bx)
if ax==0 or u_eff==0:
sx_eff_pos = np.sqrt(ax * (ax * tau_z + 1))
            sx_eff_neg = np.sqrt(ax * (self.slope**2 * ax * tau_z + 1))
'''
File name: nodes.py
Author: <NAME>
Date created: 10/31/2017
Date last modified: 10/31/2017
Python Version: 2.7
Description: Script to compute connectome
Project: Psychosis
'''
from __future__ import division
from nilearn.input_data import NiftiMasker, NiftiMapsMasker, NiftiLabelsMasker
from nilearn.connectome import ConnectivityMeasure
from sklearn.covariance import GraphLassoCV
from sklearn.decomposition import FastICA
from matplotlib import pyplot as plt
from scipy.signal import lfilter
from operator import itemgetter
from collections import Counter
from datetime import datetime
from itertools import groupby
from nilearn import datasets
import nilearn.signal
import nibabel as nib
import nilearn.image
import pandas as pd
import numpy as np
import argparse
import nilearn
import sys
import os
CODEDIR = os.environ['CODEDIR']
subject = os.environ.get('SUBJECT')
cleandir = os.path.join(os.environ.get('CONDIR'),"sub-%s"%subject)
keys = os.listdir(cleandir)
#keys = np.array([x.split("_bold") for x in keys if 'task' in x]).flatten()
keys = np.unique([x for x in keys if 'task' in x]).tolist()
prepdir = os.environ.get('PREPDIR')
subprep = os.path.join(prepdir,"sub-"+subject,"MNINonLinear/Results")
print(datetime.now().strftime("%a %b %d %H:%M:%S"))
print("creating connectomes")
for gsr in ["_gsr",""]:
for key in keys:
print("extracting session "+key)
prepfile = os.path.join(subprep,key,key+".nii.gz") #original file
totaltp = nib.load(prepfile).shape[3]
if totaltp <= 10:
continue
imgfile = os.path.join(cleandir,key,key+'_removed_first10_despiked_masked_mvmreg%s_cmpc_bp.nii.gz'%gsr)
##################
# 1 Gordon atlas #
##################
atlasfile = os.path.join(os.environ.get("CODEDIR"),
'postbids/rest/Parcels_MNI_111.nii')
subcort_atlasfile = os.path.join(os.environ.get("CODEDIR"),
'postbids/rest/HarvardOxford-sub-prob-1mm.nii.gz')
cerebellum_atlasfile = os.path.join(os.environ.get("CODEDIR"),
'postbids/rest/Cerebellum-MNIfnirt-prob-1mm.nii.gz')
# extract signals
masker = NiftiLabelsMasker(labels_img=atlasfile,standardize=True,detrend=False,low_pass=None,high_pass=None,verbose=5)
subcortmasker = NiftiMapsMasker(maps_img=subcort_atlasfile,standardize=True,detrend=False,low_pass=None,high_pass=None,verbose=5)
cerebellummasker = NiftiMapsMasker(maps_img=cerebellum_atlasfile,standardize=True,detrend=False,low_pass=None,high_pass=None,verbose=5)
FDfile = os.path.join(cleandir,key,key+"_removed_first10_despiked_mvmreg.txt")
        FD = pd.read_csv(FDfile, sep="\t", header=None)
FD = FD[[24,25]]
FD.columns = ['dvars','FD']
rmid = np.where(FD['FD'] > 0.5)[0]
rmid = np.unique(np.concatenate((rmid,rmid+1,rmid-1)))
short = np.append(False,np.logical_and(np.diff(rmid)>1,np.diff(rmid)<5))
#gives Bool for indices when closer than 5 frames (but evidently more than 1)
allrmid = [range(rmid[i-1],rmid[i])[1:] for i,val in enumerate(short) if val==True]
allrmid = np.sort([item for sublist in allrmid for item in sublist]+rmid.tolist())
ntp = nib.load(imgfile).shape[3]-len(allrmid)
percrem = len(allrmid)/nib.load(imgfile).shape[3]
rmidfile = os.path.join(cleandir,key,key+"_rmid.txt")
np.savetxt(rmidfile,allrmid)
percremfile = os.path.join(cleandir,key,key+"_percrem.txt")
np.savetxt(percremfile,np.array([len(allrmid),ntp,percrem]))
if percrem > 0.2:
continue
# if len(allrmid)>400:
# continue
time_series = masker.fit_transform(imgfile)
time_series_subcort = subcortmasker.fit_transform(imgfile)
time_series_cerebellum = cerebellummasker.fit_transform(imgfile)
time_series = np.concatenate((time_series,time_series_subcort,time_series_cerebellum),axis=1)
time_series_scrubbed = np.delete(time_series,allrmid,axis=0)
# Gordon_figure(correlation_matrix,limits=[-1,1])
# plt.show()
# save parcellated time series
outfile = os.path.join(cleandir,key,key+"_Gordon_ts_scrubbed%s.csv"%gsr)
np.savetxt(outfile,time_series_scrubbed)
outfile = os.path.join(cleandir,key,key+"_Gordon_ts%s.csv"%gsr)
np.savetxt(outfile,time_series)
# static correlation
outfile = os.path.join(cleandir,key,key+"_Gordon_correlation%s.csv"%gsr)
correlation_measure = ConnectivityMeasure(kind='correlation')
correlation_matrix = correlation_measure.fit_transform([time_series_scrubbed])[0]
correlation_std = 1/np.sqrt(ntp-3)
correlation_z = 1/2*np.log((1+correlation_matrix)/(1-correlation_matrix))#/correlation_std
np.fill_diagonal(correlation_z,0)
np.savetxt(outfile,correlation_z)
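        # Note: 0.5 * log((1 + r) / (1 - r)) is the Fisher z-transform of the
        # correlation r, numerically identical to np.arctanh(r).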
# static correlation
outfile = os.path.join(cleandir,key,key+"_Gordon_partial_correlation%s.csv"%gsr)
correlation_measure = ConnectivityMeasure(kind='partial correlation')
correlation_matrix = correlation_measure.fit_transform([time_series_scrubbed])[0]
correlation_z = 1/2*np.log((1+correlation_matrix)/(1-correlation_matrix))#/correlation_std
np.fill_diagonal(correlation_z,0)
        np.savetxt(outfile,correlation_z)
"""
USED -> IMAGE CLASS
"""
import copy
import os
import time
import warnings
from typing import Union
import logging
import numpy as np
import pandas as pd
from pydicom import FileDataset, Sequence, Dataset
from luna.radiology.mirp.utilities import get_version
# Monolithic classes.....
class ImageClass:
# Class for image volumes
def __init__(self, voxel_grid, origin, spacing, orientation, modality=None, spat_transform="base", no_image=False,
metadata=None, slice_table=None):
# Set details regarding voxel orientation and such
self.origin = np.array(origin)
self.orientation = np.array(orientation)
self.spat_transform = spat_transform # Signifies whether the current image is a base image or not
self.slice_table = slice_table
# The spacing, the affine matrix and its inverse are set using the set_spacing method.
self.spacing = None
self.m_affine = None
self.m_affine_inv = None
# Set voxel spacing. This also set the affine matrix and its inverse.
self.set_spacing(new_spacing=np.array(spacing))
# Image name
self.name = None
# Initialise voxel grid dependent parameters
self.isEncoded_voxel_grid = None
self.voxel_grid = None
self.size = None
self.dtype_name = None
# Interpolation settings
self.interpolated = False
self.interpolation_algorithm = None
# Bin settings
self.binned = False
self.bin_width = None
# Discretisation settings
self.discretised = False
self.discretisation_algorithm = None
self.discretisation_settings = None
# Noise addition parameters
self.noise = -1.0
self.noise_iter = 0
# Translation parameters
self.transl_fraction_x = 0.0
self.transl_fraction_y = 0.0
self.transl_fraction_z = 0.0
# Rotation parameters
self.rotation_angle = 0.0
# Set voxel grid and image
if not no_image:
self.is_missing = False
self.set_voxel_grid(voxel_grid=voxel_grid)
else:
self.is_missing = True
# Set metadata and a list of update tags
self.metadata: Union[FileDataset, None] = metadata
self.as_parametric_map = False
# Image modality
if modality is None and metadata is not None:
# Set imaging modality using metadata
self.modality = self.get_metadata(tag=(0x0008, 0x0060), tag_type="str") # Imaging modality
elif modality is None:
self.modality = "GENERIC"
else:
self.modality = modality
# Normalisation flags.
self.is_normalised = False
def copy(self, drop_image=False):
# Creates a new copy of the image object
img_copy = copy.deepcopy(self)
if drop_image:
img_copy.drop_image()
return img_copy
def show(self, img_slice):
import pylab
if self.is_missing:
return
pylab.imshow(self.get_voxel_grid()[img_slice, :, :], cmap=pylab.cm.bone)
pylab.show()
def set_origin(self, new_origin):
self.origin = new_origin
def set_spacing(self, new_spacing):
# Update spacing
self.spacing: np.ndarray = new_spacing
# Recompute the affine matrices
        m_affine = np.zeros((3, 3), dtype=float)
# z-coordinates
m_affine[:, 0] = self.spacing[0] * np.array([self.orientation[0], self.orientation[1], self.orientation[2]])
# y-coordinates
m_affine[:, 1] = self.spacing[1] * np.array([self.orientation[3], self.orientation[4], self.orientation[5]])
# x-coordinates
m_affine[:, 2] = self.spacing[2] * np.array([self.orientation[6], self.orientation[7], self.orientation[8]])
self.m_affine = m_affine
self.m_affine_inv = np.linalg.inv(self.m_affine)
def set_voxel_grid(self, voxel_grid):
""" Sets voxel grid """
# Determine size
self.size = np.array(voxel_grid.shape)
self.dtype_name = voxel_grid.dtype.name
# Encode voxel grid
self.encode_voxel_grid(voxel_grid=voxel_grid)
# Return None for missing images
def get_voxel_grid(self) -> np.ndarray:
""" Return the voxel grid as a ndarray """
if self.is_missing:
return None
if self.isEncoded_voxel_grid:
# Decode voxel grid (typically roi)
            decoded_voxel = np.zeros(np.prod(self.size), dtype=bool)
# Check if the voxel grid contains values
if self.voxel_grid is not None:
decode_zip = copy.deepcopy(self.voxel_grid)
for ii, jj in decode_zip:
decoded_voxel[ii:jj + 1] = True
# Shape into correct form
decoded_voxel = decoded_voxel.reshape(self.size)
return decoded_voxel
else:
return self.voxel_grid
def encode_voxel_grid(self, voxel_grid):
"""Performs run length encoding of the voxel grid"""
# Determine whether the voxel grid should be encoded (only True for boolean data types; typically roi)
if self.dtype_name == "bool":
# Run length encoding for "True"
rle_end = np.array(np.append(np.where(voxel_grid.ravel()[1:] != voxel_grid.ravel()[:-1]),
np.prod(self.size) - 1))
rle_start = np.cumsum(np.append(0, np.diff(np.append(-1, rle_end))))[:-1]
rle_val = voxel_grid.ravel()[rle_start]
# Check whether the voxel grid is empty (consists of 0s)
if np.all(~rle_val):
self.voxel_grid = None
self.isEncoded_voxel_grid = True
else:
# Select only True values entries for further compression
rle_start = rle_start[rle_val]
rle_end = rle_end[rle_val]
# Create zip
self.voxel_grid = zip(rle_start, rle_end)
self.isEncoded_voxel_grid = True
else:
self.voxel_grid = voxel_grid
self.isEncoded_voxel_grid = False
def decode_voxel_grid(self):
"""Performs run length decoding of the voxel grid and converts it to a numpy array"""
if self.dtype_name == "bool" and self.isEncoded_voxel_grid:
            decoded_voxel = np.zeros(np.prod(self.size), dtype=bool)
# Check if the voxel grid contains values
if self.voxel_grid is not None:
decode_zip = copy.deepcopy(self.voxel_grid)
for ii, jj in decode_zip:
decoded_voxel[ii:jj + 1] = True
# Set shape to original grid
decoded_voxel = decoded_voxel.reshape(self.size)
# Update self.voxel_grid and isEncoded_voxel_grid tags
self.voxel_grid = decoded_voxel
self.isEncoded_voxel_grid = False
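        # Round-trip sketch (illustrative): a flattened boolean grid
        # [False, True, True, False] is encoded by encode_voxel_grid as the
        # single (start, end) run (1, 2); decode_voxel_grid writes True back
        # into positions 1..2 and reshapes to self.size.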
def decimate(self, by_slice):
"""
Decimates image voxel grid by removing every second element
:param by_slice:
:return:
"""
# Skip for missing images
if self.is_missing:
return
# Get the voxel grid
img_voxel_grid = self.get_voxel_grid()
# Update the voxel grid
if by_slice:
# Drop every second pixel
img_voxel_grid = img_voxel_grid[:, slice(None, None, 2), slice(None, None, 2)]
# Update voxel spacing
self.spacing[[1, 2]] *= 2.0
else:
# Drop every second voxel
img_voxel_grid = img_voxel_grid[slice(None, None, 2), slice(None, None, 2), slice(None, None, 2)]
# Update voxel spacing
self.spacing *= 2.0
# Update voxel grid. This also updates the size attribute.
self.set_voxel_grid(voxel_grid=img_voxel_grid)
def interpolate(self, by_slice, settings):
"""Performs interpolation of the image volume"""
from luna.radiology.mirp.imageProcess import interpolate_to_new_grid, gaussian_preprocess_filter # aauker: Circular import
# Skip for missing images
if self.is_missing:
return
# Local interpolation constants
if None not in settings.img_interpolate.new_spacing:
iso_spacing = settings.img_interpolate.new_spacing[0]
new_spacing = np.array([iso_spacing, iso_spacing, iso_spacing]) # Desired spacing in mm
elif type(settings.img_interpolate.new_non_iso_spacing) in [list, tuple]:
if None not in settings.img_interpolate.new_non_iso_spacing:
non_iso_spacing = settings.img_interpolate.new_non_iso_spacing
new_spacing = np.array(non_iso_spacing)
else:
new_spacing = self.spacing
else:
new_spacing = self.spacing
print (f"Interpolating main image to {new_spacing}")
# Read additional details
order = settings.img_interpolate.spline_order # Order of multidimensional spline filter (0=nearest neighbours, 1=linear, 3=cubic)
interpolate_flag = settings.img_interpolate.interpolate # Whether to interpolate or not
# Set spacing for interpolation across slices to the original spacing in case interpolation is only conducted within the slice
if by_slice: new_spacing[0] = self.spacing[0]
# Image translation
translate_z = 0#settings.vol_adapt.translate_z[0]
translate_y = 0#settings.vol_adapt.translate_y[0]
translate_x = 0#settings.vol_adapt.translate_x[0]
# Convert to [0.0, 1.0] range
translate_x = translate_x - np.floor(translate_x)
translate_y = translate_y - np.floor(translate_y)
translate_z = translate_z - np.floor(translate_z)
trans_vec = np.array([translate_z, translate_y, translate_x])
# Add translation fractions
self.transl_fraction_x = translate_x
self.transl_fraction_y = translate_y
self.transl_fraction_z = translate_z
# Skip if translation in both directions is 0.0
if translate_x == 0.0 and translate_y == 0.0 and translate_z == 0.0 and not interpolate_flag: return None
# Check if pre-processing is required
if settings.img_interpolate.anti_aliasing:
self.set_voxel_grid(voxel_grid=gaussian_preprocess_filter(orig_vox=self.get_voxel_grid(),
orig_spacing=self.spacing,
sample_spacing=new_spacing,
param_beta=settings.img_interpolate.smoothing_beta,
mode="nearest",
by_slice=by_slice))
# Interpolate image and positioning
self.size, sample_spacing, upd_voxel_grid, grid_origin = \
interpolate_to_new_grid(orig_dim=self.size,
orig_spacing=self.spacing,
orig_vox=self.get_voxel_grid(),
sample_spacing=new_spacing,
translation=trans_vec,
order=order,
mode="nearest",
align_to_center=True)
# Update origin before spacing, because computing the origin requires the original affine matrix.
self.origin = self.origin + np.dot(self.m_affine, np.transpose(grid_origin))
# Update spacing and affine matrix.
self.set_spacing(sample_spacing)
# Round intensities in case of modalities with inherently discretised intensities
if (self.modality == "CT") and (self.spat_transform == "base"):
upd_voxel_grid = np.round(upd_voxel_grid)
elif (self.modality == "PT") and (self.spat_transform == "base"):
upd_voxel_grid[upd_voxel_grid < 0.0] = 0.0
# Set interpolation
self.interpolated = True
# Set interpolation algorithm
if order == 0:
self.interpolation_algorithm = "nnb"
elif order == 1:
self.interpolation_algorithm = "lin"
elif order > 1:
self.interpolation_algorithm = "si" + str(order)
if settings.img_interpolate.bin:
self.binned = True
self.bin_width = settings.img_interpolate.bin_width
self.bins = np.arange(-1000,1000, settings.img_interpolate.bin_width )
            upd_voxel_grid = np.digitize(upd_voxel_grid, self.bins).astype(float)
# Set voxel grid
self.set_voxel_grid(voxel_grid=upd_voxel_grid)
def add_noise(self, noise_level, noise_iter):
""" Adds Gaussian noise to the image volume
noise_level: standard deviation of image noise present """
# Add noise iteration number
self.noise_iter = noise_iter
# Skip for missing images
if self.is_missing:
return
# Skip for invalid noise levels
if noise_level is None:
return
if np.isnan(noise_level) or noise_level < 0.0:
return
# Add Gaussian noise to image
voxel_grid = self.get_voxel_grid()
voxel_grid += np.random.normal(loc=0.0, scale=noise_level, size=self.size)
# Check for corrections due to image modality
if self.spat_transform == "base":
# Round CT values to the nearest integer
if self.modality == "CT":
voxel_grid = np.round(a=voxel_grid, decimals=0)
# Set minimum PT to 0.0
if self.modality == "PT":
voxel_grid[voxel_grid < 0.0] = 0.0
# Set noise level in image
self.noise = noise_level
self.set_voxel_grid(voxel_grid=voxel_grid)
def saturate(self, intensity_range, fill_value=None):
"""
Saturate image intensities using an intensity range
:param intensity_range: range of intensity values
:param fill_value: fill value for out-of-range intensities. If None, the upper and lower ranges are used
:return:
"""
# Skip for missing images
if self.is_missing:
return
intensity_range = np.array(copy.deepcopy(intensity_range))
if np.any(~np.isnan(intensity_range)):
# Get voxel grid
voxel_grid = self.get_voxel_grid()
# Lower boundary
if not np.isnan(intensity_range[0]):
if fill_value is None:
voxel_grid[voxel_grid < intensity_range[0]] = intensity_range[0]
else:
voxel_grid[voxel_grid < intensity_range[0]] = fill_value[0]
# Upper boundary
if not np.isnan(intensity_range[1]):
if fill_value is None:
voxel_grid[voxel_grid > intensity_range[1]] = intensity_range[1]
else:
voxel_grid[voxel_grid > intensity_range[1]] = fill_value[1]
# Set the updated voxel grid
self.set_voxel_grid(voxel_grid=voxel_grid)
def normalise_intensities(self, norm_method="none", intensity_range=None, saturation_range=None, mask=None):
"""
Normalises image intensities
:param norm_method: string defining the normalisation method. Should be one of "none", "range", "standardisation"
:param intensity_range: range of intensities for normalisation
:return:
"""
# Skip for missing images
if self.is_missing:
return
if intensity_range is None:
intensity_range = [np.nan, np.nan]
if mask is None:
            mask = np.ones(self.size, dtype=bool)
        else:
            mask = mask.astype(bool)
        if np.sum(mask) == 0:
            mask = np.ones(self.size, dtype=bool)
if saturation_range is None:
saturation_range = [np.nan, np.nan]
if norm_method == "none":
return
elif norm_method == "range":
# Normalisation to [0, 1] range using fixed intensities.
# Get voxel grid
voxel_grid = self.get_voxel_grid()
# Find maximum and minimum intensities
if np.isnan(intensity_range[0]):
min_int = np.min(voxel_grid[mask])
else:
min_int = intensity_range[0]
if np.isnan(intensity_range[1]):
max_int = np.max(voxel_grid[mask])
else:
max_int = intensity_range[1]
# Normalise by range
if not max_int == min_int:
voxel_grid = (voxel_grid - min_int) / (max_int - min_int)
else:
voxel_grid = voxel_grid - min_int
# Update the voxel grid
self.set_voxel_grid(voxel_grid=voxel_grid)
self.is_normalised = True
elif norm_method == "relative_range":
# Normalisation to [0, 1]-ish range using relative intensities.
# Get voxel grid
voxel_grid = self.get_voxel_grid()
min_int_rel = 0.0
if not np.isnan(intensity_range[0]):
min_int_rel = intensity_range[0]
max_int_rel = 1.0
if not np.isnan(intensity_range[1]):
max_int_rel = intensity_range[1]
# Compute minimum and maximum intensities.
value_range = [np.min(voxel_grid[mask]), np.max(voxel_grid[mask])]
min_int = value_range[0] + min_int_rel * (value_range[1] - value_range[0])
max_int = value_range[0] + max_int_rel * (value_range[1] - value_range[0])
# Normalise by range
if not max_int == min_int:
voxel_grid = (voxel_grid - min_int) / (max_int - min_int)
else:
voxel_grid = voxel_grid - min_int
# Update the voxel grid
self.set_voxel_grid(voxel_grid=voxel_grid)
self.is_normalised = True
elif norm_method == "quantile_range":
# Normalisation to [0, 1]-ish range based on quantiles.
# Get voxel grid
voxel_grid = self.get_voxel_grid()
min_quantile = 0.0
if not np.isnan(intensity_range[0]):
min_quantile = intensity_range[0]
max_quantile = 1.0
if not np.isnan(intensity_range[1]):
max_quantile = intensity_range[1]
# Compute quantiles from voxel grid.
min_int = np.quantile(voxel_grid[mask], q=min_quantile)
max_int = np.quantile(voxel_grid[mask], q=max_quantile)
# Normalise by range
if not max_int == min_int:
voxel_grid = (voxel_grid - min_int) / (max_int - min_int)
else:
voxel_grid = voxel_grid - min_int
# Update the voxel grid
self.set_voxel_grid(voxel_grid=voxel_grid)
self.is_normalised = True
elif norm_method == "standardisation":
# Normalisation to mean 0 and standard deviation 1.
# Get voxel grid
voxel_grid = self.get_voxel_grid()
# Determine mean and standard deviation of the voxel intensities
mean_int = np.mean(voxel_grid[mask])
sd_int = np.std(voxel_grid[mask])
# Protect against invariance.
if sd_int == 0.0: sd_int = 1.0
# Normalise
voxel_grid = (voxel_grid - mean_int) / sd_int
# Update the voxel grid
self.set_voxel_grid(voxel_grid=voxel_grid)
self.is_normalised = True
else:
raise ValueError(f"{norm_method} is not a valid method for normalising intensity values.")
self.saturate(intensity_range=saturation_range)
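        # Example call (hypothetical values): mapping the 2.5th and 97.5th
        # intensity percentiles inside a mask onto [0, 1]:
        #
        #   img.normalise_intensities(norm_method="quantile_range",
        #                             intensity_range=[0.025, 0.975],
        #                             mask=roi_mask)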
def rotate(self, angle):
"""Rotate volume along z-axis."""
# Skip for missing images
if self.is_missing:
return
import scipy.ndimage as ndi
from luna.radiology.mirp.featureSets.volumeMorphology import get_rotation_matrix
# Find actual output size of x-y plane
new_z_dim = np.asmatrix([self.size[0], 0.0, 0.0]) * get_rotation_matrix(np.radians(angle), dim=3, rot_axis=0)
new_y_dim = np.asmatrix([0.0, self.size[1], 0.0]) * get_rotation_matrix(np.radians(angle), dim=3, rot_axis=0)
new_x_dim = np.asmatrix([0.0, 0.0, self.size[2]]) * get_rotation_matrix(np.radians(angle), dim=3, rot_axis=0)
new_dim_flt = np.squeeze(np.array(np.abs(new_z_dim)) + np.array(np.abs(new_y_dim) + np.abs(new_x_dim)))
# Get voxel grid
voxel_grid = self.get_voxel_grid()
# Rotate voxels along angle in the y-x plane and find truncated output size
voxel_grid = ndi.rotate(voxel_grid.astype(np.float32), angle=angle, axes=(1, 2), reshape=True, order=1, mode="nearest")
new_dim_int = np.array(np.shape(voxel_grid)) * 1.0
if (self.modality == "CT") and (self.spat_transform == "base"):
voxel_grid = np.round(voxel_grid)
# Update spacing
self.spacing *= new_dim_int / new_dim_flt
# Set rotation angle
self.rotation_angle = angle
# Update voxel grid with rotated voxels
self.set_voxel_grid(voxel_grid=voxel_grid)
def crop(self, ind_ext_z=None, ind_ext_y=None, ind_ext_x=None,
xy_only=False, z_only=False):
""""Crop image to the provided map extent."""
from luna.radiology.mirp.utilities import world_to_index
# Skip for missing images
if self.is_missing:
return
# Determine corresponding voxel indices
        max_ind = np.ceil(np.array((np.max(ind_ext_z), np.max(ind_ext_y), np.max(ind_ext_x)))).astype(int)
        min_ind = np.floor(np.array((np.min(ind_ext_z), np.min(ind_ext_y), np.min(ind_ext_x)))).astype(int)
# Set bounding indices
        max_bound_ind = np.minimum(max_ind, self.size).astype(int)
        min_bound_ind = np.maximum(min_ind, np.array([0, 0, 0])).astype(int)
# Get voxel grid
voxel_grid = self.get_voxel_grid()
# Create corresponding image volumes by slicing original volume
if z_only:
voxel_grid = voxel_grid[min_bound_ind[0]:max_bound_ind[0] + 1, :, :]
min_bound_ind[1] = 0
min_bound_ind[2] = 0
elif xy_only:
voxel_grid = voxel_grid[:,
min_bound_ind[1]:max_bound_ind[1] + 1,
min_bound_ind[2]:max_bound_ind[2] + 1]
min_bound_ind[0] = 0
            max_bound_ind[0] = int(self.size[0])
else:
voxel_grid = voxel_grid[min_bound_ind[0]:max_bound_ind[0] + 1,
min_bound_ind[1]:max_bound_ind[1] + 1,
min_bound_ind[2]:max_bound_ind[2] + 1]
# Update origin and z-slice position
self.origin = self.origin + np.dot(self.m_affine, np.transpose(min_bound_ind))
# Update voxel grid
self.set_voxel_grid(voxel_grid=voxel_grid)
def crop_to_size(self, center, crop_size, xy_only=False):
"""Crop images to the exact size"""
# Skip for missing images
if self.is_missing:
return
# Make local copy
crop_size = np.array(copy.deepcopy(crop_size))
# Determine the new grid origin in the original index space. Only the dimensions with a number are updated
        grid_origin = np.round(center - crop_size / 2.0).astype(int)
# Update grid origin and crop_size for the remainder of the calculation
grid_origin[np.isnan(crop_size)] = 0
crop_size[np.isnan(crop_size)] = self.size[np.isnan(crop_size)]
# Determine coordinates of the box that can be copied in the original space
max_ind_orig = grid_origin + crop_size
min_ind_orig = grid_origin
# Update coordinates based on boundaries in the original images
        max_ind_orig = np.minimum(max_ind_orig, self.size).astype(int)
        min_ind_orig = np.maximum(min_ind_orig, [0, 0, 0]).astype(int)
# sample_submission.py
import numpy as np
from scipy.special import expit
import sys
class xor_net(object):
"""
This code will train and test the Neural Network for XOR data.
Args:
data: Is a tuple, ``(x,y)``
``x`` is a two or one dimensional ndarray ordered such that axis 0 is independent
data and data is spread along axis 1. If the array had only one dimension, it implies
that data is 1D.
``y`` is a 1D ndarray it will be of the same length as axis 0 or x.
"""
def __init__(self, data, labels):
self.x = data
self.y = labels
maxiteration = 300000
if self.x.shape[0] <= 100:
learningrate = .001
maxiteration = 1000000
elif self.x.shape[0] <= 500:
learningrate = .0001
maxiteration = 500000
else:
learningrate = .00001
R = .01
xdimension = self.x.shape[1]
        neurons = 3
        self.w = np.random.rand(xdimension + 1, neurons)
tempX = np.insert(self.x, 0, 1, axis=1)
tempX = np.array(tempX, dtype=np.float64)
validsize = int(.2 * len(self.x))
validsetX = tempX[0:validsize, :]
trainX = tempX[validsize:, :]
validsetY = self.y[0:validsize]
trainY = self.y[validsize:]
        previouserror = float("inf")
count = 0
        self.wprime = np.random.rand(neurons + 1, 1)
finalW = self.w
finalWprime = self.wprime
iteration = 0
momentum = .9
prevloss = np.random.rand(self.w.shape[0], self.w.shape[1])
prevlossprime = np.random.rand(self.wprime.shape[0], self.wprime.shape[1])
while True:
u = np.dot(self.w.T, trainX.T)
h = expit(u)
temph = h
h = np.insert(h, 0, 1, axis=0)
h = np.array(h, dtype=np.float64)
uprime = np.dot(self.wprime.T, h)
yprime = expit(uprime)
uvalid = np.dot(self.w.T, validsetX.T)
hvalid = expit(uvalid)
hvalid = np.insert(hvalid, 0, 1, axis=0)
uvalidprime = np.dot(self.wprime.T, hvalid)
yvalidprime = expit(uvalidprime)
currenterror = (np.mean((validsetY - yvalidprime) ** 2)) / 2
if iteration >= maxiteration:
finalW = self.w
finalWprime = self.wprime
break
if currenterror > previouserror:
if count == 0:
finalW = self.w
finalWprime = self.wprime
count = count + 1
if count >= 10 and iteration > 100000:
break
else:
count = 0
previouserror = currenterror
regwprime = np.multiply(learningrate, np.multiply(2, np.multiply(R, self.wprime)))
l2delta = np.multiply(np.subtract(yprime, trainY.T), np.multiply(yprime, np.subtract(1, yprime)))
lossprime = np.multiply(learningrate, np.dot(l2delta, h.T))
self.wprime = np.subtract(self.wprime, lossprime.T)
self.wprime = np.subtract(self.wprime, regwprime)
self.wprime = np.subtract(self.wprime, np.multiply(momentum, prevlossprime))
prevlossprime = lossprime.T
tempWprime = self.wprime[1:]
regw = np.multiply(learningrate, np.multiply(2, np.multiply(R, self.w)))
l1delta = (l2delta.T.dot(tempWprime.T)).T * (temph * (1 - temph))
loss = learningrate * (trainX.T.dot(l1delta.T))
self.w = np.subtract(self.w, loss)
self.w = np.subtract(self.w, regw)
self.w = np.subtract(self.w, np.multiply(momentum, prevloss))
prevloss = loss
iteration = iteration + 1
self.w = finalW
self.wprime = finalWprime
self.params = [(self.w[0, :], self.w[1:, :]), (self.wprime[0], self.wprime[1:])] # [(w,b),(w,b)]
def get_params(self):
"""
This code will return Weights and Bias of the trained network.
Returns:
tuple of numpy.ndarray: (w, b).
"""
return self.params
def get_predictions(self, x):
"""
This method will return prediction for unseen data.
Args:
x: array similar to ``x`` in ``data``. Might be of different size.
Returns:
numpy.ndarray: ``y`` which is a 1D array of predictions of the same length as axis 0 of
``x``
"""
testX = np.insert(x, 0, 1, axis=1)
utest = np.dot(self.w.T, testX.T)
htest = expit(utest)
htest = np.insert(htest, 0, 1, axis=0)
utestprime = np.dot(self.wprime.T, htest)
ytestprime = expit(utestprime)
predY = ytestprime > .5
predY = predY.astype(int)
predY = predY.flatten()
return predY
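# Minimal usage sketch (illustrative; the class holds out the first 20% of
# the rows for validation, so the XOR truth table is replicated here):
#
#   x = np.tile([[0, 0], [0, 1], [1, 0], [1, 1]], (50, 1))
#   y = np.tile([0, 1, 1, 0], 50)
#   net = xor_net(x, y)
#   preds = net.get_predictions(x)   # 1D array of 0/1 predictions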
class mlnn(object):
"""
This code will train and test the Neural Network for image data.
Args:
data: Is a tuple, ``(x,y)``
``x`` is a two or one dimensional ndarray ordered such that axis 0 is independent
data and data is spread along axis 1. If the array had only one dimension, it implies
that data is 1D.
``y`` is a 1D ndarray it will be of the same length as axis 0 or x.
"""
def __init__(self, data, labels):
self.x = data / 255.0
self.y = labels
maxiteration=40000
if self.x.shape[0]<=100:
learningrate = .0001
elif self.x.shape[0]<=500:
learningrate=.0001
else:
learningrate = .00001
if self.x.shape[0]>500:
maxiteration=15000
R = 0.01
        neurons = 100
        self.w = 0.01 * np.random.rand(self.x.shape[1] + 1, neurons)
tempX = np.insert(self.x, 0, 1, axis=1)
tempX = np.array(tempX, dtype=np.float64)
validsize = int(.2 * len(self.x))
validsetX = tempX[0:validsize, :]
validsetX -= np.mean(validsetX, axis=0)
trainX = tempX[validsize:, :]
trainX -= np.mean(trainX, axis=0)
validsetY = self.y[0:validsize]
trainY = self.y[validsize:]
        previouserror = float("inf")
count = 0
        self.wprime = 0.01 * np.random.rand(neurons + 1, 1)
import cv2
import torch
import torch.utils.data
import torch.optim.lr_scheduler as lr_scheduler
import numpy as np
import scipy.io as scio
import os
from ptflops import get_model_complexity_info
import model as model
import anchor as anchor
from tqdm import tqdm
import random_erasing
import logging
import time
import random
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
fx = 588.03
fy = -587.07
u0 = 320
v0 = 240
# use the 1/4-subsampled data
TrainImgFrames = int(72757)
TestImgFrames = int(8252)
keypointsNumber = 14
cropWidth = 176
cropHeight = 176
batch_size = 32
learning_rate = 0.00035
Weight_Decay = 1e-4
nepoch = 35
RandCropShift = 5
RandshiftDepth = 1
RandRotate = 180
RandScale = (1.0, 0.5)
xy_thres = 110
depth_thres = 150
depth_pixel_ratio = cropHeight / 2 / depth_thres
downsample = 16
randomseed = 12345
random.seed(randomseed)
np.random.seed(randomseed)
torch.manual_seed(randomseed)
save_dir = './result/nyu'
try:
os.makedirs(save_dir)
except OSError:
pass
trainingImageDir = '/home/dejian/Dataset/nyu/preprocessed/train/'
train_center_file = './data/nyu/nyu_center_train.mat'
train_keypoint_file = './data/nyu/nyu_keypointsUVD_train.mat'
testingImageDir = '/home/dejian/Dataset/nyu/preprocessed/test/'
test_center_file = './data/nyu/nyu_center_test.mat'
test_keypoint_file = './data/nyu/nyu_keypointsUVD_test.mat'
MEAN = np.load('./data/nyu/nyu_mean.npy')
STD = np.load('./data/nyu/nyu_std.npy')
model_dir = './model/nyu_resnet50_8.29.pth'
result_file = 'result_NYU.txt'
def pixel2world(x, fx, fy, ux, uy):
x[:, :, 0] = (x[:, :, 0] - ux) * x[:, :, 2] / fx
x[:, :, 1] = (x[:, :, 1] - uy) * x[:, :, 2] / fy
return x
def world2pixel(x, fx, fy, ux, uy):
x[:, :, 0] = x[:, :, 0] * fx / x[:, :, 2] + ux
x[:, :, 1] = x[:, :, 1] * fy / x[:, :, 2] + uy
return x
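# Sanity check (illustrative): a pixel at the principal point (u0, v0) with
# depth d maps to world coordinates (0, 0, d), and world2pixel inverts
# pixel2world up to floating point error:
#
#   pixel2world(np.array([[[u0, v0, 1000.]]]), fx, fy, u0, v0)
#   # -> [[[0., 0., 1000.]]]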
joint_id_to_name = {
0: 'pinky tip',
1: 'pinky mid',
2: 'ring tip',
3: 'ring mid',
4: 'middle tip',
5: 'middle mid',
6: 'index tip',
7: 'index mid',
8: 'thumb tip',
9: 'thumb mid',
10: 'thumb root',
11: 'wrist back',
12: 'wrist',
13: 'palm',
}
## loading GT keypoints and center points
keypointsUVD_test = scio.loadmat(test_keypoint_file)['keypoints3D'].astype(np.float32)
center_test = scio.loadmat(test_center_file)['centre_pixel'].astype(np.float32)
# center_test = keypointsUVD_test[:,13]
# center_test = np.expand_dims(center_test, axis=1)
centre_test_world = pixel2world(center_test.copy(), fx, fy, u0, v0)
centerlefttop_test = centre_test_world.copy()
centerlefttop_test[:,0,0] = centerlefttop_test[:,0,0]-xy_thres
centerlefttop_test[:,0,1] = centerlefttop_test[:,0,1]+xy_thres
centerrightbottom_test = centre_test_world.copy()
centerrightbottom_test[:,0,0] = centerrightbottom_test[:,0,0]+xy_thres
centerrightbottom_test[:,0,1] = centerrightbottom_test[:,0,1]-xy_thres
test_lefttop_pixel = world2pixel(centerlefttop_test, fx, fy, u0, v0)
test_rightbottom_pixel = world2pixel(centerrightbottom_test, fx, fy, u0, v0)
keypointsUVD_train = scio.loadmat(train_keypoint_file)['keypoints3D'].astype(np.float32)
center_train = scio.loadmat(train_center_file)['centre_pixel'].astype(np.float32)
# center_train = keypointsUVD_train[:,13]
# center_train = np.expand_dims(center_train, axis=1)
centre_train_world = pixel2world(center_train.copy(), fx, fy, u0, v0)
centerlefttop_train = centre_train_world.copy()
centerlefttop_train[:,0,0] = centerlefttop_train[:,0,0]-xy_thres
centerlefttop_train[:,0,1] = centerlefttop_train[:,0,1]+xy_thres
centerrightbottom_train = centre_train_world.copy()
centerrightbottom_train[:,0,0] = centerrightbottom_train[:,0,0]+xy_thres
centerrightbottom_train[:,0,1] = centerrightbottom_train[:,0,1]-xy_thres
train_lefttop_pixel = world2pixel(centerlefttop_train, fx, fy, u0, v0)
train_rightbottom_pixel = world2pixel(centerrightbottom_train, fx, fy, u0, v0)
def transform(img, label, matrix):
'''
img: [H, W] label, [N,2]
'''
img_out = cv2.warpAffine(img,matrix,(cropWidth,cropHeight))
label_out = np.ones((keypointsNumber, 3))
label_out[:,:2] = label[:,:2].copy()
label_out = np.matmul(matrix, label_out.transpose())
label_out = label_out.transpose()
return img_out, label_out
def dataPreprocess(index, img, keypointsUVD, center, mean, std, lefttop_pixel, rightbottom_pixel, xy_thres=90, depth_thres=75, augment=True):
imageOutputs = np.ones((cropHeight, cropWidth, 1), dtype='float32')
labelOutputs = np.ones((keypointsNumber, 3), dtype = 'float32')
if augment:
RandomOffset_1 = np.random.randint(-1*RandCropShift,RandCropShift)
RandomOffset_2 = np.random.randint(-1*RandCropShift,RandCropShift)
RandomOffset_3 = np.random.randint(-1*RandCropShift,RandCropShift)
RandomOffset_4 = np.random.randint(-1*RandCropShift,RandCropShift)
        RandomOffsetDepth = np.random.normal(0, RandshiftDepth, cropHeight*cropWidth)
from collections import Sequence
import numpy as np
from PIL import Image
class CoOccur:
"""
Class used to compute all Co-occurrence matrices of an image for a set of distances and angles and some related
parameters: the inertia, the average, the spread.
An instance of the CoOccur class holds a tensor of shape (len(distances), len(angles), levels, levels) that holds
all Co-occurrence matrices of the image passed as constructor's parameter, for each distance and angle in the
sequences passed as constructor's parameters. During the instantiation are computed also the inertia matrix of shape
(len(distances), len(angles)), the average tensor of shape (len(distances), levels, levels), and the spread tensor
of shape (len(distances), levels, levels).
Co-occurrence matrices can dramatically grow in size, so the levels of pixels are usually quantized before the
computation of the Co-occurrence matrices. From the 256 possible values that pixels can assume, these are reduced
to a smaller number specified as constructor's parameter.
Args:
image (PIL.Image): The image that has to be analyzed, it is internally converted in B/W with Image.convert('L')
distances (Sequence[float]): The sequence of analyzed distances. Default: range(1, 16, 2)
angles (Sequence[float]): The sequence of analyzed angles expressed in degrees. Default: range(90, -90, -45)
levels (int): Pixel values are quantized in this number of levels. Should be lower than 256. Default: 8
Attributes:
matrices (numpy.ndarray): Co-Occurrence matrices, of shape (len(distances), len(angles), levels, levels)
inertia (numpy.ndarray): Inertia matrix, of shape (len(distances), len(angles))
average (numpy.ndarray): Average tensor, of shape (len(distances), levels, levels)
spread (numpy.ndarray): Spread tensor, of shape (len(distances), levels, levels)
distances (numpy.ndarray): Array of all analyzed distances
angles (numpy.ndarray): Array of all analyzed angles
"""
def __init__(self, image: Image, distances: Sequence[float] = range(1, 16, 2),
angles: Sequence[float] = range(90, -90, -45), levels: int = 8):
# ===Image quantization===
pixels = np.array(image.convert('L')) # pixels.dtype == np.uint8
pixels = np.floor(pixels / 256 * levels).astype(np.uint8) # quantized in the [0, levels) integer range
# ===Angles and distances===
self.distances = np.array(distances)
self.angles = np.array(angles)
self._idx_of_dist = {distance: idx for idx, distance in enumerate(distances)}
self._idx_of_angle = {angle: idx for idx, angle in enumerate(angles)}
# ===CoOccur Tensor=== (distances, angles, levels_start, levels_end)
self.matrices = self._co_occurrence_matrices(pixels, distances, angles, levels)
# ===Inertia, Average, Spread===
self.inertia = self._inertia_matrix(levels)
self.average = self._average_matrices()
self.spread = self._spread_matrices()
def _co_occurrence_matrices(self, pixels: np.ndarray, dists: Sequence, angles: Sequence, levels: int) -> np.ndarray:
"""Computes the Co-Occurrence matrix of pixels for every distance and every angle passed as parameters"""
dists_list = []
for distance in dists:
angles_list = []
for angle in angles:
slice_start, slice_end = self._offset_slices(distance, angle)
start = pixels[slice_start[0][0]:slice_start[0][1], slice_start[1][0]:slice_start[1][1]].reshape(-1)
end = pixels[slice_end[0][0]:slice_end[0][1], slice_end[1][0]:slice_end[1][1]].reshape(-1)
histogram2d = np.histogram2d(start, end, density=True, bins=levels, range=[[0, levels], [0, levels]])[0]
angles_list.append(histogram2d)
dists_list.append(angles_list)
co_occur_matrices = np.array(dists_list)
return co_occur_matrices
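        # Note: with density=True and unit-width bins over [0, levels), each
        # levels x levels histogram has entries that sum to 1.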
@staticmethod
def _offset_slices(distance: float, angle: float):
"""Returns the starting and ending ranges to slice the pixel matrix given an angle in degrees and a distance"""
angle = np.radians(angle)
        # (row, col) displacement for the given distance and angle
        offset = np.rint(distance * np.array([-np.sin(angle), np.cos(angle)])).astype(int)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for the bfloat16 Python type."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
# pylint: disable=unused-import,g-bad-import-order
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
bfloat16 = pywrap_tensorflow.TF_bfloat16_type()
class Bfloat16Test(test.TestCase):
def float_values(self):
"""Returns values that should round trip exactly to float and back."""
epsilon = float.fromhex("1.0p-7")
return [
0.0, 1.0, -1, 0.5, -0.5, epsilon, 1.0 + epsilon, 1.0 - epsilon,
-1.0 - epsilon, -1.0 + epsilon, 3.5, 42.0, 255.0, 256.0,
float("inf"), float("-inf"), float("nan")]
def _assertFloatIdentical(self, v, w):
if math.isnan(v):
self.assertTrue(math.isnan(w))
else:
self.assertEqual(v, w)
def testRoundTripToFloat(self):
for v in self.float_values():
self._assertFloatIdentical(v, float(bfloat16(v)))
def testRoundTripToInt(self):
for v in [-256, -255, -34, -2, -1, 0, 1, 2, 10, 47, 128, 255, 256, 512]:
self.assertEqual(v, int(bfloat16(v)))
def testStr(self):
self.assertEqual("0", str(bfloat16(0.0)))
self.assertEqual("1", str(bfloat16(1.0)))
self.assertEqual("-3.5", str(bfloat16(-3.5)))
self.assertEqual("0.0078125", str(bfloat16(float.fromhex("1.0p-7"))))
self.assertEqual("inf", str(bfloat16(float("inf"))))
self.assertEqual("-inf", str(bfloat16(float("-inf"))))
self.assertEqual("nan", str(bfloat16(float("nan"))))
def testRepr(self):
self.assertEqual("bfloat16(0)", repr(bfloat16(0)))
self.assertEqual("bfloat16(1)", repr(bfloat16(1)))
self.assertEqual("bfloat16(-3.5)", repr(bfloat16(-3.5)))
self.assertEqual("bfloat16(0.0078125)",
repr(bfloat16(float.fromhex("1.0p-7"))))
self.assertEqual("bfloat16(inf)", repr(bfloat16(float("inf"))))
self.assertEqual("bfloat16(-inf)", repr(bfloat16(float("-inf"))))
self.assertEqual("bfloat16(nan)", repr(bfloat16(float("nan"))))
def testHash(self):
self.assertEqual(0, hash(bfloat16(0.0)))
self.assertEqual(0x3f80, hash(bfloat16(1.0)))
self.assertEqual(0x7fc0, hash(bfloat16(float("nan"))))
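  # Note: 0x3f80 is the 16-bit pattern of bfloat16(1.0), i.e. the upper half
  # of the IEEE-754 float32 encoding 0x3f800000; the hash mirrors those bits.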
# Tests for Python operations
def testNegate(self):
for v in self.float_values():
self._assertFloatIdentical(-v, float(-bfloat16(v)))
def testAdd(self):
self._assertFloatIdentical(0, float(bfloat16(0) + bfloat16(0)))
self._assertFloatIdentical(1, float(bfloat16(1) + bfloat16(0)))
self._assertFloatIdentical(0, float(bfloat16(1) + bfloat16(-1)))
self._assertFloatIdentical(5.5, float(bfloat16(2) + bfloat16(3.5)))
self._assertFloatIdentical(1.25, float(bfloat16(3.5) + bfloat16(-2.25)))
self._assertFloatIdentical(float("inf"),
float(bfloat16(float("inf")) + bfloat16(-2.25)))
self._assertFloatIdentical(float("-inf"),
float(bfloat16(float("-inf")) + bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) + bfloat16(float("nan")))))
def testSub(self):
self._assertFloatIdentical(0, float(bfloat16(0) - bfloat16(0)))
self._assertFloatIdentical(1, float(bfloat16(1) - bfloat16(0)))
self._assertFloatIdentical(2, float(bfloat16(1) - bfloat16(-1)))
self._assertFloatIdentical(-1.5, float(bfloat16(2) - bfloat16(3.5)))
self._assertFloatIdentical(5.75, float(bfloat16(3.5) - bfloat16(-2.25)))
self._assertFloatIdentical(float("-inf"),
float(bfloat16(-2.25) - bfloat16(float("inf"))))
self._assertFloatIdentical(float("inf"),
float(bfloat16(-2.25) - bfloat16(float("-inf"))))
self.assertTrue(math.isnan(float(bfloat16(3.5) - bfloat16(float("nan")))))
def testMul(self):
self._assertFloatIdentical(0, float(bfloat16(0) * bfloat16(0)))
self._assertFloatIdentical(0, float(bfloat16(1) * bfloat16(0)))
self._assertFloatIdentical(-1, float(bfloat16(1) * bfloat16(-1)))
self._assertFloatIdentical(-7.875, float(bfloat16(3.5) * bfloat16(-2.25)))
self._assertFloatIdentical(float("-inf"),
float(bfloat16(float("inf")) * bfloat16(-2.25)))
self._assertFloatIdentical(float("inf"),
float(bfloat16(float("-inf")) * bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) * bfloat16(float("nan")))))
def testDiv(self):
self.assertTrue(math.isnan(float(bfloat16(0) / bfloat16(0))))
self._assertFloatIdentical(float("inf"), float(bfloat16(1) / bfloat16(0)))
self._assertFloatIdentical(-1, float(bfloat16(1) / bfloat16(-1)))
self._assertFloatIdentical(-1.75, float(bfloat16(3.5) / bfloat16(-2)))
self._assertFloatIdentical(float("-inf"),
float(bfloat16(float("inf")) / bfloat16(-2.25)))
self._assertFloatIdentical(float("inf"),
float(bfloat16(float("-inf")) / bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) / bfloat16(float("nan")))))
def testLess(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v < w, bfloat16(v) < bfloat16(w))
def testLessEqual(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v <= w, bfloat16(v) <= bfloat16(w))
def testGreater(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v > w, bfloat16(v) > bfloat16(w))
def testGreaterEqual(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v >= w, bfloat16(v) >= bfloat16(w))
def testEqual(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v == w, bfloat16(v) == bfloat16(w))
def testNotEqual(self):
for v in self.float_values():
for w in self.float_values():
self.assertEqual(v != w, bfloat16(v) != bfloat16(w))
def testNan(self):
a = np.isnan(bfloat16(float("nan")))
self.assertTrue(a)
np.testing.assert_allclose(np.array([1.0, a]), np.array([1.0, a]))
a = np.array(
[bfloat16(1.34375),
bfloat16(1.4375),
bfloat16(float("nan"))],
dtype=dtypes.bfloat16.as_numpy_dtype)
b = np.array(
[bfloat16(1.3359375),
bfloat16(1.4375),
bfloat16(float("nan"))],
dtype=dtypes.bfloat16.as_numpy_dtype)
np.testing.assert_allclose(
a, b, rtol=0.1, atol=0.1, equal_nan=True, err_msg="", verbose=True)
class Bfloat16NumPyTest(test.TestCase):
def testDtype(self):
self.assertEqual(bfloat16, np.dtype(bfloat16))
def testArray(self):
x = np.array([[1, 2, 3]], dtype=bfloat16)
self.assertEqual(bfloat16, x.dtype)
self.assertEqual("[[bfloat16(1) bfloat16(2) bfloat16(3)]]", str(x))
self.assertAllEqual(x, x)
self.assertAllClose(x, x)
self.assertTrue((x == x).all())
def testComparisons(self):
x = np.array([401408, 7, -32], dtype=np.float32)
bx = x.astype(bfloat16)
y = np.array([82432, 7, 0], dtype=np.float32)
by = y.astype(bfloat16)
self.assertAllEqual(x == y, bx == by)
self.assertAllEqual(x != y, bx != by)
self.assertAllEqual(x < y, bx < by)
self.assertAllEqual(x > y, bx > by)
self.assertAllEqual(x <= y, bx <= by)
self.assertAllEqual(x >= y, bx >= by)
def testEqual2(self):
a = np.array([401408], bfloat16)
b = np.array([82432], bfloat16)
self.assertFalse(a.__eq__(b))
def testCasts(self):
for dtype in [
np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128]:
      x = np.array([[1, 2, 3]], dtype=dtype)
## @ingroup Components-Energy-Converters
# Rotor.py
#
# Created: Jun 2014, <NAME>
# Modified: Jan 2016, <NAME>
# Feb 2019, <NAME>
# Mar 2020, <NAME>
# Sep 2020, <NAME>
# Mar 2021, <NAME>
# Apr 2021, <NAME>
# Jul 2021, <NAME>
# Jul 2021, <NAME>
# Sep 2021, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Core import Data, Units
from SUAVE.Components.Energy.Energy_Component import Energy_Component
from SUAVE.Methods.Geometry.Three_Dimensional \
import orientation_product, orientation_transpose
from SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Lift.compute_HFW_inflow_velocities \
import compute_HFW_inflow_velocities
# package imports
import numpy as np
import scipy as sp
# ----------------------------------------------------------------------
# Generalized Rotor Class
# ----------------------------------------------------------------------
## @ingroup Components-Energy-Converters
class Rotor(Energy_Component):
"""This is a general rotor component.
Assumptions:
None
Source:
None
"""
def __defaults__(self):
"""This sets the default values for the component to function.
Assumptions:
None
Source:
N/A
Inputs:
None
Outputs:
None
Properties Used:
None
"""
self.tag = 'rotor'
self.number_of_blades = 0.0
self.tip_radius = 0.0
self.hub_radius = 0.0
self.twist_distribution = 0.0
self.sweep_distribution = 0.0 # quarter chord offset from quarter chord of root airfoil
self.chord_distribution = 0.0
self.mid_chord_alignment = 0.0
self.thickness_to_chord = 0.0
self.blade_solidity = 0.0
self.design_power = None
self.design_thrust = None
self.airfoil_geometry = None
self.airfoil_polars = None
self.airfoil_polar_stations = None
self.radius_distribution = None
self.rotation = 1
self.azimuthal_offset_angle = 0.0
self.orientation_euler_angles = [0.,0.,0.] # This is X-direction thrust in vehicle frame
self.ducted = False
self.number_azimuthal_stations = 24
self.number_points_around_airfoil = 40
self.induced_power_factor = 1.48 # accounts for interference effects
self.profile_drag_coefficient = .03
self.use_2d_analysis = False # True if rotor is at an angle relative to freestream or nonuniform freestream
self.nonuniform_freestream = False
self.axial_velocities_2d = None # user input for additional velocity influences at the rotor
self.tangential_velocities_2d = None # user input for additional velocity influences at the rotor
self.radial_velocities_2d = None # user input for additional velocity influences at the rotor
self.Wake_VD = Data()
self.wake_method = "momentum"
self.number_rotor_rotations = 6
self.number_steps_per_rotation = 100
self.wake_settings = Data()
self.wake_settings.initial_timestep_offset = 0 # initial timestep
self.wake_settings.wake_development_time = 0.05 # total simulation time required for wake development
self.wake_settings.number_of_wake_timesteps = 30 # total number of time steps in wake development
self.start_angle = 0.0 # angle of first blade from vertical
self.inputs.y_axis_rotation = 0.
self.inputs.pitch_command = 0.
self.variable_pitch = False
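    # Typical standalone use (sketch only; a fully populated SUAVE `conditions`
    # container with freestream and frame data is assumed to exist):
    #   rotor = Rotor()
    #   rotor.number_of_blades = 3
    #   rotor.tip_radius       = 1.2
    #   rotor.inputs.omega     = np.array([[250.]])
    #   thrust, torque, power, Cp, outputs, etap = rotor.spin(conditions)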
def spin(self,conditions):
"""Analyzes a general rotor given geometry and operating conditions.
Assumptions:
per source
Source:
<NAME>. "Qprop Formulation", MIT AeroAstro, June 2006
http://web.mit.edu/drela/Public/web/qprop/qprop_theory.pdf
Leishman, <NAME>. Principles of helicopter aerodynamics
Cambridge university press, 2006.
Inputs:
self.inputs.omega [radian/s]
conditions.freestream.
density [kg/m^3]
dynamic_viscosity [kg/(m-s)]
speed_of_sound [m/s]
temperature [K]
conditions.frames.
body.transform_to_inertial (rotation matrix)
inertial.velocity_vector [m/s]
conditions.propulsion.
throttle [-]
Outputs:
conditions.propulsion.outputs.
number_radial_stations [-]
number_azimuthal_stations [-]
disc_radial_distribution [m]
speed_of_sound [m/s]
density [kg/m-3]
velocity [m/s]
disc_tangential_induced_velocity [m/s]
disc_axial_induced_velocity [m/s]
disc_tangential_velocity [m/s]
disc_axial_velocity [m/s]
drag_coefficient [-]
lift_coefficient [-]
omega [rad/s]
disc_circulation [-]
blade_dQ_dR [N/m]
blade_dT_dr [N]
blade_thrust_distribution [N]
disc_thrust_distribution [N]
thrust_per_blade [N]
thrust_coefficient [-]
azimuthal_distribution [rad]
disc_azimuthal_distribution [rad]
blade_dQ_dR [N]
blade_dQ_dr [Nm]
blade_torque_distribution [Nm]
disc_torque_distribution [Nm]
torque_per_blade [Nm]
torque_coefficient [-]
power [W]
power_coefficient [-]
Properties Used:
self.
number_of_blades [-]
tip_radius [m]
twist_distribution [radians]
chord_distribution [m]
orientation_euler_angles [rad, rad, rad]
"""
# Unpack rotor blade parameters
B = self.number_of_blades
R = self.tip_radius
beta_0 = self.twist_distribution
c = self.chord_distribution
sweep = self.sweep_distribution # quarter chord distance from quarter chord of root airfoil
r_1d = self.radius_distribution
tc = self.thickness_to_chord
# Unpack rotor airfoil data
a_geo = self.airfoil_geometry
a_loc = self.airfoil_polar_stations
cl_sur = self.airfoil_cl_surrogates
cd_sur = self.airfoil_cd_surrogates
# Unpack rotor inputs and conditions
omega = self.inputs.omega
Na = self.number_azimuthal_stations
nonuniform_freestream = self.nonuniform_freestream
use_2d_analysis = self.use_2d_analysis
wake_method = self.wake_method
rotation = self.rotation
pitch_c = self.inputs.pitch_command
# Check for variable pitch
if np.any(pitch_c !=0) and not self.variable_pitch:
print("Warning: pitch commanded for a fixed-pitch rotor. Changing to variable pitch rotor for weights analysis.")
self.variable_pitch = True
# Unpack freestream conditions
rho = conditions.freestream.density[:,0,None]
mu = conditions.freestream.dynamic_viscosity[:,0,None]
a = conditions.freestream.speed_of_sound[:,0,None]
T = conditions.freestream.temperature[:,0,None]
Vv = conditions.frames.inertial.velocity_vector
nu = mu/rho
rho_0 = rho
# Helpful shorthands
pi = np.pi
# Calculate total blade pitch
total_blade_pitch = beta_0 + pitch_c
# Velocity in the rotor frame
T_body2inertial = conditions.frames.body.transform_to_inertial
T_inertial2body = orientation_transpose(T_body2inertial)
V_body = orientation_product(T_inertial2body,Vv)
body2thrust = self.body_to_prop_vel()
T_body2thrust = orientation_transpose(np.ones_like(T_body2inertial[:])*body2thrust)
V_thrust = orientation_product(T_body2thrust,V_body)
# Check and correct for hover
V = V_thrust[:,0,None]
V[V==0.0] = 1E-6
# Number of radial stations and segment control points
Nr = len(c)
ctrl_pts = len(Vv)
# Non-dimensional radial distribution and differential radius
chi = r_1d/R
diff_r = np.diff(r_1d)
deltar = np.zeros(len(r_1d))
deltar[1:-1] = diff_r[0:-1]/2 + diff_r[1:]/2
deltar[0] = diff_r[0]/2
deltar[-1] = diff_r[-1]/2
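        # Each station gets half the width of its neighboring intervals, so that
        # sum(f * deltar) approximates the radial integral of f over the blade.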
# Calculating rotational parameters
omegar = np.outer(omega,r_1d)
n = omega/(2.*pi) # Rotations per second
# 2 dimensional radial distribution non dimensionalized
chi_2d = np.tile(chi[:, None],(1,Na))
chi_2d = np.repeat(chi_2d[None,:,:], ctrl_pts, axis=0)
r_dim_2d = np.tile(r_1d[:, None] ,(1,Na))
r_dim_2d = np.repeat(r_dim_2d[None,:,:], ctrl_pts, axis=0)
c_2d = np.tile(c[:, None] ,(1,Na))
c_2d = np.repeat(c_2d[None,:,:], ctrl_pts, axis=0)
# Azimuthal distribution of stations
psi = np.linspace(0,2*pi,Na+1)[:-1]
psi_2d = np.tile(np.atleast_2d(psi),(Nr,1))
psi_2d = np.repeat(psi_2d[None, :, :], ctrl_pts, axis=0)
# apply blade sweep to azimuthal position
if np.any(np.array([sweep])!=0):
use_2d_analysis = True
            sweep_2d = np.tile(sweep[:, None], (1, Na))  # tile across azimuth; np.repeat with a tuple of repeats raises a ValueError here
sweep_offset_angles = np.tan(sweep_2d/r_dim_2d)
psi_2d += sweep_offset_angles
# Starting with uniform freestream
ua = 0
ut = 0
ur = 0
# Include velocities introduced by rotor incidence angles
if (np.any(abs(V_thrust[:,1]) >1e-3) or np.any(abs(V_thrust[:,2]) >1e-3)) and use_2d_analysis:
# y-component of freestream in the propeller cartesian plane
Vy = V_thrust[:,1,None,None]
Vy = np.repeat(Vy, Nr,axis=1)
Vy = np.repeat(Vy, Na,axis=2)
# z-component of freestream in the propeller cartesian plane
Vz = V_thrust[:,2,None,None]
Vz = np.repeat(Vz, Nr,axis=1)
Vz = np.repeat(Vz, Na,axis=2)
# check for invalid rotation angle
if (rotation == 1) or (rotation == -1):
pass
else:
print("Invalid rotation direction. Setting to 1.")
rotation = 1
# compute resulting radial and tangential velocities in polar frame
utz = Vz*np.cos(psi_2d* rotation)
urz = Vz*np.sin(psi_2d* rotation)
uty = Vy*np.sin(psi_2d* rotation)
ury = Vy*np.cos(psi_2d* rotation)
ut += (utz + uty)
ur += (urz + ury)
ua += np.zeros_like(ut)
# Include external velocities introduced by user
if nonuniform_freestream:
use_2d_analysis = True
# include additional influences specified at rotor sections, shape=(ctrl_pts,Nr,Na)
ua += self.axial_velocities_2d
ut += self.tangential_velocities_2d
ur += self.radial_velocities_2d
if use_2d_analysis:
# make everything 2D with shape (ctrl_pts,Nr,Na)
size = (ctrl_pts,Nr,Na )
PSI = np.ones(size)
PSIold = np.zeros(size)
# 2-D freestream velocity and omega*r
V_2d = V_thrust[:,0,None,None]
V_2d = np.repeat(V_2d, Na,axis=2)
V_2d = np.repeat(V_2d, Nr,axis=1)
omegar = (np.repeat(np.outer(omega,r_1d)[:,:,None], Na, axis=2))
# total velocities
Ua = V_2d + ua
# 2-D blade pitch and radial distributions
if np.size(pitch_c)>1:
# control variable is the blade pitch, repeat around azimuth
beta = np.repeat(total_blade_pitch[:,:,None], Na, axis=2)
else:
beta = np.tile(total_blade_pitch[None,:,None],(ctrl_pts,1,Na ))
r = np.tile(r_1d[None,:,None], (ctrl_pts, 1, Na))
c = np.tile(c[None,:,None], (ctrl_pts, 1, Na))
deltar = np.tile(deltar[None,:,None], (ctrl_pts, 1, Na))
# 2-D atmospheric properties
a = np.tile(np.atleast_2d(a),(1,Nr))
a = np.repeat(a[:, :, None], Na, axis=2)
nu = np.tile(np.atleast_2d(nu),(1,Nr))
nu = np.repeat(nu[:, :, None], Na, axis=2)
rho = np.tile(np.atleast_2d(rho),(1,Nr))
rho = np.repeat(rho[:, :, None], Na, axis=2)
T = np.tile(np.atleast_2d(T),(1,Nr))
T = np.repeat(T[:, :, None], Na, axis=2)
else:
# total velocities
r = r_1d
Ua = np.outer((V + ua),np.ones_like(r))
beta = total_blade_pitch
# Things that will change with iteration
size = (ctrl_pts,Nr)
PSI = np.ones(size)
PSIold = np.zeros(size)
# Total velocities
Ut = omegar - ut
U = np.sqrt(Ua*Ua + Ut*Ut + ur*ur)
if wake_method == 'momentum':
# Setup a Newton iteration
diff = 1.
tol = 1e-6 # Convergence tolerance
ii = 0
# BEMT Iteration
while (diff>tol):
# compute velocities
sin_psi = np.sin(PSI)
cos_psi = np.cos(PSI)
Wa = 0.5*Ua + 0.5*U*sin_psi
Wt = 0.5*Ut + 0.5*U*cos_psi
va = Wa - Ua
vt = Ut - Wt
# compute blade airfoil forces and properties
Cl, Cdval, alpha, Ma, W = compute_airfoil_aerodynamics(beta,c,r,R,B,Wa,Wt,a,nu,a_loc,a_geo,cl_sur,cd_sur,ctrl_pts,Nr,Na,tc,use_2d_analysis)
# compute inflow velocity and tip loss factor
lamdaw, F, piece = compute_inflow_and_tip_loss(r,R,Wa,Wt,B)
# compute Newton residual on circulation
Gamma = vt*(4.*pi*r/B)*F*(1.+(4.*lamdaw*R/(pi*B*r))*(4.*lamdaw*R/(pi*B*r)))**0.5
Rsquiggly = Gamma - 0.5*W*c*Cl
# use analytical derivative to get dR_dpsi
dR_dpsi = compute_dR_dpsi(B,beta,r,R,Wt,Wa,U,Ut,Ua,cos_psi,sin_psi,piece)
# update inflow angle
dpsi = -Rsquiggly/dR_dpsi
PSI = PSI + dpsi
diff = np.max(abs(PSIold-PSI))
PSIold = PSI
# If omega = 0, do not run BEMT convergence loop
            if np.all(omega[:, 0] == 0.):  # no rotation at any control point
break
# If its really not going to converge
if np.any(PSI>pi/2) and np.any(dpsi>0.0):
print("Rotor BEMT did not converge to a solution (Stall)")
break
ii+=1
if ii>10000:
print("Rotor BEMT did not converge to a solution (Iteration Limit)")
break
elif wake_method == "helical_fixed_wake":
# converge on va for a semi-prescribed wake method
ii,ii_max = 0, 50
va_diff, tol = 1, 1e-3
while va_diff > tol:
# compute axial wake-induced velocity (a byproduct of the circulation distribution which is an input to the wake geometry)
va, vt = compute_HFW_inflow_velocities(self)
# compute new blade velocities
Wa = va + Ua
Wt = Ut - vt
# Compute aerodynamic forces based on specified input airfoil or surrogate
Cl, Cdval, alpha, Ma,W = compute_airfoil_aerodynamics(beta,c,r,R,B,Wa,Wt,a,nu,a_loc,a_geo,cl_sur,cd_sur,ctrl_pts,Nr,Na,tc,use_2d_analysis)
lamdaw, F, _ = compute_inflow_and_tip_loss(r,R,Wa,Wt,B)
va_diff = np.max(abs(va - self.outputs.disc_axial_induced_velocity))
# compute HFW circulation at the blade
Gamma = 0.5*W*c*Cl
# update the axial disc velocity based on new va from HFW
self.outputs.disc_axial_induced_velocity = self.outputs.disc_axial_induced_velocity + 0.5*(va - self.outputs.disc_axial_induced_velocity)
ii+=1
if ii>ii_max and va_diff>tol:
print("Semi-prescribed helical wake did not converge on axial inflow used for wake shape.")
# tip loss correction for velocities, since tip loss correction is only applied to loads in prior BEMT iteration
va = F*va
vt = F*vt
lamdaw = r*(va+Ua)/(R*(Ut-vt))
# More Cd scaling from Mach from AA241ab notes for turbulent skin friction
Tw_Tinf = 1. + 1.78*(Ma*Ma)
Tp_Tinf = 1. + 0.035*(Ma*Ma) + 0.45*(Tw_Tinf-1.)
Tp = (Tp_Tinf)*T
Rp_Rinf = (Tp_Tinf**2.5)*(Tp+110.4)/(T+110.4)
Cd = ((1/Tp_Tinf)*(1/Rp_Rinf)**0.2)*Cdval
epsilon = Cd/Cl
epsilon[epsilon==np.inf] = 10.
# thrust and torque and their derivatives on the blade.
blade_T_distribution = rho*(Gamma*(Wt-epsilon*Wa))*deltar
blade_Q_distribution = rho*(Gamma*(Wa+epsilon*Wt)*r)*deltar
blade_dT_dr = rho*(Gamma*(Wt-epsilon*Wa))
blade_dQ_dr = rho*(Gamma*(Wa+epsilon*Wt)*r)
if use_2d_analysis:
blade_T_distribution_2d = blade_T_distribution
blade_Q_distribution_2d = blade_Q_distribution
blade_dT_dr_2d = blade_dT_dr
blade_dQ_dr_2d = blade_dQ_dr
blade_Gamma_2d = Gamma
alpha_2d = alpha
Va_2d = Wa
Vt_2d = Wt
Va_avg = np.average(Wa, axis=2) # averaged around the azimuth
Vt_avg = np.average(Wt, axis=2) # averaged around the azimuth
Va_ind_2d = va
Vt_ind_2d = vt
Vt_ind_avg = np.average(vt, axis=2)
Va_ind_avg = np.average(va, axis=2)
# set 1d blade loadings to be the average:
blade_T_distribution = np.mean((blade_T_distribution_2d), axis = 2)
blade_Q_distribution = np.mean((blade_Q_distribution_2d), axis = 2)
blade_dT_dr = np.mean((blade_dT_dr_2d), axis = 2)
blade_dQ_dr = np.mean((blade_dQ_dr_2d), axis = 2)
# compute the hub force / rotor drag distribution along the blade
dL_2d = 0.5*rho*c_2d*Cd*omegar**2*deltar
dD_2d = 0.5*rho*c_2d*Cl*omegar**2*deltar
rotor_drag_distribution = np.mean(dL_2d*np.sin(psi_2d) + dD_2d*np.cos(psi_2d),axis=2)
else:
Va_2d = np.repeat(Wa[ :, :, None], Na, axis=2)
Vt_2d = np.repeat(Wt[ :, :, None], Na, axis=2)
blade_T_distribution_2d = np.repeat(blade_T_distribution[:, :, None], Na, axis=2)
blade_Q_distribution_2d = np.repeat(blade_Q_distribution[:, :, None], Na, axis=2)
blade_dT_dr_2d = np.repeat(blade_dT_dr[:, :, None], Na, axis=2)
blade_dQ_dr_2d = np.repeat(blade_dQ_dr[:, :, None], Na, axis=2)
blade_Gamma_2d = np.repeat(Gamma[ :, :, None], Na, axis=2)
alpha_2d = np.repeat(alpha[ :, :, None], Na, axis=2)
Vt_avg = Wt
Va_avg = Wa
Vt_ind_avg = vt
Va_ind_avg = va
Va_ind_2d = np.repeat(va[ :, :, None], Na, axis=2)
Vt_ind_2d = np.repeat(vt[ :, :, None], Na, axis=2)
# compute the hub force / rotor drag distribution along the blade
dL = 0.5*rho*c*Cd*omegar**2*deltar
dL_2d = np.repeat(dL[:, :, None], Na, axis=2)
dD = 0.5*rho*c*Cl*omegar**2*deltar
dD_2d = np.repeat(dD[:, :, None], Na, axis=2)
rotor_drag_distribution = np.mean(dL_2d*np.sin(psi_2d) + dD_2d*np.cos(psi_2d),axis=2)
# forces
thrust = np.atleast_2d((B * np.sum(blade_T_distribution, axis = 1))).T
torque = np.atleast_2d((B * np.sum(blade_Q_distribution, axis = 1))).T
rotor_drag = np.atleast_2d((B * np.sum(rotor_drag_distribution, axis=1))).T
power = omega*torque
# calculate coefficients
D = 2*R
Cq = torque/(rho_0*(n*n)*(D*D*D*D*D))
Ct = thrust/(rho_0*(n*n)*(D*D*D*D))
Cp = power/(rho_0*(n*n*n)*(D*D*D*D*D))
Crd = rotor_drag/(rho_0*(n*n)*(D*D*D*D))
etap = V*thrust/power
# prevent things from breaking
Cq[Cq<0] = 0.
Ct[Ct<0] = 0.
Cp[Cp<0] = 0.
thrust[conditions.propulsion.throttle[:,0] <=0.0] = 0.0
power[conditions.propulsion.throttle[:,0] <=0.0] = 0.0
torque[conditions.propulsion.throttle[:,0] <=0.0] = 0.0
rotor_drag[conditions.propulsion.throttle[:,0] <=0.0] = 0.0
thrust[omega<0.0] = -thrust[omega<0.0]
thrust[omega==0.0] = 0.0
power[omega==0.0] = 0.0
torque[omega==0.0] = 0.0
rotor_drag[omega==0.0] = 0.0
Ct[omega==0.0] = 0.0
Cp[omega==0.0] = 0.0
etap[omega==0.0] = 0.0
# Make the thrust a 3D vector
thrust_prop_frame = np.zeros((ctrl_pts,3))
thrust_prop_frame[:,0] = thrust[:,0]
thrust_vector = orientation_product(orientation_transpose(T_body2thrust),thrust_prop_frame)
# Assign efficiency to network
conditions.propulsion.etap = etap
# Store data
self.azimuthal_distribution = psi
results_conditions = Data
outputs = results_conditions(
number_radial_stations = Nr,
number_azimuthal_stations = Na,
disc_radial_distribution = r_dim_2d,
speed_of_sound = conditions.freestream.speed_of_sound,
density = conditions.freestream.density,
velocity = Vv,
blade_tangential_induced_velocity = Vt_ind_avg,
blade_axial_induced_velocity = Va_ind_avg,
blade_tangential_velocity = Vt_avg,
blade_axial_velocity = Va_avg,
disc_tangential_induced_velocity = Vt_ind_2d,
disc_axial_induced_velocity = Va_ind_2d,
disc_tangential_velocity = Vt_2d,
disc_axial_velocity = Va_2d,
drag_coefficient = Cd,
lift_coefficient = Cl,
omega = omega,
disc_circulation = blade_Gamma_2d,
blade_dT_dr = blade_dT_dr,
disc_dT_dr = blade_dT_dr_2d,
blade_thrust_distribution = blade_T_distribution,
disc_thrust_distribution = blade_T_distribution_2d,
disc_effective_angle_of_attack = alpha_2d,
thrust_per_blade = thrust/B,
thrust_coefficient = Ct,
disc_azimuthal_distribution = psi_2d,
blade_dQ_dr = blade_dQ_dr,
disc_dQ_dr = blade_dQ_dr_2d,
blade_torque_distribution = blade_Q_distribution,
disc_torque_distribution = blade_Q_distribution_2d,
torque_per_blade = torque/B,
torque_coefficient = Cq,
power = power,
power_coefficient = Cp,
converged_inflow_ratio = lamdaw,
propeller_efficiency = etap,
blade_H_distribution = rotor_drag_distribution,
rotor_drag = rotor_drag,
rotor_drag_coefficient = Crd,
)
return thrust_vector, torque, power, Cp, outputs , etap
def spin_HFW(self,conditions):
"""Analyzes a general rotor given geometry and operating conditions.
Runs the blade element theory with a helical fixed-wake model for the
iterative wake analysis.
Assumptions:
Helical fixed-wake with wake skew angle
Source:
N/A
Inputs:
self.inputs.omega [radian/s]
conditions.freestream.
density [kg/m^3]
dynamic_viscosity [kg/(m-s)]
speed_of_sound [m/s]
temperature [K]
conditions.frames.
body.transform_to_inertial (rotation matrix)
inertial.velocity_vector [m/s]
conditions.propulsion.
throttle [-]
Outputs:
conditions.propulsion.outputs.
number_radial_stations [-]
number_azimuthal_stations [-]
disc_radial_distribution [m]
speed_of_sound [m/s]
density [kg/m-3]
velocity [m/s]
disc_tangential_induced_velocity [m/s]
disc_axial_induced_velocity [m/s]
disc_tangential_velocity [m/s]
disc_axial_velocity [m/s]
drag_coefficient [-]
lift_coefficient [-]
omega [rad/s]
disc_circulation [-]
blade_dQ_dR [N/m]
blade_dT_dr [N]
blade_thrust_distribution [N]
disc_thrust_distribution [N]
thrust_per_blade [N]
thrust_coefficient [-]
azimuthal_distribution [rad]
disc_azimuthal_distribution [rad]
blade_dQ_dR [N]
blade_dQ_dr [Nm]
blade_torque_distribution [Nm]
disc_torque_distribution [Nm]
torque_per_blade [Nm]
torque_coefficient [-]
power [W]
power_coefficient [-]
Properties Used:
self.
number_of_blades [-]
tip_radius [m]
twist_distribution [radians]
chord_distribution [m]
orientation_euler_angles [rad, rad, rad]
"""
#--------------------------------------------------------------------------------
# Initialize by running BEMT to get initial blade circulation
#--------------------------------------------------------------------------------
_, _, _, _, bemt_outputs , _ = self.spin(conditions)
conditions.noise.sources.propellers[self.tag] = bemt_outputs
self.outputs = bemt_outputs
omega = self.inputs.omega
#--------------------------------------------------------------------------------
# generate rotor wake vortex distribution
#--------------------------------------------------------------------------------
props = Data()
props.propeller = self
# generate wake distribution for n rotor rotation
nrots = self.number_rotor_rotations
steps_per_rot = self.number_steps_per_rotation
rpm = omega/Units.rpm
# simulation parameters for n rotor rotations
init_timestep_offset = 0.
time = 60*nrots/rpm[0][0]
number_of_wake_timesteps = steps_per_rot*nrots
        self.wake_settings.initial_timestep_offset  = init_timestep_offset  # matches the attribute name declared in __defaults__
self.wake_settings.wake_development_time = time
self.wake_settings.number_of_wake_timesteps = number_of_wake_timesteps
self.use_2d_analysis = True
# spin propeller with helical fixed-wake
self.wake_method = "helical_fixed_wake"
thrust_vector, torque, power, Cp, outputs , etap = self.spin(conditions)
return thrust_vector, torque, power, Cp, outputs , etap
def vec_to_vel(self):
"""This rotates from the propellers vehicle frame to the propellers velocity frame
Assumptions:
There are two propeller frames, the vehicle frame describing the location and the propeller velocity frame
velocity frame is X out the nose, Z towards the ground, and Y out the right wing
vehicle frame is X towards the tail, Z towards the ceiling, and Y out the right wing
Source:
N/A
Inputs:
None
Outputs:
None
Properties Used:
None
"""
rot_mat = sp.spatial.transform.Rotation.from_rotvec([0,np.pi,0]).as_matrix()
return rot_mat
def body_to_prop_vel(self):
"""This rotates from the systems body frame to the propellers velocity frame
Assumptions:
There are two propeller frames, the vehicle frame describing the location and the propeller velocity frame
velocity frame is X out the nose, Z towards the ground, and Y out the right wing
vehicle frame is X towards the tail, Z towards the ceiling, and Y out the right wing
Source:
N/A
Inputs:
None
Outputs:
None
Properties Used:
None
"""
# Go from body to vehicle frame
body_2_vehicle = sp.spatial.transform.Rotation.from_rotvec([0,np.pi,0]).as_matrix()
# Go from vehicle frame to propeller vehicle frame: rot 1 including the extra body rotation
rots = np.array(self.orientation_euler_angles) * 1.
rots[1] = rots[1] + self.inputs.y_axis_rotation
vehicle_2_prop_vec = sp.spatial.transform.Rotation.from_rotvec(rots).as_matrix()
        # Go from the propeller vehicle frame to the propeller velocity frame: rot 2
prop_vec_2_prop_vel = self.vec_to_vel()
# Do all the matrix multiplies
rot1 = np.matmul(body_2_vehicle,vehicle_2_prop_vec)
rot_mat = np.matmul(rot1,prop_vec_2_prop_vel)
return rot_mat
def prop_vel_to_body(self):
"""This rotates from the systems body frame to the propellers velocity frame
Assumptions:
There are two propeller frames, the vehicle frame describing the location and the propeller velocity frame
velocity frame is X out the nose, Z towards the ground, and Y out the right wing
vehicle frame is X towards the tail, Z towards the ceiling, and Y out the right wing
Source:
N/A
Inputs:
None
Outputs:
None
Properties Used:
None
"""
body2propvel = self.body_to_prop_vel()
r = sp.spatial.transform.Rotation.from_matrix(body2propvel)
r = r.inv()
rot_mat = r.as_matrix()
return rot_mat
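# Quick sanity check for the frame rotations (illustrative sketch only):
#   rotor = Rotor()
#   np.allclose(rotor.body_to_prop_vel() @ rotor.prop_vel_to_body(), np.eye(3))  # -> True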
def compute_airfoil_aerodynamics(beta,c,r,R,B,Wa,Wt,a,nu,a_loc,a_geo,cl_sur,cd_sur,ctrl_pts,Nr,Na,tc,use_2d_analysis):
"""
Cl, Cdval = compute_airfoil_aerodynamics( beta,c,r,R,B,
Wa,Wt,a,nu,
a_loc,a_geo,cl_sur,cd_sur,
ctrl_pts,Nr,Na,tc,use_2d_analysis )
Computes the aerodynamic forces at sectional blade locations. If airfoil
geometry and locations are specified, the forces are computed using the
airfoil polar lift and drag surrogates, accounting for the local Reynolds
number and local angle of attack.
If the airfoils are not specified, an approximation is used.
Assumptions:
N/A
Source:
N/A
Inputs:
beta blade twist distribution [-]
c chord distribution [-]
r radius distribution [-]
R tip radius [-]
B number of rotor blades [-]
Wa axial velocity [-]
Wt tangential velocity [-]
a speed of sound [-]
nu viscosity [-]
a_loc Locations of specified airfoils [-]
a_geo Geometry of specified airfoil [-]
cl_sur Lift Coefficient Surrogates [-]
cd_sur Drag Coefficient Surrogates [-]
ctrl_pts Number of control points [-]
Nr Number of radial blade sections [-]
Na Number of azimuthal blade stations [-]
tc Thickness to chord [-]
use_2d_analysis Specifies 2d disc vs. 1d single angle analysis [Boolean]
Outputs:
Cl Lift Coefficients [-]
Cdval Drag Coefficients (before scaling) [-]
alpha section local angle of attack [rad]
"""
alpha = beta - np.arctan2(Wa,Wt)
W = (Wa*Wa + Wt*Wt)**0.5
Ma = W/a
Re = (W*c)/nu
# If propeller airfoils are defined, use airfoil surrogate
if a_loc != None:
# Compute blade Cl and Cd distribution from the airfoil data
dim_sur = len(cl_sur)
if use_2d_analysis:
# return the 2D Cl and CDval of shape (ctrl_pts, Nr, Na)
Cl = np.zeros((ctrl_pts,Nr,Na))
Cdval = np.zeros((ctrl_pts,Nr,Na))
for jj in range(dim_sur):
Cl_af = cl_sur[a_geo[jj]](Re,alpha,grid=False)
Cdval_af = cd_sur[a_geo[jj]](Re,alpha,grid=False)
locs = np.where( | np.array(a_loc) | numpy.array |
import numpy as np
from numba import cuda
from numba.core import types
from numba.cuda.testing import skip_on_cudasim, CUDATestCase
import unittest
from numba.np import numpy_support
def set_a(ary, i, v):
ary[i].a = v
def set_b(ary, i, v):
ary[i].b = v
def set_c(ary, i, v):
ary[i].c = v
def set_record(ary, i, j):
ary[i] = ary[j]
def record_set_a(r, v):
r.a = v
def record_set_b(r, v):
r.b = v
def record_set_c(r, v):
r.c = v
def record_read_a(r, arr):
arr[0] = r.a
def record_read_b(r, arr):
arr[0] = r.b
def record_read_c(r, arr):
arr[0] = r.c
def record_write_array(r):
r.g = 2
r.h[0] = 3.0
r.h[1] = 4.0
def record_write_2d_array(r):
r.i = 3
r.j[0, 0] = 5.0
r.j[0, 1] = 6.0
r.j[1, 0] = 7.0
r.j[1, 1] = 8.0
r.j[2, 0] = 9.0
r.j[2, 1] = 10.0
def record_read_array(r, a):
a[0] = r.h[0]
a[1] = r.h[1]
def record_read_2d_array(r, a):
a[0, 0] = r.j[0, 0]
a[0, 1] = r.j[0, 1]
a[1, 0] = r.j[1, 0]
a[1, 1] = r.j[1, 1]
a[2, 0] = r.j[2, 0]
a[2, 1] = r.j[2, 1]
recordtype = np.dtype(
[
('a', np.float64),
('b', np.int32),
('c', np.complex64),
('d', (np.uint8, 5))
],
align=True
)
recordwitharray = np.dtype(
[
('g', np.int32),
('h', np.float32, 2)
],
align=True
)
recordwith2darray = np.dtype([('i', np.int32),
('j', np.float32, (3, 2))])
nested_array1_dtype = np.dtype([("array1", np.int16, (3,))], align=True)
nested_array2_dtype = np.dtype([("array2", np.int16, (3, 2))], align=True)
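# Illustrative host-side construction of one of these records (not exercised by the tests):
#   rec = np.recarray(1, dtype=recordwith2darray)[0]
#   rec.i = 1
#   rec.j[:] = np.zeros((3, 2), dtype=np.float32)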
# Functions used for "full array" tests
def record_write_full_array(rec):
rec.j[:, :] = np.ones((3, 2))
def record_write_full_array_alt(rec):
rec['j'][:, :] = np.ones((3, 2))
def recarray_set_record(ary, rec):
ary[0] = rec
def recarray_write_array_of_nestedarray_broadcast(ary):
ary.j[:, :, :] = 1
return ary
def record_setitem_array(rec_source, rec_dest):
rec_dest['j'] = rec_source['j']
def recarray_write_array_of_nestedarray(ary):
ary.j[:, :, :] = np.ones((2, 3, 2))
return ary
def recarray_getitem_return(ary):
return ary[0]
def recarray_getitem_field_return(ary):
return ary['h']
def recarray_getitem_field_return2(ary):
return ary.h
def recarray_getitem_field_return2_2d(ary):
return ary.j
def record_read_array0(ary):
return ary.h[0]
def record_read_array1(ary):
return ary.h[1]
def record_read_whole_array(ary):
return ary.h
def record_read_2d_array00(ary):
return ary.j[0, 0]
def record_read_2d_array10(ary):
return ary.j[1, 0]
def record_read_2d_array01(ary):
return ary.j[0, 1]
def assign_array_to_nested(dest, src):
dest['array1'] = src
def assign_array_to_nested_2d(dest, src):
dest['array2'] = src
class TestRecordDtype(CUDATestCase):
def _createSampleArrays(self):
self.sample1d = np.recarray(3, dtype=recordtype)
self.samplerec1darr = np.recarray(1, dtype=recordwitharray)[0]
self.samplerec2darr = np.recarray(1, dtype=recordwith2darray)[0]
def setUp(self):
super().setUp()
self._createSampleArrays()
ary = self.sample1d
for i in range(ary.size):
x = i + 1
ary[i]['a'] = x / 2
ary[i]['b'] = x
ary[i]['c'] = x * 1j
ary[i]['d'] = "%d" % x
def get_cfunc(self, pyfunc, argspec):
return cuda.jit()(pyfunc)
def _test_set_equal(self, pyfunc, value, valuetype):
rec = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (rec[:], types.intp, valuetype))
for i in range(self.sample1d.size):
got = self.sample1d.copy()
# Force the argument to the pure Python function to be
# a recarray, as attribute access isn't supported on
# structured arrays.
expect = got.copy().view(np.recarray)
cfunc[1, 1](got, i, value)
pyfunc(expect, i, value)
# Match the entire array to ensure no memory corruption
self.assertTrue(np.all(expect == got))
def test_set_a(self):
self._test_set_equal(set_a, 3.1415, types.float64)
# Test again to check if coercion works
self._test_set_equal(set_a, 3., types.float32)
def test_set_b(self):
self._test_set_equal(set_b, 123, types.int32)
# Test again to check if coercion works
self._test_set_equal(set_b, 123, types.float64)
def test_set_c(self):
self._test_set_equal(set_c, 43j, types.complex64)
# Test again to check if coercion works
self._test_set_equal(set_c, 43j, types.complex128)
def test_set_record(self):
pyfunc = set_record
rec = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (rec[:], types.intp, types.intp))
test_indices = [(0, 1), (1, 2), (0, 2)]
for i, j in test_indices:
expect = self.sample1d.copy()
pyfunc(expect, i, j)
got = self.sample1d.copy()
cfunc[1, 1](got, i, j)
# Match the entire array to ensure no memory corruption
self.assertEqual(expect[i], expect[j])
self.assertEqual(got[i], got[j])
self.assertTrue(np.all(expect == got))
def _test_rec_set(self, v, pyfunc, f):
rec = self.sample1d.copy()[0]
nbrecord = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (nbrecord,))
cfunc[1, 1](rec, v)
np.testing.assert_equal(rec[f], v)
def test_rec_set_a(self):
self._test_rec_set(np.float64(1.5), record_set_a, 'a')
def test_rec_set_b(self):
self._test_rec_set(np.int32(2), record_set_b, 'b')
def test_rec_set_c(self):
self._test_rec_set(np.complex64(4.0 + 5.0j), record_set_c, 'c')
def _test_rec_read(self, v, pyfunc, f):
rec = self.sample1d.copy()[0]
rec[f] = v
arr = np.zeros(1, v.dtype)
nbrecord = numpy_support.from_dtype(recordtype)
cfunc = self.get_cfunc(pyfunc, (nbrecord,))
cfunc[1, 1](rec, arr)
np.testing.assert_equal(arr[0], v)
def test_rec_read_a(self):
self._test_rec_read(np.float64(1.5), record_read_a, 'a')
def test_rec_read_b(self):
self._test_rec_read(np.int32(2), record_read_b, 'b')
def test_rec_read_c(self):
self._test_rec_read(np.complex64(4.0 + 5.0j), record_read_c, 'c')
def test_record_write_1d_array(self):
'''
Test writing to a 1D array within a structured type
'''
rec = self.samplerec1darr.copy()
nbrecord = numpy_support.from_dtype(recordwitharray)
cfunc = self.get_cfunc(record_write_array, (nbrecord,))
cfunc[1, 1](rec)
expected = self.samplerec1darr.copy()
expected['g'] = 2
expected['h'][0] = 3.0
expected['h'][1] = 4.0
np.testing.assert_equal(expected, rec)
def test_record_write_2d_array(self):
'''
Test writing to a 2D array within a structured type
'''
rec = self.samplerec2darr.copy()
nbrecord = numpy_support.from_dtype(recordwith2darray)
cfunc = self.get_cfunc(record_write_2d_array, (nbrecord,))
cfunc[1, 1](rec)
expected = self.samplerec2darr.copy()
expected['i'] = 3
expected['j'][:] = np.asarray([5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
np.float32).reshape(3, 2)
np.testing.assert_equal(expected, rec)
def test_record_read_1d_array(self):
'''
Test reading from a 1D array within a structured type
'''
rec = self.samplerec1darr.copy()
rec['h'][0] = 4.0
rec['h'][1] = 5.0
nbrecord = numpy_support.from_dtype(recordwitharray)
cfunc = self.get_cfunc(record_read_array, (nbrecord,))
arr = np.zeros(2, dtype=rec['h'].dtype)
cfunc[1, 1](rec, arr)
np.testing.assert_equal(rec['h'], arr)
def test_record_read_2d_array(self):
'''
Test reading from a 2D array within a structured type
'''
rec = self.samplerec2darr.copy()
rec['j'][:] = np.asarray([5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
np.float32).reshape(3, 2)
nbrecord = numpy_support.from_dtype(recordwith2darray)
cfunc = self.get_cfunc(record_read_2d_array, (nbrecord,))
arr = | np.zeros((3,2), dtype=rec['j'].dtype) | numpy.zeros |
"""
eels_tools
Model based quantification of electron energy-loss data
Copyright by <NAME>
The University of Tennessee, Knoxville
Department of Materials Science & Engineering
Sources:
M. Tian et al.
Units:
everything is in SI units, except length is given in nm and angles in mrad.
Usage:
See the notebooks for examples of these routines
All the input and output is done through a dictionary which is to be found in the meta_data
attribute of the sidpy.Dataset
"""
import numpy as np
import scipy
from scipy.interpolate import interp1d, splrep # splev, splint
from scipy import interpolate
from scipy.signal import peak_prominences
from scipy.ndimage.filters import gaussian_filter
from scipy import constants
import matplotlib.pyplot as plt
# import matplotlib.patches as patches
# from matplotlib.widgets import SpanSelector
# import ipywidgets as widgets
# from IPython.display import display
import requests
from scipy.optimize import leastsq # least square fitting routine fo scipy
import pickle # pkg_resources,
# ## And we use the image tool library of pyTEMlib
import pyTEMlib.file_tools as ft
from pyTEMlib.config_dir import data_path
major_edges = ['K1', 'L3', 'M5', 'N5']
all_edges = ['K1', 'L1', 'L2', 'L3', 'M1', 'M2', 'M3', 'M4', 'M5', 'N1', 'N2', 'N3', 'N4', 'N5', 'N6', 'N7', 'O1', 'O2',
'O3', 'O4', 'O5', 'O6', 'O7', 'P1', 'P2', 'P3']
first_close_edges = ['K1', 'L3', 'M5', 'M3', 'N5', 'N3']
elements = [' ', 'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na',
'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V',
'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br',
'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag',
'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr',
'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu',
'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi']
# kroeger_core(e_data,a_data,eps_data,ee,thick, relativistic =True)
# kroeger_core2(e_data,a_data,eps_data,acceleration_voltage_kev,thickness, relativistic =True)
# get_wave_length(e0)
# plot_dispersion(plotdata, units, a_data, e_data, title, max_p, ee, ef = 4., ep= 16.8, Es = 0, IBT = [])
# drude(tags, e, ep, ew, tnm, eb)
# drude(ep, eb, gamma, e)
# drude_lorentz(epsInf,leng, ep, eb, gamma, e, Amplitude)
# zl_func( p, x)
################################################################
# Read Functions
#################################################################
def read_dm3_eels_info(original_metadata):
"""Reed dm3 file from a nested dictionary like original_metadata of sidpy.Dataset"""
if 'DM' not in original_metadata:
return {}
main_image = original_metadata['DM']['chosen_image']
exp_dictionary = original_metadata['ImageList'][str(main_image)]['ImageTags']
experiment = {}
if 'EELS' in exp_dictionary:
if 'Acquisition' in exp_dictionary['EELS']:
for key, item in exp_dictionary['EELS']['Acquisition'].items():
if 'Exposure' in key:
_, units = key.split('(')
if units[:-1] == 's':
experiment['single_exposure_time'] = item
if 'Integration' in key:
_, units = key.split('(')
if units[:-1] == 's':
experiment['exposure_time'] = item
if 'frames' in key:
experiment['number_of_frames'] = item
if 'Experimental Conditions' in exp_dictionary['EELS']:
for key, item in exp_dictionary['EELS']['Experimental Conditions'].items():
if 'Convergence' in key:
experiment['convergence_angle'] = item
if 'Collection' in key:
# print(item)
# for val in item.values():
experiment['collection_angle'] = item
if 'Microscope Info' in exp_dictionary:
if 'Voltage' in exp_dictionary['Microscope Info']:
experiment['acceleration_voltage'] = exp_dictionary['Microscope Info']['Voltage']
if 'Name' in exp_dictionary['Microscope Info']:
experiment['microscope'] = exp_dictionary['Microscope Info']['Name']
return experiment
def set_previous_quantification(current_dataset):
"""Set previous quantification from a sidpy.Dataset"""
current_channel = current_dataset.h5_dataset.parent
found_metadata = False
for key in current_channel:
if 'Log' in key:
if current_channel[key]['analysis'][()] == 'EELS_quantification':
current_dataset.metadata.update(ft.nest_dict(current_channel[key].attrs))
found_metadata = True
print('found previous quantification')
if not found_metadata:
# setting important experimental parameter
current_dataset.metadata['experiment'] = read_dm3_eels_info(current_dataset.original_metadata)
if 'experiment' not in current_dataset.metadata:
current_dataset.metadata['experiment'] = {}
if 'convergence_angle' not in current_dataset.metadata['experiment']:
current_dataset.metadata['experiment']['convergence_angle'] = 30
if 'collection_angle' not in current_dataset.metadata['experiment']:
current_dataset.metadata['experiment']['collection_angle'] = 50
if 'acceleration_voltage' not in current_dataset.metadata['experiment']:
current_dataset.metadata['experiment']['acceleration_voltage'] = 200000
################################################################
# Peak Fit Functions
#################################################################
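# NOTE: gauss() is used by the peak models below but its definition falls outside
# this excerpt; a minimal sketch is given here so the excerpt runs standalone.
# It assumes p = [position, amplitude, FWHM]; 2.3548 converts FWHM to sigma.
def gauss(x, p):
    """Gaussian function with p = [position, amplitude, FWHM]"""
    if p[2] == 0:
        return x * 0.
    return p[1] * np.exp(-(x - p[0]) ** 2 / (2.0 * (p[2] / 2.3548) ** 2))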
def residuals_smooth(p, x, y, only_positive_intensity):
"""part of fit"""
err = (y - model_smooth(x, p, only_positive_intensity))
return err
def model_smooth(x, p, only_positive_intensity=False):
"""part of fit"""
y = np.zeros(len(x))
number_of_peaks = int(len(p) / 3)
for i in range(number_of_peaks):
if only_positive_intensity:
p[i * 3 + 1] = abs(p[i * 3 + 1])
p[i * 3 + 2] = abs(p[i * 3 + 2])
if p[i * 3 + 2] > abs(p[i * 3]) * 4.29193 / 2.0:
p[i * 3 + 2] = abs(p[i * 3]) * 4.29193 / 2. # ## width cannot extend beyond zero, maximum is FWTM/2
y = y + gauss(x, p[i * 3:])
return y
def residuals_ll(p, x, y, only_positive_intensity):
"""part of fit"""
err = (y - model_ll(x, p, only_positive_intensity)) / np.sqrt(np.abs(y))
return err
def residuals_ll2(p, x, y, only_positive_intensity):
"""part of fit"""
err = (y - model_ll(x, p, only_positive_intensity))
return err
def model_ll(x, p, only_positive_intensity):
"""part of fit"""
y = np.zeros(len(x))
number_of_peaks = int(len(p) / 3)
for i in range(number_of_peaks):
if only_positive_intensity:
p[i * 3 + 1] = abs(p[i * 3 + 1])
p[i * 3 + 2] = abs(p[i * 3 + 2])
if p[i * 3 + 2] > abs(p[i * 3]) * 4.29193 / 2.0:
p[i * 3 + 2] = abs(p[i * 3]) * 4.29193 / 2. # ## width cannot extend beyond zero, maximum is FWTM/2
y = y + gauss(x, p[i * 3:])
return y
def fit_peaks(spectrum, energy_scale, pin, start_fit, end_fit, only_positive_intensity=False):
"""fit peaks to spectrum
Parameters
----------
spectrum: numpy array
spectrum to be fitted
energy_scale: numpy array
energy scale of spectrum
    pin: list of float
        initial guess of peak positions, amplitudes, and widths
    start_fit: int
        channel where the fit starts
    end_fit: int
        channel where the fit ends
    only_positive_intensity: boolean
        allows only positive amplitudes if True (default: False)
Returns
-------
p: list of float
fitting parameters
"""
# TODO: remove zero_loss_fit_width add absolute
fit_energy = energy_scale[start_fit:end_fit]
fit_spectrum = spectrum[start_fit:end_fit]
pin_flat = [item for sublist in pin for item in sublist]
[p_out, _] = leastsq(residuals_ll, np.array(pin_flat), ftol=1e-3, args=(fit_energy, fit_spectrum,
only_positive_intensity))
p = []
for i in range(len(pin)):
if only_positive_intensity:
p_out[i * 3 + 1] = abs(p_out[i * 3 + 1])
p.append([p_out[i * 3], p_out[i * 3 + 1], abs(p_out[i * 3 + 2])])
return p
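# Usage sketch (assumes `spectrum` and `energy_scale` arrays; start/end are channel indices):
#   pin = [[532., 1000., 2.]]   # one peak: position (eV), amplitude, width
#   p = fit_peaks(spectrum, energy_scale, pin, start_fit=100, end_fit=400)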
#################################################################
# CORE - LOSS functions
#################################################################
def get_x_sections(z=0):
"""Reads X-ray fluorescent cross sections from a pickle file.
Parameters
----------
z: int
atomic number if zero all cross sections will be returned
Returns
-------
dictionary
cross section of a element or of all elements if z = 0
"""
pkl_file = open(data_path + '/edges_db.pkl', 'rb')
x_sections = pickle.load(pkl_file)
pkl_file.close()
z = int(z)
if z < 1:
return x_sections
else:
z = str(z)
if z in x_sections:
return x_sections[z]
else:
return 0
def get_z(z):
"""Returns the atomic number independent of input as a string or number
Parameter
---------
z: int, str
atomic number of chemical symbol (0 if not valid)
"""
x_sections = get_x_sections()
z_out = 0
if str(z).isdigit():
z_out = int(z)
elif isinstance(z, str):
for key in x_sections:
if x_sections[key]['name'].lower() == z.lower(): # Well one really should know how to write elemental
z_out = int(key)
return z_out
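# e.g. get_z(14) == 14 and get_z('Si') == 14; invalid input returns 0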
def list_all_edges(z):
"""List all ionization edges of an element with atomic number z
Parameters
----------
z: int
atomic number
Returns
-------
out_string: str
        string listing all ionization edges of the element
"""
element = str(z)
x_sections = get_x_sections()
out_string = ''
print('Major edges')
for key in all_edges:
if key in x_sections[element]:
if 'onset' in x_sections[element][key]:
print(f" {x_sections[element]['name']}-{key}: {x_sections[element][key]['onset']:8.1f} eV ")
                out_string = out_string + f" {x_sections[element]['name']}-{key}: " \
                                          f"{x_sections[element][key]['onset']:8.1f} eV \n"
return out_string
def find_major_edges(edge_onset, maximal_chemical_shift=5.):
"""Find all major edges within an energy range
Parameters
----------
edge_onset: float
approximate energy of ionization edge
maximal_chemical_shift: float
optional, range of energy window around edge_onset to look for major edges
Returns
-------
text: str
string with all major edges in energy range
"""
text = ''
x_sections = get_x_sections()
for element in x_sections:
for key in x_sections[element]:
# if isinstance(x_sections[element][key], dict):
if key in major_edges:
if abs(x_sections[element][key]['onset'] - edge_onset) < maximal_chemical_shift:
# print(element, x_sections[element]['name'], key, x_sections[element][key]['onset'])
text = text + f"\n {x_sections[element]['name']:2s}-{key}: " \
f"{x_sections[element][key]['onset']:8.1f} eV "
return text
def find_all_edges(edge_onset, maximal_chemical_shift=5):
"""Find all (major and minor) edges within an energy range
Parameters
----------
edge_onset: float
approximate energy of ionization edge
maximal_chemical_shift: float
optional, range of energy window around edge_onset to look for major edges
Returns
-------
text: str
string with all edges in energy range
"""
text = ''
x_sections = get_x_sections()
for element in x_sections:
for key in x_sections[element]:
if isinstance(x_sections[element][key], dict):
if 'onset' in x_sections[element][key]:
if abs(x_sections[element][key]['onset'] - edge_onset) < maximal_chemical_shift:
# print(element, x_sections[element]['name'], key, x_sections[element][key]['onset'])
text = text + f"\n {x_sections[element]['name']:2s}-{key}: " \
f"{x_sections[element][key]['onset']:8.1f} eV "
return text
def second_derivative(dataset, sensitivity):
"""Calculates second derivative of a sidpy.dataset"""
dim = ft.get_dimensions_by_type('spectral', dataset)
energy_scale = np.array(dim[0][1])
if dataset.data_type.name == 'SPECTRAL_IMAGE':
spectrum = dataset.view.get_spectrum()
else:
spectrum = np.array(dataset)
spec = scipy.ndimage.gaussian_filter(spectrum, 3)
dispersion = ft.get_slope(energy_scale)
second_dif = np.roll(spec, -3) - 2 * spec + np.roll(spec, +3)
second_dif[:3] = 0
second_dif[-3:] = 0
# find if there is a strong edge at high energy_scale
noise_level = 2. * np.std(second_dif[3:50])
[indices, _] = scipy.signal.find_peaks(second_dif, noise_level)
width = 50 / dispersion
if width < 50:
width = 50
start_end_noise = int(len(energy_scale) - width)
for index in indices[::-1]:
if index > start_end_noise:
start_end_noise = index - 70
noise_level_start = sensitivity * np.std(second_dif[3:50])
noise_level_end = sensitivity * np.std(second_dif[start_end_noise: start_end_noise + 50])
slope = (noise_level_end - noise_level_start) / (len(energy_scale) - 400)
noise_level = noise_level_start + np.arange(len(energy_scale)) * slope
return second_dif, noise_level
def find_edges(dataset, sensitivity=2.5):
"""find edges within a sidpy.Dataset"""
dim = ft.get_dimensions_by_type('spectral', dataset)
energy_scale = np.array(dim[0][1])
second_dif, noise_level = second_derivative(dataset, sensitivity=sensitivity)
[indices, peaks] = scipy.signal.find_peaks(second_dif, noise_level)
peaks['peak_positions'] = energy_scale[indices]
peaks['peak_indices'] = indices
edge_energies = [energy_scale[50]]
edge_indices = []
[indices, _] = scipy.signal.find_peaks(-second_dif, noise_level)
minima = energy_scale[indices]
for peak_number in range(len(peaks['peak_positions'])):
position = peaks['peak_positions'][peak_number]
if position - edge_energies[-1] > 20:
impossible = minima[minima < position]
impossible = impossible[impossible > position - 5]
if len(impossible) == 0:
possible = minima[minima > position]
possible = possible[possible < position + 5]
if len(possible) > 0:
edge_energies.append((position + possible[0])/2)
edge_indices.append(np.searchsorted(energy_scale, (position + possible[0])/2))
selected_edges = []
for peak in edge_indices:
if 525 < energy_scale[peak] < 533:
selected_edges.append('O-K1')
else:
selected_edge = ''
edges = find_major_edges(energy_scale[peak], 20)
edges = edges.split('\n')
minimum_dist = 100.
for edge in edges[1:]:
edge = edge[:-3].split(':')
name = edge[0].strip()
energy = float(edge[1].strip())
if np.abs(energy - energy_scale[peak]) < minimum_dist:
minimum_dist = np.abs(energy - energy_scale[peak])
selected_edge = name
if selected_edge != '':
selected_edges.append(selected_edge)
return selected_edges
def make_edges(edges_present, energy_scale, e_0, coll_angle):
"""Makes the edges dictionary for quantification
Parameters
----------
edges_present: list
list of edges
energy_scale: numpy array
energy scale on which to make cross section
e_0: float
acceleration voltage (in V)
coll_angle: float
collection angle in mrad
Returns
-------
edges: dict
dictionary with all information on cross section
"""
x_sections = get_x_sections()
edges = {}
for i, edge in enumerate(edges_present):
element, symmetry = edge.split('-')
z = 0
for key in x_sections:
if element == x_sections[key]['name']:
z = int(key)
edges[i] = {}
edges[i]['z'] = z
edges[i]['symmetry'] = symmetry
edges[i]['element'] = element
for key in edges:
xsec = x_sections[str(edges[key]['z'])]
if 'chemical_shift' not in edges[key]:
edges[key]['chemical_shift'] = 0
if 'symmetry' not in edges[key]:
edges[key]['symmetry'] = 'K1'
if 'K' in edges[key]['symmetry']:
edges[key]['symmetry'] = 'K1'
elif 'L' in edges[key]['symmetry']:
edges[key]['symmetry'] = 'L3'
elif 'M' in edges[key]['symmetry']:
edges[key]['symmetry'] = 'M5'
else:
edges[key]['symmetry'] = edges[key]['symmetry'][0:2]
edges[key]['original_onset'] = xsec[edges[key]['symmetry']]['onset']
edges[key]['onset'] = edges[key]['original_onset'] + edges[key]['chemical_shift']
edges[key]['start_exclude'] = edges[key]['onset'] - xsec[edges[key]['symmetry']]['excl before']
edges[key]['end_exclude'] = edges[key]['onset'] + xsec[edges[key]['symmetry']]['excl after']
edges = make_cross_sections(edges, energy_scale, e_0, coll_angle)
return edges
def make_cross_sections(edges, energy_scale, e_0, coll_angle):
"""Updates the edges dictionary with collection angle-integrated X-ray photo-absorption cross-sections
"""
for key in edges:
if key.isdigit():
            edges[key]['data'] = xsec_xrpa(energy_scale, e_0 / 1000., edges[key]['z'], coll_angle,
                                           edges[key]['chemical_shift']) / 1e10  # from barns to 1/nm^2
edges[key]['onset'] = edges[key]['original_onset'] + edges[key]['chemical_shift']
edges[key]['X_section_type'] = 'XRPA'
edges[key]['X_section_source'] = 'pyTEMlib'
return edges
def power_law(energy, a, r):
"""power law for power_law_background"""
return a * np.power(energy, -r)
def power_law_background(spectrum, energy_scale, fit_area, verbose=False):
"""fit of power law to spectrum """
# Determine energy window for background fit in pixels
startx = np.searchsorted(energy_scale, fit_area[0])
endx = np.searchsorted(energy_scale, fit_area[1])
x = np.array(energy_scale)[startx:endx]
y = np.array(spectrum)[startx:endx].flatten()
# Initial values of parameters
p0 = np.array([1.0E+20, 3])
# background fitting
def bgdfit(pp, yy, xx):
err = yy - power_law(xx, pp[0], pp[1])
return err
[p, _] = leastsq(bgdfit, p0, args=(y, x), maxfev=2000)
background_difference = y - power_law(x, p[0], p[1])
background_noise_level = std_dev = np.std(background_difference)
if verbose:
print(f'Power-law background with amplitude A: {p[0]:.1f} and exponent -r: {p[1]:.2f}')
print(background_difference.max() / background_noise_level)
print(f'Noise level in spectrum {std_dev:.3f} counts')
# Calculate background over the whole energy scale
background = power_law(energy_scale, p[0], p[1])
return background, p
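# Usage sketch (pre-edge fit window in eV; assumes spectrum and energy_scale exist):
#   background, (a, r) = power_law_background(spectrum, energy_scale, [120., 280.])
#   signal = spectrum - background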
def cl_model(x, p, number_of_edges, xsec):
""" core loss model for fitting"""
y = (p[9] * np.power(x, (-p[10]))) + p[7] * x + p[8] * x * x
for i in range(number_of_edges):
y = y + p[i] * xsec[i, :]
return y
def fit_edges2(spectrum, energy_scale, edges):
"""fit edges for quantification"""
dispersion = energy_scale[1] - energy_scale[0]
# Determine fitting ranges and masks to exclude ranges
mask = np.ones(len(spectrum))
background_fit_start = edges['fit_area']['fit_start']
if edges['fit_area']['fit_end'] > energy_scale[-1]:
edges['fit_area']['fit_end'] = energy_scale[-1]
background_fit_end = edges['fit_area']['fit_end']
startx = np.searchsorted(energy_scale, background_fit_start)
endx = np.searchsorted(energy_scale, background_fit_end)
mask[0:startx] = 0.0
mask[endx:-1] = 0.0
for key in edges:
if key.isdigit():
if edges[key]['start_exclude'] > background_fit_start + dispersion:
if edges[key]['start_exclude'] < background_fit_end - dispersion * 2:
if edges[key]['end_exclude'] > background_fit_end - dispersion:
# we need at least one channel to fit.
edges[key]['end_exclude'] = background_fit_end - dispersion
startx = np.searchsorted(energy_scale, edges[key]['start_exclude'])
if startx < 2:
startx = 1
endx = np.searchsorted(energy_scale, edges[key]['end_exclude'])
mask[startx: endx] = 0.0
########################
# Background Fit
########################
bgd_fit_area = [background_fit_start, background_fit_end]
background, [A, r] = power_law_background(spectrum, energy_scale, bgd_fit_area, verbose=False)
#######################
# Edge Fit
#######################
x = energy_scale
blurred = gaussian_filter(spectrum, sigma=5)
y = blurred # now in probability
y[np.where(y < 1e-8)] = 1e-8
xsec = []
number_of_edges = 0
for key in edges:
if key.isdigit():
xsec.append(edges[key]['data'])
number_of_edges += 1
xsec = np.array(xsec)
def model(xx, pp):
yy = background + pp[6] + pp[7] * xx + pp[8] * xx * xx
for i in range(number_of_edges):
pp[i] = np.abs(pp[i])
yy = yy + pp[i] * xsec[i, :]
return yy
def residuals(pp, xx, yy):
err = np.abs((yy - model(xx, pp)) * mask) # / np.sqrt(np.abs(y))
return err
scale = y[100]
pin = np.array([scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, -scale / 10, 1.0, 0.001])
[p, _] = leastsq(residuals, pin, args=(x, y))
for key in edges:
if key.isdigit():
edges[key]['areal_density'] = p[int(key)]
edges['model'] = {}
edges['model']['background'] = (background + p[6] + p[7] * x + p[8] * x * x)
edges['model']['background-poly_0'] = p[6]
edges['model']['background-poly_1'] = p[7]
edges['model']['background-poly_2'] = p[8]
edges['model']['background-A'] = A
edges['model']['background-r'] = r
edges['model']['spectrum'] = model(x, p)
edges['model']['blurred'] = blurred
edges['model']['mask'] = mask
edges['model']['fit_parameter'] = p
edges['model']['fit_area_start'] = edges['fit_area']['fit_start']
edges['model']['fit_area_end'] = edges['fit_area']['fit_end']
return edges
def fit_edges(spectrum, energy_scale, region_tags, edges):
"""fit edges for quantification"""
# Determine fitting ranges and masks to exclude ranges
mask = np.ones(len(spectrum))
background_fit_end = energy_scale[-1]
for key in region_tags:
end = region_tags[key]['start_x'] + region_tags[key]['width_x']
startx = np.searchsorted(energy_scale, region_tags[key]['start_x'])
endx = np.searchsorted(energy_scale, end)
if key == 'fit_area':
mask[0:startx] = 0.0
mask[endx:-1] = 0.0
else:
mask[startx:endx] = 0.0
if region_tags[key]['start_x'] < background_fit_end: # Which is the onset of the first edge?
background_fit_end = region_tags[key]['start_x']
########################
# Background Fit
########################
bgd_fit_area = [region_tags['fit_area']['start_x'], background_fit_end]
background, [A, r] = power_law_background(spectrum, energy_scale, bgd_fit_area, verbose=False)
#######################
# Edge Fit
#######################
x = energy_scale
blurred = gaussian_filter(spectrum, sigma=5)
y = blurred # now in probability
y[np.where(y < 1e-8)] = 1e-8
xsec = []
number_of_edges = 0
for key in edges:
if key.isdigit():
xsec.append(edges[key]['data'])
number_of_edges += 1
xsec = np.array(xsec)
def model(xx, pp):
yy = background + pp[6] + pp[7] * xx + pp[8] * xx * xx
for i in range(number_of_edges):
pp[i] = | np.abs(pp[i]) | numpy.abs |
import wandb
import argparse
import exmol
import torch as th
import numpy as np
import yaml
import os
import matplotlib.pyplot as plt
import glob
import json
import selfies as sf
import tqdm
import pandas as pd
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
from dataclasses import dataclass, asdict
import seaborn as sns
sns.set()
from train import load_data, get_loss_criteria, run_an_eval_epoch, to_device
from models import get_model
from data_loader import get_explain_dataset
from plot_utils import fig_to_data
from grover_feats import convert_smiles_to_fp
from data_loader import make_timestamp
@dataclass
class GeneticMolecule(exmol.Example):
"""Example of a molecule"""
distance: float = 1
#: Output of model function
generation: int = 0
#: Raw data prediction
y: float = None
#: True if base
is_origin: bool = False
#: Genetic score
genetic_score: float = np.inf
#: Label for this example
label: str = None
#: Label for this example
crossed: bool = False
# to make it look nicer
def __str__(self):
return str(asdict(self))
def str2bool(v):
return v.lower() in ('yes', 'true', 't', 'y', '1')
def load_dicts(new_run_dp,template_d,args):
with open(os.path.join(new_run_dp,"config.yaml"),"r") as config_file:
config_d = yaml.load(config_file, Loader=yaml.FullLoader)
data_d_keys = template_d["data"].keys()
model_d_keys = template_d["model"].keys()
run_d_keys = template_d["run"].keys()
data_d, model_d, run_d = {}, {}, {}
for k,v in config_d.items():
if k in data_d_keys:
data_d[k] = v["value"]
elif k in model_d_keys:
model_d[k] = v["value"]
elif k in run_d_keys:
run_d[k] = v["value"]
# data_d["primary_dset"] = args.primary_dset
# data_d["secondary_dset"] = args.secondary_dset
run_d["do_test"] = False
run_d["do_matching"] = False
run_d["batch_size"] = 512
return data_d, model_d, run_d
def get_samples(target_smiles,preset,num_samples):
# set up stoned
stoned_kw = {
"num_samples": num_samples
}
if preset == "medium":
stoned_kw["max_mutations"] = 2
stoned_kw["alphabet"] = exmol.get_basic_alphabet()
elif preset == "narrow":
stoned_kw["max_mutations"] = 1
stoned_kw["alphabet"] = exmol.get_basic_alphabet()
elif preset == "wide":
stoned_kw["max_mutations"] = 5
stoned_kw["alphabet"] = sf.get_semantic_robust_alphabet()
pbar = tqdm.tqdm(total=num_samples)
samples, _ = exmol.run_stoned(target_smiles,_pbar=pbar,**stoned_kw)
return samples
def calculate_genetic_score(distance, y_delta, delta_cut_off=0.5):
    # prediction changes smaller than the cutoff earn no score
    if np.abs(y_delta) < delta_cut_off:
        return 0
    # beyond the cutoff, additional prediction change earns diminishing credit
    delta_score = delta_cut_off + (np.abs(y_delta) - delta_cut_off) * .2
return (1 - distance) * delta_score
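# e.g. distance=0.2, y_delta=1.0, cutoff=0.5:
#   delta_score = 0.5 + (1.0 - 0.5) * 0.2 = 0.6, genetic_score = (1 - 0.2) * 0.6 = 0.48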
def get_genetic_molecules(
fxn_values, smiles, selfies, distances, target_molecule, flags, generation=0):
# pack them into data structure with filtering out identical
# and nan
exps = []
for i, (sm, se, d, y) in enumerate(zip(smiles, selfies, distances, fxn_values)):
exps.append(GeneticMolecule(
smiles=sm,
selfies=se,
distance=d,
similarity=1-d,
yhat=np.squeeze(y),
is_origin=False,
index=0,
generation=generation,
genetic_score=calculate_genetic_score(
d, target_molecule.yhat - np.squeeze(y), flags.delta
),
# label
# y,
))
for i, e in enumerate(exps):
e.index = i
return exps
def plot_scatter(
molecule_bank, target_molecule, flags, carc_df, fig_kwargs):
# Identify counterfactuals
pass_threshold = [mol for mol in molecule_bank if np.abs(mol.yhat - target_molecule.yhat) > flags.delta]
positive_candidates = [mol for mol in pass_threshold if mol.yhat > target_molecule.yhat]
negative_candidates = [mol for mol in pass_threshold if mol.yhat < target_molecule.yhat]
cfs = [target_molecule]
positive_candidates = sorted(positive_candidates, key=lambda mol: mol.distance)
negative_candidates = sorted(negative_candidates, key=lambda mol: mol.distance)
if negative_candidates:
cfs.append(negative_candidates[0])
if positive_candidates:
cfs.append(positive_candidates[0])
x_cfs = [mol.distance for mol in cfs]
y_cfs = [return_percentile(e.yhat, carc_df['carc_continuous'].values) for e in cfs]
cmap = "viridis"
dists = np.array([mol.distance for mol in molecule_bank])
yhats = np.array([mol.yhat for mol in molecule_bank])
pred_yhat = molecule_bank[0].yhat
lower_yhat = pred_yhat - flags.delta
upper_yhat = pred_yhat + flags.delta
true_percentile = return_percentile(target_molecule.y, carc_df['carc_continuous'].values)
pred_percentile = return_percentile(pred_yhat, carc_df['carc_continuous'].values)
upper_percentile = return_percentile(upper_yhat, carc_df['carc_continuous'].values)
lower_percentile = return_percentile(lower_yhat, carc_df['carc_continuous'].values)
# make index selection somewhat stochastic so that we
# don't select from the same cluster
idx = np.argsort(dists)[1:5 * flags.num_viz + 1]
np.random.seed(1337)
idx = np.random.choice(idx, flags.num_viz)
sns.set()
sns.set_context('talk')
fig = plt.figure(figsize=(12, 12))
gs = fig.add_gridspec(3, 3)
ax = fig.add_subplot(gs[:2, :])
scatter_dists = np.concatenate([dists[idx], x_cfs])
    scatter_yhats = np.concatenate([yhats[idx], y_cfs])
import numpy as np
from scipy.stats import lognorm
from scipy.optimize import curve_fit
from abc import ABC, abstractmethod
from icecube_tools.detector.effective_area import (
R2015AeffReader,
R2015_AEFF_FILENAME,
)
from icecube_tools.utils.data import IceCubeData, find_files, data_directory
"""
Module for handling the energy resolution
of IceCube using publicly available information.
"""
GIVEN_ETRUE = 0
GIVEN_ERECO = 1
_supported_dataset_ids = ["20150820"]
class EnergyResolutionBase(ABC):
"""
Abstract base class for energy resolution.
Stores information on how the reconstructed
energy in the detector relates to the true
neutrino energy.
"""
@property
def values(self):
"""
A 2D histogram of probabilities normalised
over reconstructed energy.
x-axis <=> true_energy
y-axis <=> reco_energy
"""
return self._values
@values.setter
def values(self, values):
        if len(np.shape(values)) != 2:
raise ValueError(str(values) + " is not a 2D array.")
else:
self._values = values
@property
def true_energy_bins(self):
return self._true_energy_bins
@true_energy_bins.setter
def true_energy_bins(self, value):
self._true_energy_bins = value
@property
def reco_energy_bins(self):
return self._reco_energy_bins
@reco_energy_bins.setter
def reco_energy_bins(self, value):
self._reco_energy_bins = value
@abstractmethod
def sample(self):
pass
class EnergyResolution(EnergyResolutionBase):
"""
Muon neutrino energy resolution using public data.
Makes use of the 2015 effective area release and its
corresponding reader class.
Based on implementation by <NAME> (@chrhck).
"""
supported_datasets = _supported_dataset_ids
def __init__(self, filename, conditional=GIVEN_ETRUE, **kwargs):
"""
Muon neutrino energy resolution using public data.
Makes use of the 2015 effective area release and its
corresponding reader class.
Based on implementation by <NAME> (@chrhck).
:param filename: Name of file to be read in.
:param kwargs: year and/or nu_type can be specified.
See release for more info.
Link: https://icecube.wisc.edu/science/data/HE_NuMu_diffuse.
"""
super().__init__()
self._conditional = conditional
self._reader = R2015AeffReader(filename, **kwargs)
self.true_energy_bins = self._reader.true_energy_bins
self.reco_energy_bins = self._reader.reco_energy_bins
self.values = self._integrate_out_cos_zenith()
self.values = self._get_conditional()
self.values = self._normalise()
self._fit_lognormal()
self._fit_polynomial()
@classmethod
def from_dataset(cls, dataset_id, fetch=True, **kwargs):
"""
Load energy resolution from publicly
available data.
:param dataset_id: Date identifying the dataset
e.g. "20181018"
:param fetch: If true, download dataset if missing
"""
if dataset_id not in _supported_dataset_ids:
raise NotImplementedError("This dataset is not currently supported")
if fetch:
data_interface = IceCubeData()
dataset = data_interface.find(dataset_id)
data_interface.fetch(dataset)
dataset_dir = data_interface.get_path_to(dataset[0])
else:
dataset_dir = data_directory
if dataset_id == "20150820":
files = find_files(dataset_dir, R2015_AEFF_FILENAME)
eres_file_name = files[0]
return cls(eres_file_name, **kwargs)
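
    # Usage sketch (hedged): extra kwargs such as `year` or `nu_type` are
    # forwarded to R2015AeffReader; accepted values depend on the 2015 release.
    # eres = EnergyResolution.from_dataset("20150820")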
def _integrate_out_cos_zenith(self):
"""
We are only interested in the energy
dependence.
"""
dim_to_int = self._reader._label_order["cos_zenith"]
return np.sum(self._reader.effective_area_values, axis=dim_to_int)
def _get_conditional(self):
"""
From the joint distribution of Etrue and Ereco
we want the conditional of Ereco | Etrue OR Etrue | Ereco.
"""
if self._conditional == GIVEN_ETRUE:
true_energy_dist = self.values.T.sum(axis=0)
# To avoid zero division
true_energy_dist[true_energy_dist == 0] = 1e-10
conditional = np.nan_to_num(self.values.T / true_energy_dist).T
elif self._conditional == GIVEN_ERECO:
reco_energy_dist = self.values.sum(axis=0)
conditional = np.nan_to_num(self.values / reco_energy_dist)
else:
raise ValueError("conditional must be GIVEN_ETRUE or GIVEN_ERECO")
return conditional
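
    # Sanity-check sketch (GIVEN_ETRUE case): each non-empty Etrue row of the
    # conditional should sum to ~1 over Ereco bins; `nonempty` is a hypothetical
    # boolean mask of rows that contain events.
    # assert np.allclose(conditional[nonempty].sum(axis=1), 1.0, atol=1e-6)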
def _normalise(self):
"""
        Normalise over the reconstructed energy so
        that at each Etrue bin there is a probability
        distribution over Ereco.
"""
if self._conditional == GIVEN_ETRUE:
normalised = np.zeros(
(len(self.true_energy_bins[:-1]), len(self.reco_energy_bins[:-1]))
)
for i, Etrue in enumerate(self.true_energy_bins[:-1]):
norm = 0
for j, Ereco in enumerate(self.reco_energy_bins[:-1]):
delta_Ereco = self.reco_energy_bins[j + 1] - Ereco
norm += self.values[i][j] * delta_Ereco
# Avoid zero division
if norm == 0:
norm = 1e-10
normalised[i] = self.values[i] / norm
elif self._conditional == GIVEN_ERECO:
normalised = np.zeros(
(len(self.true_energy_bins[:-1]), len(self.reco_energy_bins[:-1]))
).T
for i, Ereco in enumerate(self.reco_energy_bins[:-1]):
                norm = 0
                for j, Etrue in enumerate(self.true_energy_bins[:-1]):
                    delta_Etrue = self.true_energy_bins[j + 1] - Etrue
                    norm += self.values.T[i][j] * delta_Etrue
                # Avoid zero division, mirroring the GIVEN_ETRUE branch
                if norm == 0:
                    norm = 1e-10
                normalised[i] = self.values.T[i] / norm
normalised = normalised.T
return normalised
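
    # Sanity-check sketch (GIVEN_ETRUE case): each non-empty Etrue row should
    # now integrate to ~1 over Ereco, accounting for the bin widths
    # (`nonempty` is a hypothetical mask of rows that contain events).
    # widths = np.diff(self.reco_energy_bins)
    # assert np.allclose((normalised[nonempty] * widths).sum(axis=1), 1.0, atol=1e-6)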
def _fit_lognormal(self):
"""
Fit a lognormal distribution for each Etrue
and store its parameters.
"""
def _lognorm_wrapper(E, mu, sigma):
return lognorm.pdf(E, sigma, loc=0, scale=mu)
self._mu = []
self._sigma = []
if self._conditional == GIVEN_ETRUE:
self.reco_energy_bin_cen = (
self.reco_energy_bins[:-1] + self.reco_energy_bins[1:]
) / 2
for i, Etrue in enumerate(self.true_energy_bins[:-1]):
try:
fit_vals, _ = curve_fit(
_lognorm_wrapper,
self.reco_energy_bin_cen,
np.nan_to_num(self.values[i]),
p0=(Etrue, 0.5),
)
self._mu.append(fit_vals[0])
self._sigma.append(fit_vals[1])
                except Exception:
                    # curve_fit failed (e.g. did not converge)
                    self._mu.append(np.nan)
                    self._sigma.append(np.nan)
elif self._conditional == GIVEN_ERECO:
self.true_energy_bin_cen = (
self.true_energy_bins[:-1] + self.true_energy_bins[1:]
) / 2
for i, Ereco in enumerate(self.reco_energy_bins[:-1]):
try:
fit_vals, _ = curve_fit(
_lognorm_wrapper,
self.true_energy_bin_cen,
np.nan_to_num(self.values.T[i]),
p0=(Ereco, 0.5),
)
self._mu.append(fit_vals[0])
self._sigma.append(fit_vals[1])
                except Exception:
                    # curve_fit failed (e.g. did not converge)
                    self._mu.append(np.nan)
                    self._sigma.append(np.nan)
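
    # Evaluation sketch: the stored parameters reproduce the fitted resolution
    # via the same parameterisation as _lognorm_wrapper; `reco_grid` is a
    # hypothetical array of reconstructed energies, e.g. for Etrue bin i:
    # pdf = lognorm.pdf(reco_grid, self._sigma[i], loc=0, scale=self._mu[i])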
def _fit_polynomial(self):
"""
        Fit a polynomial to approximate the lognormal
        parameters at extreme energies, where
        statistics are limited.
"""
# polynomial degree
degree = 5
mu_sel = np.where(np.isfinite(self._mu))
mu = np.array(self._mu)[mu_sel]
sigma_sel = np.where(np.isfinite(self._sigma))
        sigma = np.array(self._sigma)