from tqdm import tqdm
from taskinit import ms, tb, qa
from taskinit import iatool
from taskinit import cltool
from delmod_cli import delmod_cli as delmod
from clearcal_cli import clearcal_cli as clearcal
from suncasa.utils import mstools as mstl
from suncasa.utils import helioimage2fits as hf
import shutil, os
import sunpy.coordinates.ephemeris as eph
import numpy as np
from gaincal_cli import gaincal_cli as gaincal
from applycal_cli import applycal_cli as applycal
from flagdata_cli import flagdata_cli as flagdata
from flagmanager_cli import flagmanager_cli as flagmanager
from uvsub_cli import uvsub_cli as uvsub
from split_cli import split_cli as split
from tclean_cli import tclean_cli as tclean
from ft_cli import ft_cli as ft
# def ant_trange(vis):
# ''' Figure out nominal times for tracking of old EOVSA antennas, and return time
# range in CASA format
# '''
# import eovsa_array as ea
# from astropy.time import Time
# # Get the Sun transit time, based on the date in the vis file name (must have UDByyyymmdd in the name)
# aa = ea.eovsa_array()
# date = vis.split('UDB')[-1][:8]
# slashdate = date[:4] + '/' + date[4:6] + '/' + date[6:8]
# aa.date = slashdate
# sun = aa.cat['Sun']
# mjd_transit = Time(aa.next_transit(sun).datetime(), format='datetime').mjd
# # Construct timerange based on +/- 3h55m from transit time (when all dishes are nominally tracking)
# trange = Time(mjd_transit - 0.1632, format='mjd').iso[:19] + '~' + Time(mjd_transit + 0.1632, format='mjd').iso[:19]
# trange = trange.replace('-', '/').replace(' ', '/')
# return trange
def ant_trange(vis):
''' Figure out nominal times for tracking of old EOVSA antennas, and return time
range in CASA format
'''
import eovsa_array as ea
from astropy.time import Time
from taskinit import ms
# Get timerange from the visibility file
# msinfo = dict.fromkeys(['vis', 'scans', 'fieldids', 'btimes', 'btimestr', 'inttimes', 'ras', 'decs', 'observatory'])
ms.open(vis)
# metadata = ms.metadata()
scans = ms.getscansummary()
    sk = np.sort(list(scans.keys()))
vistrange = np.array([scans[sk[0]]['0']['BeginTime'], scans[sk[-1]]['0']['EndTime']])
# Get the Sun transit time, based on the date in the vis file name (must have UDByyyymmdd in the name)
aa = ea.eovsa_array()
date = vis.split('UDB')[-1][:8]
slashdate = date[:4] + '/' + date[4:6] + '/' + date[6:8]
aa.date = slashdate
sun = aa.cat['Sun']
mjd_transit = Time(aa.next_transit(sun).datetime(), format='datetime').mjd
# Construct timerange limits based on +/- 3h55m from transit time (when all dishes are nominally tracking)
# and clip the visibility range not to exceed those limits
mjdrange = np.clip(vistrange, mjd_transit - 0.1632, mjd_transit + 0.1632)
trange = Time(mjdrange[0], format='mjd').iso[:19] + '~' + Time(mjdrange[1], format='mjd').iso[:19]
trange = trange.replace('-', '/').replace(' ', '/')
return trange
def gaussian2d(x, y, amplitude, x0, y0, sigma_x, sigma_y, theta):
x0 = float(x0)
y0 = float(y0)
a = (np.cos(theta) ** 2) / (2 * sigma_x ** 2) + (np.sin(theta) ** 2) / (2 * sigma_y ** 2)
b = -(np.sin(2 * theta)) / (4 * sigma_x ** 2) + (np.sin(2 * theta)) / (4 * sigma_y ** 2)
c = (np.sin(theta) ** 2) / (2 * sigma_x ** 2) + (np.cos(theta) ** 2) / (2 * sigma_y ** 2)
g = amplitude * np.exp(- (a * ((x - x0) ** 2) + 2 * b * (x - x0) * (y - y0) + c * ((y - y0) ** 2)))
return g
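# Hedged usage sketch (added for illustration, not part of the original
# pipeline): evaluate the rotated 2-D Gaussian above on a small grid and
# normalize it to unit sum, mirroring how image_adddisk() builds its
# convolution kernel. All numbers are arbitrary.
def _example_gaussian2d_kernel():
    xg, yg = np.meshgrid(np.linspace(-5, 5, 31), np.linspace(-5, 5, 31))
    g = gaussian2d(xg, yg, amplitude=1.0, x0=0.0, y0=0.0,
                   sigma_x=2.0, sigma_y=1.0, theta=np.pi / 4)
    # unit-sum normalization, as done before convolving with the disk model
    return g / np.nansum(g)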
def writediskxml(dsize, fdens, freq, xmlfile='SOLDISK.xml'):
import xml.etree.ElementTree as ET
# create the file structure
sdk = ET.Element('SOLDISK')
sdk_dsize = ET.SubElement(sdk, 'item')
sdk_fdens = ET.SubElement(sdk, 'item')
sdk_freqs = ET.SubElement(sdk, 'item')
sdk_dsize.set('disk_size', ','.join(dsize))
sdk_fdens.set('flux_dens', ','.join(['{:.1f}Jy'.format(s) for s in fdens]))
sdk_freqs.set('freq', ','.join(freq))
# create a new XML file with the results
mydata = ET.tostring(sdk)
if os.path.exists(xmlfile):
os.system('rm -rf {}'.format(xmlfile))
    with open(xmlfile, 'wb') as sf:  # ET.tostring returns bytes on Python 3
sf.write(mydata)
return xmlfile
def readdiskxml(xmlfile):
import astropy.units as u
import xml.etree.ElementTree as ET
tree = ET.parse(xmlfile)
root = tree.getroot()
diskinfo = {}
for elem in root:
d = elem.attrib
for k, v in d.items():
v_ = v.split(',')
v_ = [u.Unit(f).to_string().split(' ') for f in v_]
diskinfo[k] = []
for val, uni in v_:
diskinfo[k].append(float(val))
diskinfo[k] = np.array(diskinfo[k]) * u.Unit(uni)
return diskinfo
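# Hedged round-trip sketch (illustrative values): write a disk model with
# writediskxml() and parse it back with readdiskxml(). Disk sizes and
# frequencies must carry an astropy-parsable unit suffix (e.g. 'arcsec',
# 'GHz'); flux densities are plain floats that writediskxml() formats as Jy.
def _example_disk_xml_roundtrip():
    dsize = ['980.0arcsec', '976.0arcsec']
    fdens = [120.0, 150.0]
    freq = ['3.0GHz', '4.0GHz']
    xmlfile = writediskxml(dsize, fdens, freq, xmlfile='SOLDISK_example.xml')
    return readdiskxml(xmlfile)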
def image_adddisk(eofile, diskinfo, edgeconvmode='frommergeddisk', caltbonly=False):
    '''
    :param eofile: input EOVSA FITS image file
    :param diskinfo: disk model dictionary as returned by readdiskxml()
    :param edgeconvmode: available modes: frommergeddisk, frombeam
    :param caltbonly: if True, only the disk brightness temperature is returned
    :return:
    '''
from sunpy import map as smap
from suncasa.utils import plot_mapX as pmX
from scipy import constants
import astropy.units as u
from sunpy import io as sio
dsize = diskinfo['disk_size']
fdens = diskinfo['flux_dens']
freqs = diskinfo['freq']
eomap = smap.Map(eofile)
eomap_ = pmX.Sunmap(eomap)
header = eomap.meta
bmaj = header['bmaj'] * 3600 * u.arcsec
bmin = header['bmin'] * 3600 * u.arcsec
cell = (header['cdelt1'] * u.Unit(header['cunit1']) + header['cdelt2'] * u.Unit(header['cunit2'])) / 2.0
bmsize = (bmaj + bmin) / 2.0
    data = eomap.data  # remember the data order is reversed due to the FITS convention
    keys = list(header.keys())
    values = list(header.values())
mapx, mapy = eomap_.map2wcsgrids(cell=False)
mapx = mapx[:-1, :-1]
mapy = mapy[:-1, :-1]
rdisk = np.sqrt(mapx ** 2 + mapy ** 2)
k_b = constants.k
c_l = constants.c
const = 2. * k_b / c_l ** 2
pix_area = (cell.to(u.rad).value) ** 2
jy_to_si = 1e-26
factor2 = 1.
faxis = keys[values.index('FREQ')][-1]
if caltbonly:
edgeconvmode = ''
if edgeconvmode == 'frommergeddisk':
nul = header['CRVAL' + faxis] + header['CDELT' + faxis] * (1 - header['CRPIX' + faxis])
nuh = header['CRVAL' + faxis] + header['CDELT' + faxis] * (header['NAXIS' + faxis] - header['CRPIX' + faxis])
## get the frequency range of the image
nu_bound = (np.array([nul, nuh]) + 0.5 * np.array([-1, 1]) * header['CDELT' + faxis]) * u.Unit(
header['cunit' + faxis])
nu_bound = nu_bound.to(u.GHz)
## get the frequencies of the disk models
fidxs = np.logical_and(freqs > nu_bound[0], freqs < nu_bound[1])
ny, nx = rdisk.shape
freqs_ = freqs[fidxs]
fdens_ = fdens[fidxs] / 2.0 # divide by 2 because fdens is 2x solar flux density
dsize_ = dsize[fidxs]
fdisk_ = np.empty((len(freqs_), ny, nx))
fdisk_[:] = np.nan
for fidx, freq in enumerate(freqs_):
fdisk_[fidx, ...][rdisk <= dsize_[fidx].value] = 1.0
# nu = header['CRVAL' + faxis] + header['CDELT' + faxis] * (1 - header['CRPIX' + faxis])
factor = const * freq.to(u.Hz).value ** 2 # SI unit
jy2tb = jy_to_si / pix_area / factor * factor2
fdisk_[fidx, ...] = fdisk_[fidx, ...] / np.nansum(fdisk_[fidx, ...]) * fdens_[fidx].value
fdisk_[fidx, ...] = fdisk_[fidx, ...] * jy2tb
# # fdisk_[np.isnan(fdisk_)] = 0.0
tbdisk = np.nanmean(fdisk_, axis=0)
tbdisk[np.isnan(tbdisk)] = 0.0
sig2fwhm = 2.0 * np.sqrt(2 * np.log(2))
x0, y0 = 0, 0
sigx, sigy = bmaj.value / sig2fwhm, bmin.value / sig2fwhm
theta = -(90.0 - header['bpa']) * u.deg
x = (np.arange(31) - 15) * cell.value
y = (np.arange(31) - 15) * cell.value
x, y = np.meshgrid(x, y)
kernel = gaussian2d(x, y, 1.0, x0, y0, sigx, sigy, theta.to(u.radian).value)
kernel = kernel / np.nansum(kernel)
from scipy import signal
tbdisk = signal.fftconvolve(tbdisk, kernel, mode='same')
else:
nu = header['CRVAL' + faxis] + header['CDELT' + faxis] * (1 - header['CRPIX' + faxis])
freqghz = nu / 1.0e9
factor = const * nu ** 2 # SI unit
jy2tb = jy_to_si / pix_area / factor * factor2
p_dsize = np.poly1d(np.polyfit(freqs.value, dsize.value, 15))
p_fdens = np.poly1d(
np.polyfit(freqs.value, fdens.value, 15)) / 2. # divide by 2 because fdens is 2x solar flux density
if edgeconvmode == 'frombeam':
from scipy.special import erfc
factor_erfc = 2.0 ## erfc function ranges from 0 to 2
fdisk = erfc((rdisk - p_dsize(freqghz)) / bmsize.value) / factor_erfc
else:
fdisk = np.zeros_like(rdisk)
fdisk[rdisk <= p_dsize(freqghz)] = 1.0
fdisk = fdisk / np.nansum(fdisk) * p_fdens(freqghz)
tbdisk = fdisk * jy2tb
tb_disk = np.nanmax(tbdisk)
if caltbonly:
return tb_disk
else:
datanew = data + tbdisk
# datanew[np.isnan(data)] = 0.0
header['TBDISK'] = tb_disk
header['TBUNIT'] = 'K'
eomap_disk = smap.Map(datanew, header)
nametmp = eofile.split('.')
nametmp.insert(-1, 'disk')
outfits = '.'.join(nametmp)
datanew = datanew.astype(np.float32)
if os.path.exists(outfits):
os.system('rm -rf {}'.format(outfits))
sio.write_file(outfits, datanew, header)
return eomap_disk, tb_disk, outfits
def read_ms(vis):
''' Read a CASA ms file and return a dictionary of amplitude, phase, uvdistance,
uvangle, frequency (GHz) and time (MJD). Currently only returns the XX IF channel.
vis Name of the visibility (ms) folder
'''
ms.open(vis)
spwinfo = ms.getspectralwindowinfo()
nspw = len(spwinfo.keys())
for i in range(nspw):
print('Working on spw', i)
ms.selectinit(datadescid=0, reset=True)
ms.selectinit(datadescid=i)
if i == 0:
spw = ms.getdata(['amplitude', 'phase', 'u', 'v', 'axis_info'], ifraxis=True)
xxamp = spw['amplitude']
xxpha = spw['phase']
fghz = spw['axis_info']['freq_axis']['chan_freq'][:, 0] / 1e9
band = np.ones_like(fghz) * i
mjd = spw['axis_info']['time_axis']['MJDseconds'] / 86400.
uvdist = np.sqrt(spw['u'] ** 2 + spw['v'] ** 2)
uvang = np.angle(spw['u'] + 1j * spw['v'])
else:
spw = ms.getdata(['amplitude', 'phase', 'axis_info'], ifraxis=True)
xxamp = np.concatenate((xxamp, spw['amplitude']), 1)
xxpha = np.concatenate((xxpha, spw['phase']), 1)
fg = spw['axis_info']['freq_axis']['chan_freq'][:, 0] / 1e9
fghz = np.concatenate((fghz, fg))
band = np.concatenate((band, np.ones_like(fg) * i))
ms.close()
return {'amp': xxamp, 'phase': xxpha, 'fghz': fghz, 'band': band, 'mjd': mjd, 'uvdist': uvdist, 'uvangle': uvang}
def im2cl(imname, clname, convol=True, verbose=False):
if os.path.exists(clname):
os.system('rm -rf {}'.format(clname))
ia = iatool()
ia.open(imname)
ia2 = iatool()
ia2.open(imname.replace('.model', '.image'))
bm = ia2.restoringbeam()
bmsize = (qa.convert(qa.quantity(bm['major']), 'arcsec')['value'] +
qa.convert(qa.quantity(bm['minor']), 'arcsec')['value']) / 2.0
if convol:
im2 = ia.sepconvolve(types=['gaussian', 'gaussian'], widths="{0:}arcsec {0:}arcsec".format(2.5*bmsize),
overwrite=True)
ia2.done()
else:
im2 = ia
cl = cltool()
srcs = im2.findsources(point=False, cutoff=0.3, width=int(np.ceil(bmsize/2.5)))
# srcs = ia.findsources(point=False, cutoff=0.1, width=5)
if verbose:
        for k, v in srcs.items():
if k.startswith('comp'):
## note: Stokes I to XX
print(srcs[k]['flux']['value'])
# srcs[k]['flux']['value'] = srcs[k]['flux']['value'] / 2.0
cl.fromrecord(srcs)
cl.rename(clname)
cl.done()
ia.done()
im2.done()
def fit_diskmodel(out, bidx, rstn_flux, uvfitrange=[1, 150], angle_tolerance=np.pi / 2, doplot=True):
''' Given the result returned by read_ms(), plots the amplitude vs. uvdistance
separately for polar and equatorial directions rotated for P-angle, then overplots
a disk model for a disk enlarged by eqfac in the equatorial direction, and polfac
in the polar direction. Also requires the RSTN flux spectrum for the date of the ms,
determined from (example for 2019-09-01):
import rstn
frq, flux = rstn.rd_rstnflux(t=Time('2019-09-01'))
rstn_flux = rstn.rstn2ant(frq, flux, out['fghz']*1000, t=Time('2019-09-01'))
'''
from util import bl2ord, lobe
import matplotlib.pylab as plt
import sun_pos
from scipy.special import j1
import scipy.constants
mperns = scipy.constants.c / 1e9 # speed of light in m/ns
# Rotate uv angle for P-angle
pa, b0, r = sun_pos.get_pb0r(out['mjd'][0], arcsec=True)
uvangle = lobe(out['uvangle'] - pa * np.pi / 180.)
a = 2 * r * np.pi ** 2 / (180. * 3600.) # Initial scale for z, uses photospheric radius of the Sun
if doplot: f, ax = plt.subplots(3, 1)
uvmin, uvmax = uvfitrange
uvdeq = []
uvdpol = []
ampeq = []
amppol = []
zeq = []
zpol = []
# Loop over antennas 1-4
antmax = 7
at = angle_tolerance
for i in range(4):
fidx, = np.where(out['band'] == bidx) # Array of frequency indexes for channels in this band
for j, fi in enumerate(fidx):
amp = out['amp'][0, fi, bl2ord[i, i + 1:antmax]].flatten() / 10000. # Convert to sfu
# Use only non-zero amplitudes
good, = np.where(amp != 0)
amp = amp[good]
uva = uvangle[bl2ord[i, i + 1:antmax]].flatten()[good]
# Equatorial points are within +/- pi/8 of solar equator
eq, = np.where(np.logical_or(np.abs(uva) < at / 2, np.abs(uva) >= np.pi - at / 2))
# Polar points are within +/- pi/8 of solar pole
pol, = np.where(np.logical_and(np.abs(uva) >= np.pi / 2 - at / 2, np.abs(uva) < np.pi / 2 + at / 2))
uvd = out['uvdist'][bl2ord[i, i + 1:antmax]].flatten()[good] * out['fghz'][fi] / mperns # Wavelengths
# Add data for this set of baselines to global arrays
uvdeq.append(uvd[eq])
uvdpol.append(uvd[pol])
ampeq.append(amp[eq])
amppol.append(amp[pol])
zeq.append(uvd[eq])
zpol.append(uvd[pol])
uvdeq = np.concatenate(uvdeq)
uvdpol = np.concatenate(uvdpol)
uvdall = np.concatenate((uvdeq, uvdpol))
ampeq = np.concatenate(ampeq)
amppol = np.concatenate(amppol)
ampall = np.concatenate((ampeq, amppol))
zeq = np.concatenate(zeq)
zpol = np.concatenate(zpol)
zall = np.concatenate((zeq, zpol))
# These indexes are for a restricted uv-range to be fitted
ieq, = np.where(np.logical_and(uvdeq > uvmin, uvdeq <= uvmax))
ipol, = np.where(np.logical_and(uvdpol > uvmin, uvdpol <= uvmax))
iall, = np.where(np.logical_and(uvdall > uvmin, uvdall <= uvmax))
if doplot:
# Plot all of the data points
ax[0].plot(uvdeq, ampeq, 'k+')
ax[1].plot(uvdpol, amppol, 'k+')
ax[2].plot(uvdall, ampall, 'k+')
# Overplot the fitted data points in a different color
ax[0].plot(uvdeq[ieq], ampeq[ieq], 'b+')
ax[1].plot(uvdpol[ipol], amppol[ipol], 'b+')
ax[2].plot(uvdall[iall], ampall[iall], 'b+')
# Minimize ratio of points to model
ntries = 300
solfac = np.linspace(1.0, 1.3, ntries)
    d2m_eq = np.zeros(ntries, float)
    d2m_pol = np.zeros(ntries, float)
    d2m_all = np.zeros(ntries, float)
    sfac = np.zeros(ntries, float)
    sfacall = np.zeros(ntries, float)
# Loop over ntries (300) models of solar disk size factor ranging from 1.0 to 1.3 r_Sun
for k, sizfac in enumerate(solfac):
        eqpts = rstn_flux[fidx[0]] * 2 * np.abs(j1(a * sizfac * zeq[ieq]) / (a * sizfac * zeq[ieq]))
polpts = rstn_flux[fidx[0]] * 2 * np.abs(j1(a * sizfac * zpol[ipol]) / (a * sizfac * zpol[ipol]))
sfac[k] = (np.nanmedian(ampeq[ieq] / eqpts) + np.nanmedian(amppol[ipol] / polpts)) / 2
eqpts = rstn_flux[fidx[0]] * (2 * sfac[k]) * np.abs(j1(a * sizfac * zeq[ieq]) / (a * sizfac * zeq[ieq]))
polpts = rstn_flux[fidx[0]] * (2 * sfac[k]) * np.abs(j1(a * sizfac * zpol[ipol]) / (a * sizfac * zpol[ipol]))
allpts = rstn_flux[fidx[0]] * (2 * sfac[k]) * np.abs(j1(a * sizfac * zall[iall]) / (a * sizfac * zall[iall]))
sfacall[k] = np.nanmedian(ampall[iall] / allpts)
d2m_eq[k] = np.nanmedian(abs(ampeq[ieq] / eqpts - 1))
d2m_pol[k] = np.nanmedian(abs(amppol[ipol] / polpts - 1))
d2m_all[k] = np.nanmedian(abs(ampall[iall] / allpts - 1))
keq = np.argmin(d2m_eq)
kpol = np.argmin(d2m_pol)
    kall = np.argmin(d2m_all)
"""
This file is dedicated to the static nonconvex problem taking into consideration the transmission losses B
It concerns the First order solvers
Author: <NAME>
Date : 09/06/2020
"""
import numpy as np
import gurobipy as gp
from gurobipy import GRB
import time
import matplotlib.pyplot as plt
from Params import load,loss
from Static_model import SimplePriceFun
from Relaxed import LinUpperB,LinearRelaxation
from NLSolverStat import Solve
from scipy.optimize import minimize
from scipy.optimize import NonlinearConstraint
"""
This function solves the static EED with losses.
If method=="NonConvex": it considers the equality constraint
Elif "ConvexRelax": inequality constraint
else "LinRelax": uses the linear relaxation using N points
"""
def SolveGurobi(N,w_E,w_C, Demand, method="ConvexRelax"):
(Unused,Pmax,Pmin,a,b,c,alpha,beta,gamma,delta,eta,UR,DR) = load(N)
B=loss(N)
m = gp.Model('Static Nonlinear EED with transmission losses')
Pow = m.addVars(range(N),lb=Pmin,ub=Pmax, name='P')
PLoss = m.addVar()
x = m.addVars(N)
y = m.addVars(N)
for n in range(N):
m.addConstr(x[n]==delta[n]*Pow[n])
m.addGenConstrExp(x[n], y[n])
if (method=="NonConvex"):
m.setParam('NonConvex', 2)
m.addQConstr(PLoss== sum( sum(Pow[i]*Pow[j]*B[i][j] for j in range(N))for i in range(N)))
m.addConstr(Pow.sum() == Demand+PLoss)
elif (method=="ConvexRelax"):
m.addQConstr(PLoss>= sum( sum(Pow[i]*Pow[j]*B[i][j] for j in range(N))for i in range(N)))
m.addConstr(Pow.sum() == Demand+PLoss)
elif (method=="LinRelax"):
t=time.time()
(n,k_upper, k_lower) = LinearRelaxation(N,Demand)
print(time.time()-t,' sec to add for computing the extreme points')
m.addConstr( sum(Pow[i]*n[i] for i in range(N))+k_upper<=0)
m.addConstr( sum(Pow[i]*n[i] for i in range(N))+k_lower>=0)
Cost = sum(a[k]+b[k]*Pow[k]+c[k]*Pow[k]*Pow[k] for k in range(N))
Emission = sum(alpha[k]+beta[k]*Pow[k]+gamma[k]*Pow[k]*Pow[k]+eta[k]*y[k] for k in range(N))
obj= w_E*Emission + w_C*Cost
m.setObjective(obj)
m.setParam( 'OutputFlag', False )
m.optimize()
opt=obj.getValue()
P=np.zeros(N)
for i in range(N):
P[i] = Pow[i].x
return(opt,P)
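# Hedged usage sketch: solve one 10-unit instance with the convex relaxation.
# 'load' comes from Params (imported above); the unit weights below are
# illustrative, with w_E weighting emissions and w_C weighting cost.
def _example_solve_gurobi():
    N = 10
    (Demand, Pmax, Pmin, a, b, c, alpha, beta, gamma, delta, eta, UR, DR) = load(N)
    opt, P = SolveGurobi(N, w_E=1.0, w_C=1.0, Demand=Demand, method="ConvexRelax")
    return opt, P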
"""
Performs the Constrained Gradient Method
Choose between:
N=2,3,6,10(,40,100)
method='NonConvex', 'ConvexRelax'
solver='Gurobi', 'Scipy'
"""
def GradMethod(N=10, method='ConvexRelax', solver='Gurobi'):
plt.close("all")
(Demand,Pmax,Pmin,a,b,c,alpha,beta,gamma,delta,eta,UR,DR) = load(N)
B=loss(N)
price = SimplePriceFun(Pmin,Pmax,a,b,c,alpha,beta,gamma,Demand)
model=gp.Model('Projection Model')
model.setParam( 'OutputFlag', False )
P = model.addVars(range(N),lb=Pmin,ub=Pmax)
PL = model.addVar()
if (method=="NonConvex"):
model.setParam('NonConvex', 2)
model.addQConstr(PL== sum( sum(P[i]*P[j]*B[i][j] for j in range(N))for i in range(N)))
model.addConstr(P.sum() == Demand+PL)
else:
model.addQConstr(PL>= sum( sum(P[i]*P[j]*B[i,j] for j in range(N))for i in range(N)))
model.addConstr(P.sum()-PL == Demand, name='Demand')
if (solver=='Gurobi'):
t0=time.time()
[opt,P_opt]=SolveGurobi(N,price,1,Demand, method)
t1=time.time()
print(t1-t0 ,'sec for Gurobi')
"""Computing P0"""
model.setObjective(0)
model.optimize()
t2=time.time()
print(t2-t1 ,'P0')
P0=np.zeros(N)
for i in range(N):
P0[i] = P[i].x
else:
t0=time.time()
[opt,P_opt]=Solve(N,price,1,Demand)
t1=time.time()
print(t1-t0 ,'sec for Scipy')
"""Computing P0"""
bnds=np.transpose(np.vstack((Pmin,Pmax)))
P0=Pmin.copy()
def objective(P):
return (0)
def Gradient(P):
return(np.zeros(N))
def Hessian(P):
return(np.zeros([N,N]))
def cons_f(P):
PL=sum(sum(P[i]*P[j]*B[i,j] for j in range(N)) for i in range(N))
sum_eq=sum(P)-PL-Demand
return (sum_eq)
if (N<=10):
const=[{'type': 'eq', 'fun': cons_f}]
solution = minimize(objective ,P0, method='SLSQP',jac=Gradient, bounds=bnds,constraints=const)
else:
def cons_J(P):
Jac=np.ones(N)-2*P@B
return(Jac)
def cons_H(P,v):
return(-2*v*B)
NL_const = NonlinearConstraint(cons_f, 0, 0, jac=cons_J, hess=cons_H)
solution = minimize(objective ,P0, method='trust-constr',jac=Gradient,
hess=Hessian,constraints=NL_const, bounds=bnds)
P0 = solution.x
t2=time.time()
print(t2-t1 ,'P0')
print()
print("Gradient Method")
tol=1e-2
L=max(2*c+price*(2*gamma+delta*delta*eta*np.exp(delta*Pmax)))
mu=min(2*c+price*(2*gamma+delta*delta*eta*np.exp(delta*Pmin)))
Maxiter=int(0.25*(1+L/mu)*np.log(L*np.linalg.norm(P0-P_opt)**2/(2*tol)))+1
Maxiter=min(Maxiter,50) #Otherwise too large vector of iterates
Obj=np.zeros(Maxiter)
C = sum(a[k]+b[k]*P0[k]+c[k]*P0[k]*P0[k] for k in range(N))
E = sum(alpha[k]+beta[k]*P0[k]+gamma[k]*P0[k]*P0[k]+eta[k]*np.exp(delta[k]*P0[k]) for k in range(N))
Obj[0]=C+price*E
#Used if method=ConvexRelax
GradRate=np.zeros(Maxiter)
normP0=np.linalg.norm(P0-P_opt)**2
GradRate[0]=L/2*normP0
Pk=P0.copy()
it=1
if (method=='NonConvex'):
print(L,mu)
h=1/L
else:
h=2/(mu+L)
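    # The constant steps above follow standard first-order theory: h = 1/L
    # guarantees descent for an L-smooth objective, while h = 2/(L + mu) is the
    # classical optimal constant step for an L-smooth, mu-strongly convex
    # objective, giving the linear rate ((L - mu)/(L + mu))^(2k) tracked in
    # GradRate below.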
while (it<Maxiter and tol<Obj[it-1]-opt):
GradC=b+c*Pk*2
GradE= beta+gamma*Pk*2+delta*eta*np.exp(delta*Pk)
Grad=GradC+price*GradE
Pk=Pk-h*Grad
projection= sum((P[i]-Pk[i])*(P[i]-Pk[i]) for i in range(N))
model.setObjective(projection)
model.optimize()
if model.Status!= GRB.OPTIMAL:
print('Optimization was stopped with status ' + str(model.Status))
for i in range(N):
Pk[i] = P[i].x
C = sum(a[k]+b[k]*Pk[k]+c[k]*Pk[k]*Pk[k] for k in range(N))
E = sum(alpha[k]+beta[k]*Pk[k]+gamma[k]*Pk[k]*Pk[k]+eta[k]*np.exp(delta[k]*Pk[k]) for k in range(N))
Obj[it]=C+price*E
GradRate[it]=L/2*((L-mu)/(L+mu))**(2*it)*normP0
if( (it % 10)==0):
print(it, " of ", Maxiter)
it=it+1
plt.figure()
if (method=='ConvexRelax'):
plt.plot(range(it),GradRate[:it],'b--', label='Gradient theoretical rate')
plt.plot(range(it),Obj[:it]-np.ones(it)*opt,'b', label='Gradient Method ')
plt.xlabel('Iterations')
plt.ylabel('$f_k-f*$')
plt.title('Rate of convergence of the Gradient method ')
plt.legend()
plt.grid(True)
t3=time.time()
print(t3-t1, "for gradient")
"""
Performs the Accelerated Method
Choose between:
N=2,3,6,10(,40,100)
method='NonConvex', 'ConvexRelax'
solver='Gurobi', 'Scipy'
"""
def AccMethod(N=10, method='ConvexRelax', solver='Gurobi'):
(Demand,Pmax,Pmin,a,b,c,alpha,beta,gamma,delta,eta,UR,DR) = load(N)
B=loss(N)
price = SimplePriceFun(Pmin,Pmax,a,b,c,alpha,beta,gamma,Demand)
model=gp.Model('Projection Model')
model.setParam( 'OutputFlag', False )
P = model.addVars(range(N),lb=Pmin,ub=Pmax)
PL = model.addVar()
if (method=="NonConvex"):
model.setParam('NonConvex', 2)
model.addQConstr(PL== sum( sum(P[i]*P[j]*B[i][j] for j in range(N))for i in range(N)))
model.addConstr(P.sum() == Demand+PL)
elif (method=="ConvexRelax"):
model.addQConstr(PL>= sum( sum(P[i]*P[j]*B[i,j] for j in range(N))for i in range(N)))
model.addConstr(P.sum()-PL == Demand, name='Demand')
if (solver=='Gurobi'):
t0=time.time()
[opt,P_opt]=SolveGurobi(N,price,1,Demand, method)
t1=time.time()
print(t1-t0 ,'sec for Gurobi')
"""Computing P0"""
model.setObjective(0)
model.optimize()
t2=time.time()
print(t2-t1 ,'P0')
P0=np.zeros(N)
for i in range(N):
P0[i] = P[i].x
else:
t0=time.time()
[opt,P_opt]=Solve(N,price,1,Demand)
t1=time.time()
print(t1-t0 ,'sec for Scipy')
"""Computing P0"""
bnds=np.transpose(np.vstack((Pmin,Pmax)))
P0=Pmin.copy()
def objective(P):
return (0)
def Gradient(P):
return(np.zeros(N))
def Hessian(P):
return(np.zeros([N,N]))
def cons_f(P):
PL=sum(sum(P[i]*P[j]*B[i,j] for j in range(N)) for i in range(N))
sum_eq=sum(P)-PL-Demand
return (sum_eq)
if (N<=10):
const=[{'type': 'eq', 'fun': cons_f}]
solution = minimize(objective ,P0, method='SLSQP',jac=Gradient, bounds=bnds,constraints=const)
else:
def cons_J(P):
Jac=np.ones(N)-2*P@B
return(Jac)
def cons_H(P,v):
return(-2*v*B)
NL_const = NonlinearConstraint(cons_f, 0, 0, jac=cons_J, hess=cons_H)
solution = minimize(objective ,P0, method='trust-constr',jac=Gradient,
hess=Hessian,constraints=NL_const, bounds=bnds)
P0 = solution.x
t2=time.time()
print(t2-t1 ,'P0')
tol=1e-2
L=max(2*c+price*(2*gamma+delta*delta*eta*np.exp(delta*Pmax)))
mu=min(2*c+price*(2*gamma+delta*delta*eta*np.exp(delta*Pmin)))
C = sum(a[k]+b[k]*P0[k]+c[k]*P0[k]*P0[k] for k in range(N))
E = sum(alpha[k]+beta[k]*P0[k]+gamma[k]*P0[k]*P0[k]+eta[k]*np.exp(delta[k]*P0[k]) for k in range(N))
f0= C+price*E
Maxiter= int(np.sqrt(L/mu)*np.log(2*(f0-opt)/tol))+1
Maxiter=min(Maxiter,50)
Obj=np.zeros(Maxiter)
Obj[0]=f0
AccRate=np.zeros(Maxiter)
AccRate[0]=2*(Obj[0]-opt)
print()
print("Accelerated Gradient")
it=1
Pk=P0.copy()
yk=Pk.copy()
stepsize=(np.sqrt(L)-np.sqrt(mu))/(np.sqrt(L)+np.sqrt(mu))
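    # Nesterov's accelerated method for strongly convex objectives: a gradient
    # step of 1/L at the extrapolated point yk, followed by extrapolation with
    # momentum coefficient (sqrt(L) - sqrt(mu))/(sqrt(L) + sqrt(mu)), yields
    # the accelerated linear rate of roughly (1 - sqrt(mu/L))^k.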
while (it<Maxiter and tol<Obj[it-1]-opt):
GradC=b+c*yk*2
GradE= beta+gamma*yk*2+delta*eta*np.exp(delta*yk)
Grad=GradC+price*GradE
Prev=Pk.copy()
Pk=yk-Grad/L
projection= sum((P[i]-Pk[i])*(P[i]-Pk[i]) for i in range(N))
model.setObjective(projection)
model.optimize()
for i in range(N):
Pk[i] = P[i].x
yk=Pk+stepsize*(Pk-Prev)
C = sum(a[k]+b[k]*Pk[k]+c[k]*Pk[k]*Pk[k] for k in range(N))
        E = sum(alpha[k]+beta[k]*Pk[k]+gamma[k]*Pk[k]*Pk[k]+eta[k]*np.exp(delta[k]*Pk[k]) for k in range(N))
import numpy as np
import torch
from torch.optim import Adam
import gym
from gym.spaces import Box, Discrete
import time
import spinup.algos.pytorch.ppo.core as core
from spinup.utils.logx import EpochLogger
from spinup.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads
from spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs
class StickyActionEnv(gym.Wrapper):
def __init__(self, env, p=0.25):
super(StickyActionEnv, self).__init__(env)
self.p = p
self.last_action = 0
def reset(self):
self.last_action = 0
return self.env.reset()
def step(self, action):
if self.unwrapped.np_random.uniform() < self.p:
action = self.last_action
self.last_action = action
obs, reward, done, info = self.env.step(action)
return obs, reward, done, info
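# Hedged usage sketch: wrap a gym environment so that, with probability p, the
# previous action is repeated (sticky actions, a standard way of injecting
# stochasticity). The environment id below is illustrative.
def _example_sticky_env():
    env = StickyActionEnv(gym.make('CartPole-v1'), p=0.25)
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
    return obs, reward, done, info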
class PPOBuffer_2V:
"""
A buffer for storing trajectories experienced by a PPO agent interacting
with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
for calculating the advantages of state-action pairs.
"""
def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95, two_v_heads=True):
self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
self.adv_buf = np.zeros(size, dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
        self.intr_rew_buf = np.zeros(size, dtype=np.float32)
import numpy as np
import shapely.geometry as geom
class Bbox:
def __init__(self, name, part_id, depth_image, xyz, box_size, projection):
if not isinstance(xyz, np.ndarray):
raise ValueError("xyz must be an np.ndarray")
self.name = name
self.id = part_id
self.center = np.array([xyz[0], xyz[1]])
self.z = xyz[2]
self.im_d = depth_image
self.im_d[self.im_d == 0] = 255
        x_delta_scaled = box_size[0]/2
        y_delta_scaled = box_size[1]/2
        self.weight = 1.0
self.xmin, self.xmax = xyz[0]-x_delta_scaled, xyz[0]+x_delta_scaled
self.ymin, self.ymax = xyz[1]-y_delta_scaled, xyz[1]+y_delta_scaled
self.poly = geom.box(self.xmin, self.ymin, self.xmax, self.ymax)
self.color_min = (int(projection['fx']*self.xmin/xyz[2] + projection['cx']),
int(projection['fy']*self.ymin/xyz[2] + projection['cy']))
self.color_max = (int(projection['fx']*self.xmax/xyz[2] + projection['cx']),
int(projection['fy']*self.ymax/xyz[2] + projection['cy']))
self.depth_min = (int(projection['fx_d']*self.xmin/xyz[2] + projection['cx_d']),
int(projection['fy_d']*self.ymin/xyz[2] + projection['cy_d']))
self.depth_max = (int(projection['fx_d']*self.xmax/xyz[2] + projection['cx_d']),
int(projection['fy_d']*self.ymax/xyz[2] + projection['cy_d']))
def __str__(self):
return "{{{: 1.4f},{: 1.4f}}}, {{{: 1.4f},{: 1.4f}}}".format(self.xmin, self.ymin, self.xmax, self.ymax)
def __repr__(self):
return "(bbox: {{{: 1.4f},{: 1.4f}}}, {{{: 1.4f},{: 1.4f}}})".format(self.xmin, self.ymin, self.xmax, self.ymax)
def size(self):
return (self.xmax - self.xmin) * (self.ymax - self.ymin)
def get_bb_depth_matrix(self):
""" Get the portion of the depth image inside the bounding box """
min_x, max_x = sorted((self.depth_min[0], self.depth_max[0]))
min_y, max_y = sorted((self.depth_min[1], self.depth_max[1]))
bounded_im = self.im_d[min_y: max_y+1, min_x: max_x+1]
return bounded_im
def overlap(self, bb2):
dx = min(self.xmax, bb2.xmax) - max(self.xmin, bb2.xmin)
dy = min(self.ymax, bb2.ymax) - max(self.ymin, bb2.ymin)
if (dx>=0) and (dy>=0):
return dx*dy
return 0
def p_over(self, bb2):
return self.overlap(bb2)/(min(self.size(), bb2.size()))
def p_depth(self, bb2):
bounded_im1 = self.get_bb_depth_matrix()
bounded_im2 = bb2.get_bb_depth_matrix()
        print(bounded_im1.size == 0 or bounded_im2.size == 0)
mean1 = np.mean(bounded_im1)
mean2 = np.mean(bounded_im2)
stdev1 = np.std(bounded_im1)
stdev2 = np.std(bounded_im2)
half_negative_square_of_mean_difference = -1/2 * (mean1 - mean2) ** 2
term1_power = half_negative_square_of_mean_difference / (stdev1 ** 2)
term2_power = half_negative_square_of_mean_difference / (stdev2 ** 2)
        # assumption: combine the two Gaussian overlap terms symmetrically
        out = (np.exp(term1_power) + np.exp(term2_power)) / 2
        return out
import abc
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numexpr as ne
import numpy as np
import pandas as pd
import scipy.sparse
import scipy.spatial
from sklearn.gaussian_process.kernels import Kernel
from sklearn.preprocessing import normalize
from sklearn.utils import check_scalar
from datafold.pcfold.distance import compute_distance_matrix
from datafold.pcfold.timeseries.accessor import TSCAccessor
from datafold.utils.general import (
df_type_and_indices_from,
diagmat_dot_mat,
is_df_same_index,
is_float,
is_integer,
is_symmetric_matrix,
mat_dot_diagmat,
remove_numeric_noise_symmetric_matrix,
)
KernelType = Union[pd.DataFrame, np.ndarray, scipy.sparse.csr_matrix]
def _apply_kernel_function(distance_matrix, kernel_function):
if scipy.sparse.issparse(distance_matrix):
kernel = distance_matrix
# NOTE: applies on stored data, it is VERY important, that real distance zeros are
# included in 'distance_matrix' (E.g. normalized kernels have to have a 1.0 on
# the diagonal) are included in the sparse matrix!
kernel.data = kernel_function(kernel.data)
else:
kernel = kernel_function(distance_matrix)
return kernel
def _apply_kernel_function_numexpr(distance_matrix, expr, expr_dict=None):
expr_dict = expr_dict or {}
assert "D" not in expr_dict.keys()
if scipy.sparse.issparse(distance_matrix):
# copy because the distance matrix may be used further by the user
distance_matrix = distance_matrix.copy()
expr_dict["D"] = distance_matrix.data
ne.evaluate(expr, expr_dict, out=distance_matrix.data)
return distance_matrix # returns actually the kernel
else:
expr_dict["D"] = distance_matrix
return ne.evaluate(expr, expr_dict)
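# Minimal sketch of how the helper above is used by the kernels below: apply a
# Gaussian expression to a dense matrix of squared distances. Values are
# illustrative.
def _example_apply_kernel_function_numexpr():
    distance_matrix = np.array([[0.0, 1.0], [1.0, 0.0]])
    return _apply_kernel_function_numexpr(
        distance_matrix, expr="exp((- 1 / (2*eps)) * D)", expr_dict={"eps": 1.0}
    )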
def _symmetric_matrix_division(
matrix: Union[np.ndarray, scipy.sparse.spmatrix],
vec: np.ndarray,
vec_right: Optional[np.ndarray] = None,
scalar: float = 1.0,
value_zero_division: Union[str, float] = "raise",
) -> Union[np.ndarray, scipy.sparse.csr_matrix,]:
r"""Symmetric division, often appearing in kernels.
.. math::
\frac{M_{i, j}}{a v^(l)_i v^(r)_j}
where :math:`M` is a (kernel-) matrix and its elements are divided by the
(left and right) vector elements :math:`v` and scalar :math:`a`.
.. warning::
The implementation is in-place and can overwrites the input matrix. Make a copy
beforehand if the matrix values are still required.
Parameters
----------
matrix
Matrix of shape `(n_rows, n_columns)` to apply symmetric division on.
If matrix is square and ``vec_right=None``, then `matrix` is assumed to be
symmetric (this enables removing numerical noise to return a perfectly symmetric
matrix).
    vec
        Vector of shape `(n_rows,)` in the denominator (note: the reciprocal is
        taken internally by the function).
vec_right
Vector of shape `(n_columns,)`. If matrix is non-square or matrix input is
not symmetric, then this input is required. If None, it is set
to ``vec_right=vec``.
scalar
Scalar ``a`` in the denominator.
Returns
-------
"""
if matrix.ndim != 2:
raise ValueError("Parameter 'matrix' must be a two dimensional array.")
if matrix.shape[0] != matrix.shape[1] and vec_right is None:
raise ValueError(
"If 'matrix' is non-square, then 'vec_right' must be provided."
)
vec = vec.astype(float)
if (vec == 0.0).any():
if value_zero_division == "raise":
raise ZeroDivisionError(
f"Encountered zero values in division in {(vec == 0).sum()} points."
)
else:
# division results into 'nan' without raising a ZeroDivisionWarning. The
# nan values will be replaced later
vec[vec == 0.0] = np.nan
vec_inv_left = np.reciprocal(vec)
if vec_right is None:
vec_inv_right = vec_inv_left.view()
else:
vec_right = vec_right.astype(float)
if (vec_right == 0.0).any():
if value_zero_division == "raise":
raise ZeroDivisionError(
f"Encountered zero values in division in {(vec == 0).sum()}"
)
else:
vec_right[vec_right == 0.0] = np.inf
vec_inv_right = np.reciprocal(vec_right.astype(float))
if vec_inv_left.ndim != 1 or vec_inv_left.shape[0] != matrix.shape[0]:
raise ValueError(
f"Invalid input: 'vec.shape={vec.shape}' is not compatible with "
f"'matrix.shape={matrix.shape}'."
)
if vec_inv_right.ndim != 1 or vec_inv_right.shape[0] != matrix.shape[1]:
raise ValueError(
f"Invalid input: 'vec_right.shape={vec_inv_right.shape}' is not compatible "
f"with 'matrix.shape={matrix.shape}'."
)
if scipy.sparse.issparse(matrix):
left_inv_diag_sparse = scipy.sparse.spdiags(
vec_inv_left, 0, m=matrix.shape[0], n=matrix.shape[0]
)
right_inv_diag_sparse = scipy.sparse.spdiags(
vec_inv_right, 0, m=matrix.shape[1], n=matrix.shape[1]
)
# The performance of DIA-sparse matrices is good if the matrix is actually
# sparse. I.e. the performance drops for a sparse-dense-sparse multiplication.
# The zeros are removed in the matrix multiplication, but because 'matrix' is
# usually a distance matrix we need to preserve the "true zeros"!
matrix.data[matrix.data == 0] = np.inf
matrix = left_inv_diag_sparse @ matrix @ right_inv_diag_sparse
matrix.data[np.isinf(matrix.data)] = 0
# this imposes precedence order
# --> np.inf/np.nan -> np.nan
# i.e. for cases with 0/0, set 'value_zero_division'
if isinstance(value_zero_division, (int, float)):
matrix.data[np.isnan(matrix.data)] = value_zero_division
else:
# This computes efficiently:
# np.diag(1/vector_elements) @ matrix @ np.diag(1/vector_elements)
matrix = diagmat_dot_mat(vec_inv_left, matrix, out=matrix)
matrix = mat_dot_diagmat(matrix, vec_inv_right, out=matrix)
if isinstance(value_zero_division, (int, float)):
matrix[np.isnan(matrix)] = value_zero_division
# sparse and dense
if vec_right is None:
matrix = remove_numeric_noise_symmetric_matrix(matrix)
if scalar != 1.0:
scalar = 1.0 / scalar
matrix = np.multiply(matrix, scalar, out=matrix)
return matrix
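# Small numeric sanity sketch for the symmetric division above: with
# vec = [2, 2] and scalar = 1, every entry M_ij is divided by vec_i * vec_j = 4.
# The copy avoids the in-place overwrite noted in the docstring.
def _example_symmetric_matrix_division():
    matrix = np.array([[4.0, 8.0], [8.0, 4.0]])
    return _symmetric_matrix_division(matrix.copy(), vec=np.array([2.0, 2.0]))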
def _conjugate_stochastic_kernel_matrix(
kernel_matrix: Union[np.ndarray, scipy.sparse.spmatrix]
) -> Tuple[Union[np.ndarray, scipy.sparse.spmatrix], scipy.sparse.dia_matrix]:
r"""Conjugate transformation to obtain symmetric (conjugate) kernel matrix with same
spectrum properties.
    Rabin et al. :cite:`rabin_heterogeneous_2012` state in Eq. 3.1
    (notation adapted):
.. math::
P = D^{-1} K
the standard row normalization. Eq. 3.3 shows that matrix :math:`P` has a similar
matrix with
.. math::
A = D^{1/2} P D^{-1/2}
Replacing :math:`P` from above we get:
.. math::
A = D^{1/2} D^{-1} K D^{-1/2}
A = D^{-1/2} K D^{-1/2}
Where the last equation is the conjugate transformation performed in this function.
The matrix :math:`A` has the same eigenvalues to :math:`P` and the eigenvectors
can be recovered (Eq. 3.4. in reference):
.. math::
\Psi = D^{-1/2} V
where :math:`V` are the eigenvectors of :math:`A` and :math:`\Psi` from matrix \
:math:`P`.
.. note::
The conjugate-stochastic matrix is not stochastic, but still has the trivial
eigenvalue 1 (i.e. the row-sums are not equal to 1).
Parameters
----------
kernel_matrix
non-symmetric kernel matrix
Returns
-------
Tuple[Union[np.ndarray, scipy.sparse.spmatrix], scipy.sparse.dia_matrix]
        conjugate matrix (type as `kernel_matrix`) and (sparse) diagonal matrix to
        recover the eigenvectors
References
----------
:cite:`rabin_heterogeneous_2012`
"""
left_vec = kernel_matrix.sum(axis=1)
if scipy.sparse.issparse(kernel_matrix):
        # to np.ndarray in case it is the deprecated np.matrix format
left_vec = left_vec.A1
if left_vec.dtype.kind != "f":
left_vec = left_vec.astype(float)
left_vec = np.sqrt(left_vec, out=left_vec)
kernel_matrix = _symmetric_matrix_division(
kernel_matrix, vec=left_vec, vec_right=None
)
# This is D^{-1/2} in sparse matrix form.
basis_change_matrix = scipy.sparse.diags(np.reciprocal(left_vec, out=left_vec))
return kernel_matrix, basis_change_matrix
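# Hedged sketch of the eigenvector recovery described in the docstring: take
# the eigenpairs of the symmetric conjugate matrix A (dense, symmetric kernel
# assumed) and map them back to eigenvectors of P = D^{-1} K via Psi = D^{-1/2} V.
def _example_conjugate_eigenvectors(kernel_matrix):
    A, basis_change_matrix = _conjugate_stochastic_kernel_matrix(kernel_matrix.copy())
    eigvals, V = np.linalg.eigh(A)
    Psi = basis_change_matrix @ V  # eigenvectors of the row-stochastic matrix P
    return eigvals, Psi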
def _stochastic_kernel_matrix(kernel_matrix: Union[np.ndarray, scipy.sparse.spmatrix]):
"""Normalizes matrix rows.
This function performs
.. math::
M = D^{-1} K
where matrix :math:`M` is the row-normalized kernel from :math:`K` by the
matrix :math:`D` with the row sums of :math:`K` on the diagonal.
.. note::
If the kernel matrix is evaluated component wise (points compared to reference
points), then outliers can have a row sum close to zero. In this case the
respective element on the diagonal is set to zero. For a pairwise kernel
(pdist) this can not happen, as the diagonal element must be non-zero.
Parameters
----------
kernel_matrix
kernel matrix (square or rectangular) to normalize
Returns
-------
Union[np.ndarray, scipy.sparse.spmatrix]
normalized kernel matrix with type same as `kernel_matrix`
"""
if scipy.sparse.issparse(kernel_matrix):
# in a microbenchmark this turned out to be the fastest solution for sparse
# matrices
kernel_matrix = normalize(kernel_matrix, copy=False, norm="l1")
else: # dense
normalize_diagonal = np.sum(kernel_matrix, axis=1)
with np.errstate(divide="ignore", over="ignore"):
# especially in cdist computations there can be far away outliers
# (or very small scale/epsilon). This results in elements near 0 and
# the reciprocal can then
# - be inf
# - overflow (resulting in negative values)
# these cases are catched with 'bool_invalid' below
normalize_diagonal = np.reciprocal(
normalize_diagonal, out=normalize_diagonal
)
bool_invalid = np.logical_or(
np.isinf(normalize_diagonal), normalize_diagonal < 0
)
normalize_diagonal[bool_invalid] = 0
kernel_matrix = diagmat_dot_mat(normalize_diagonal, kernel_matrix)
return kernel_matrix
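# Quick sketch: row-normalizing a dense kernel matrix turns each row into a
# probability vector (row sums of one), the Markov-chain view used e.g. in
# diffusion maps. Values are illustrative.
def _example_stochastic_kernel_matrix():
    kernel_matrix = np.array([[1.0, 1.0], [3.0, 1.0]])
    # expected result: [[0.5, 0.5], [0.75, 0.25]]
    return _stochastic_kernel_matrix(kernel_matrix.copy())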
def _kth_nearest_neighbor_dist(
distance_matrix: Union[np.ndarray, scipy.sparse.csr_matrix], k
) -> np.ndarray:
"""Compute the distance to the `k`-th nearest neighbor.
Parameters
----------
distance_matrix
Distance matrix of shape `(n_samples_Y, n_samples_X)` from which to find the
`k`-th nearest neighbor and its corresponding distance to return. If the matrix is
sparse each point must have a minimum number of `k` neighbours (i.e. non-zero
elements per row).
k
The distance of the `k`-th nearest neighbor.
Returns
-------
numpy.ndarray
distance values
"""
if not is_integer(k):
raise ValueError(f"parameter 'k={k}' must be a positive integer")
else:
# make sure we deal with Python built-in
k = int(k)
    if not (1 <= k <= distance_matrix.shape[1]):
raise ValueError(
"'k' must be an integer between 1 and "
f"distance_matrix.shape[1]={distance_matrix.shape[1]}"
)
if isinstance(distance_matrix, np.ndarray):
dist_knn = np.partition(distance_matrix, k - 1, axis=1)[:, k - 1]
elif isinstance(distance_matrix, scipy.sparse.csr_matrix):
# see mircobenchmark_kth_nn.py for a comparison of implementations for the
# sparse case
def _get_kth_largest_elements_sparse(
data: np.ndarray,
indptr: np.ndarray,
row_nnz,
k_neighbor: int,
):
dist_knn = np.zeros(len(row_nnz))
for i in range(len(row_nnz)):
start_row = indptr[i]
dist_knn[i] = np.partition(
data[start_row : start_row + row_nnz[i]], k_neighbor - 1
)[k_neighbor - 1]
return dist_knn
row_nnz = distance_matrix.getnnz(axis=1)
if (row_nnz < k).any():
raise ValueError(
f"There are {(row_nnz < k).sum()} points that "
f"do not have at least k_neighbor={k}."
)
dist_knn = _get_kth_largest_elements_sparse(
distance_matrix.data,
distance_matrix.indptr,
row_nnz,
k,
)
else:
raise TypeError(f"type {type(distance_matrix)} not supported")
return dist_knn
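# Sketch: distance to the k-th nearest neighbour per row of a dense distance
# matrix. With zeros on the diagonal (pdist case), k=1 returns the trivial
# self-distance, so k is typically chosen larger. Values are illustrative.
def _example_kth_nearest_neighbor_dist():
    distance_matrix = np.array([[0.0, 1.0, 2.0],
                                [1.0, 0.0, 3.0],
                                [2.0, 3.0, 0.0]])
    return _kth_nearest_neighbor_dist(distance_matrix, k=2)  # -> [1., 1., 2.]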
class BaseManifoldKernel(Kernel):
@abc.abstractmethod
def __call__(self, X, Y=None, *, dist_kwargs=None, **kernel_kwargs):
"""Compute kernel matrix.
If `Y=None`, then the pairwise-kernel is computed with `Y=X`. If `Y` is given,
then the kernel matrix is computed component-wise with `X` being the reference
and `Y` the query point cloud.
Because the kernel can return a variable number of return values, this is
unified with :meth:`PCManifoldKernel.read_kernel_output`.
Parameters
----------
args
kwargs
See parameter documentation in subclasses.
Returns
-------
"""
def diag(self, X):
"""(Not implemented, not used in datafold)
Raises
------
NotImplementedError
this is only to overwrite abstract method in super class
"""
raise NotImplementedError("base class")
def is_stationary(self):
"""(Not implemented, not used in datafold)
Raises
------
NotImplementedError
this is only to overwrite abstract method in super class
"""
# in datafold there is no handling of this attribute, if required this has to
# be implemented
raise NotImplementedError("base class")
def __repr__(self):
param_str = ", ".join(
[f"{name}={val}" for name, val in self.get_params().items()]
)
return f"{self.__class__.__name__}({param_str})"
def _read_kernel_kwargs(self, attrs: Optional[List[str]], kernel_kwargs: dict):
return_values: List[Any] = []
if attrs is not None:
for attr in attrs:
return_values.append(kernel_kwargs.pop(attr, None))
if kernel_kwargs != {}:
raise KeyError(
f"kernel_kwargs.keys = {kernel_kwargs.keys()} are not " f"supported"
)
if len(return_values) == 0:
return None
elif len(return_values) == 1:
return return_values[0]
else:
return return_values
@staticmethod
def read_kernel_output(
kernel_output: Union[Union[np.ndarray, scipy.sparse.csr_matrix], Tuple]
) -> Tuple[Union[np.ndarray, scipy.sparse.csr_matrix], Dict, Dict]:
"""Unifies kernel output for all possible return scenarios of a kernel.
        This is required for models that allow generic kernels to be set, where the
        number of outputs of the internal kernel is not known *a priori*.
A kernel must return a computed kernel matrix in the first position. The two
other places are optional:
2. A dictionary containing keys that are required for a component-wise kernel
computation (and set in `**kernel_kwargs`, see also below). Examples are
computed density values.
        3. A dictionary that contains additional information computed during the
           kernel evaluation. This extra information must always be at the third return
position.
.. code-block:: python
            # we don't know the exact kernel and therefore how many return
            # values are contained in kernel_output
            kernel_output = compute_kernel_matrix(X)
            # we read the output and obtain the three possible places
kernel_matrix, cdist_kwargs, extra_info = \
PCManifold.read_kernel_output(kernel_output)
# we can compute a follow up component-wise kernel matrix
cdist_kernel_output = compute_kernel_matrix(X,Y, **cdist_kwargs)
Parameters
----------
kernel_output
Output from an generic kernel, from which we don't know if it contains one,
two or three return values.
Returns
-------
Union[numpy.ndarray, scipy.sparse.csr_matrix]
Kernel matrix.
Dict
Data required for follow-up component-wise computation. The dictionary
should contain keys that can be included as `**kernel_kwargs` to the follow-up
``__call__``. Dictionary is empty if no data is contained in kernel output.
Dict
Quantities of interest with keys specific of the respective kernel.
Dictionary is empty if no data is contained in kernel output.
"""
if isinstance(
kernel_output, (pd.DataFrame, np.ndarray, scipy.sparse.csr_matrix)
):
# easiest case, we simply return the kernel matrix
kernel_matrix, ret_cdist, ret_extra = [kernel_output, None, None]
elif isinstance(kernel_output, tuple):
if len(kernel_output) == 1:
kernel_matrix, ret_cdist, ret_extra = [kernel_output[0], None, None]
elif len(kernel_output) == 2:
kernel_matrix, ret_cdist, ret_extra = (
kernel_output[0],
kernel_output[1],
None,
)
elif len(kernel_output) == 3:
kernel_matrix, ret_cdist, ret_extra = kernel_output
else:
raise ValueError(
"kernel_output must has more than three elements. "
"Please report bug"
)
else:
raise TypeError(
"'kernel_output' must be either pandas.DataFrame (incl. TSCDataFrame), "
"numpy.ndarray or tuple. Please report bug."
)
ret_cdist = ret_cdist or {}
ret_extra = ret_extra or {}
if not isinstance(
kernel_matrix, (pd.DataFrame, np.ndarray, scipy.sparse.csr_matrix)
):
raise TypeError(
f"Illegal type of kernel_matrix (type={type(kernel_matrix)}. "
f"Please report bug."
)
return kernel_matrix, ret_cdist, ret_extra
class PCManifoldKernel(BaseManifoldKernel):
"""Abstract base class for kernels acting on point clouds.
See Also
--------
:py:class:`PCManifold`
"""
@abc.abstractmethod
def __call__(
self,
X: np.ndarray,
Y: Optional[np.ndarray] = None,
*,
dist_kwargs: Optional[Dict[str, object]] = None,
**kernel_kwargs,
):
"""Abstract method to compute the kernel matrix from a point cloud.
Parameters
----------
X
Data of shape `(n_samples_X, n_features)`.
Y
Reference data of shape `(n_samples_y, n_features_y)`
dist_kwargs
Keyword arguments for the distance computation.
**kernel_kwargs
Keyword arguments for the kernel algorithm.
Returns
-------
numpy.ndarray
Kernel matrix of shape `(n_samples_X, n_samples_X)` for the pairwise
case or `(n_samples_y, n_samples_X)` for component-wise kernels
Optional[Dict]
For the pairwise computation, a kernel can return data that is required for a
follow-up component-wise computation. The dictionary should contain keys
that can be included as `**kernel_kwargs` to a follow-up ``__call__``. Note
that if a kernel has no such values, this is empty (i.e. not even `None` is
returned).
Optional[Dict]
If the kernel computes quantities of interest, then these quantities can be
included in this dictionary. If this is returned, then this
must be at the third return position (with a possible `None` return at the
second position). If a kernel has no such values, this can be empty (i.e. not
even `None` is returned).
"""
raise NotImplementedError("base class")
@abc.abstractmethod
def eval(
self, distance_matrix: Union[np.ndarray, scipy.sparse.csr_matrix]
) -> Union[np.ndarray, scipy.sparse.csr_matrix]:
"""Evaluate kernel on pre-computed distance matrix.
For return values see :meth:`.__call__`.
.. note::
In this function there are no checks of whether the distance matrix was
computed with the correct metric required by the kernel.
Parameters
----------
distance_matrix
Matrix of shape `(n_samples_Y, n_samples_X)`. For the sparse matrix case note
that the kernel acts only on stored data, i.e. distance values with
exactly zero (duplicates and self distance) must be stored explicitly in the
matrix. Only large distance values exceeding a cut-off should not be stored.
Returns
-------
"""
raise NotImplementedError("base class")
class TSCManifoldKernel(BaseManifoldKernel):
"""Abstract base class for kernels acting on time series collections.
See Also
--------
:py:class:`.TSCDataFrame`
"""
@abc.abstractmethod
def __call__(
self,
X: pd.DataFrame,
Y: Optional[pd.DataFrame] = None,
*,
dist_kwargs: Optional[Dict[str, object]] = None,
**kernel_kwargs,
):
"""Abstract method to compute the kernel matrix from a time series collection.
Parameters
----------
X
Data of shape `(n_samples_X, n_features)`.
Y
Data of shape `(n_samples_Y, n_features)`.
dist_kwargs
Keyword arguments for the distance computation.
**kernel_kwargs
Keyword arguments for the kernel algorithm.
Returns
-------
Union[TSCDataFrame, pd.DataFrame]
The computed kernel matrix as ``TSCDataFrame`` (or fallback
``pandas.DataFrame``, if not regular time series). The basis shape of the
kernel matrix `(n_samples_Y, n_samples_X)`. However, the kernel may not be
evaluated at all given input time values and is then reduced accordingly.
Optional[Dict]
For the pairwise computation, a kernel can return data that is required for a
follow-up component-wise computation. The dictionary should contain keys
that can be included as `**kernel_kwargs` to a follow-up ``__call__``. Note
that if a kernel has no such values, this is empty (i.e. not even `None` is
returned).
Optional[Dict]
If the kernel computes quantities of interest, then these quantities can be
included in this dictionary. If this is returned, then this
must be at the third return position (with a possible `None` return at the
second position). If a kernel has no such values, this can be empty (i.e. not
even `None` is returned).
"""
raise NotImplementedError("base class")
class RadialBasisKernel(PCManifoldKernel, metaclass=abc.ABCMeta):
"""Abstract base class for radial basis kernels.
"A radial basis function (RBF) is a real-valued function whose value depends \
only on the distance between the input and some fixed point." from
`Wikipedia <https://en.wikipedia.org/wiki/Radial_basis_function>`_
Parameters
----------
distance_metric
metric required for kernel
"""
def __init__(self, distance_metric):
self.distance_metric = distance_metric
super(RadialBasisKernel, self).__init__()
@classmethod
def _check_bandwidth_parameter(cls, parameter, name) -> float:
check_scalar(
parameter,
name=name,
target_type=(float, np.floating, int, np.integer),
min_val=np.finfo(float).eps,
)
return float(parameter)
def __call__(
self, X, Y=None, *, dist_kwargs=None, **kernel_kwargs
) -> Union[np.ndarray, scipy.sparse.csr_matrix]:
"""Compute kernel matrix.
Parameters
----------
X
Reference point cloud of shape `(n_samples_X, n_features_X)`.
Y
Query point cloud of shape `(n_samples_Y, n_features_Y)`. If not given,
then `Y=X`.
dist_kwargs,
Keyword arguments passed to the distance matrix computation. See
:py:meth:`datafold.pcfold.compute_distance_matrix` for parameter arguments.
**kernel_kwargs
None
Returns
-------
Union[np.ndarray, scipy.sparse.csr_matrix]
Kernel matrix of shape `(n_samples_Y, n_samples_X)`. If cut-off is
specified in `dist_kwargs`, then the matrix is sparse.
"""
self._read_kernel_kwargs(attrs=None, kernel_kwargs=kernel_kwargs)
X = np.atleast_2d(X)
if Y is not None:
Y = np.atleast_2d(Y)
distance_matrix = compute_distance_matrix(
X,
Y,
metric=self.distance_metric,
**dist_kwargs or {},
)
kernel_matrix = self.eval(distance_matrix)
return kernel_matrix
class GaussianKernel(RadialBasisKernel):
r"""Gaussian radial basis kernel.
.. math::
K = \exp(\frac{-1}{2\varepsilon} \cdot D)
where :math:`D` is the squared euclidean distance matrix.
See also super classes :class:`RadialBasisKernel` and :class:`PCManifoldKernel`
for more functionality and documentation.
Parameters
----------
epsilon
        The kernel scale as a positive float value. Alternatively, a
        callable can be passed; after the distance matrix is computed, it is
        passed to this function, i.e. ``function(distance_matrix)``. The result
        of this function must again be a positive float describing the kernel scale.
"""
def __init__(self, epsilon: Union[float, Callable] = 1.0):
self.epsilon = epsilon
super(GaussianKernel, self).__init__(distance_metric="sqeuclidean")
def eval(
self, distance_matrix: Union[np.ndarray, scipy.sparse.csr_matrix]
) -> Union[np.ndarray, scipy.sparse.csr_matrix]:
"""Evaluate the kernel on pre-computed distance matrix.
Parameters
----------
distance_matrix
Matrix of pairwise distances of shape `(n_samples_Y, n_samples_X)`.
Returns
-------
Union[np.ndarray, scipy.sparse.csr_matrix]
Kernel matrix of same shape and type as `distance_matrix`.
"""
# Security copy, the distance matrix is maybe required again (for gradient,
# or other computations...)
if callable(self.epsilon):
if isinstance(distance_matrix, scipy.sparse.csr_matrix):
self.epsilon = self.epsilon(distance_matrix.data)
elif isinstance(distance_matrix, np.ndarray):
self.epsilon = self.epsilon(distance_matrix)
else:
raise TypeError(
f"Invalid type: type(distance_matrix)={type(distance_matrix)}."
f"Please report bug."
)
self.epsilon = self._check_bandwidth_parameter(
parameter=self.epsilon, name="epsilon"
)
kernel_matrix = _apply_kernel_function_numexpr(
distance_matrix,
expr="exp((- 1 / (2*eps)) * D)",
expr_dict={"eps": self.epsilon},
)
return kernel_matrix
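# Hedged usage sketch: pairwise Gaussian kernel on a tiny point cloud. Passing
# a callable for epsilon derives the bandwidth from the computed distance
# matrix itself (here its positive median), as described in the class docstring.
def _example_gaussian_kernel():
    X = np.array([[0.0], [1.0], [2.0]])
    kernel = GaussianKernel(epsilon=lambda d: np.median(d[d > 0]))
    return kernel(X)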
class MultiquadricKernel(RadialBasisKernel):
r"""Multiquadric radial basis kernel.
.. math::
        K = \sqrt{\frac{1}{2 \varepsilon} \cdot D + 1}
where :math:`D` is the squared euclidean distance matrix.
See also super classes :class:`RadialBasisKernel` and :class:`PCManifoldKernel`
for more functionality and documentation.
Parameters
----------
epsilon
kernel scale
"""
def __init__(self, epsilon: float = 1.0):
self.epsilon = epsilon
super(MultiquadricKernel, self).__init__(distance_metric="sqeuclidean")
def eval(
self, distance_matrix: Union[np.ndarray, scipy.sparse.csr_matrix]
) -> Union[np.ndarray, scipy.sparse.csr_matrix]:
"""Evaluate the kernel on pre-computed distance matrix.
Parameters
----------
distance_matrix
Matrix of pairwise distances of shape `(n_samples_Y, n_samples_X)`.
Returns
-------
Union[np.ndarray, scipy.sparse.csr_matrix]
Kernel matrix of same shape and type as `distance_matrix`.
"""
self.epsilon = self._check_bandwidth_parameter(
parameter=self.epsilon, name="epsilon"
)
return _apply_kernel_function_numexpr(
distance_matrix,
expr="sqrt(1.0 / (2*eps) * D + 1.0)",
expr_dict={"eps": self.epsilon},
)
class InverseMultiquadricKernel(RadialBasisKernel):
r"""Inverse multiquadric radial basis kernel.
.. math::
        K = \left(\frac{1}{2\varepsilon} \cdot D + 1\right)^{-1/2}
where :math:`D` is the squared Euclidean distance matrix.
See also super classes :class:`RadialBasisKernel` and :class:`PCManifoldKernel`
for more functionality and documentation.
Parameters
----------
epsilon
kernel scale
"""
def __init__(self, epsilon: float = 1.0):
self.epsilon = epsilon
super(InverseMultiquadricKernel, self).__init__(distance_metric="sqeuclidean")
def eval(
self, distance_matrix: Union[np.ndarray, scipy.sparse.csr_matrix]
) -> Union[np.ndarray, scipy.sparse.csr_matrix]:
"""Evaluate the kernel on pre-computed distance matrix.
Parameters
----------
distance_matrix
Matrix of pairwise distances of shape `(n_samples_Y, n_samples_X)`.
Returns
-------
Union[np.ndarray, scipy.sparse.csr_matrix]
Kernel matrix of same shape and type as `distance_matrix`.
"""
self.epsilon = self._check_bandwidth_parameter(
parameter=self.epsilon, name="epsilon"
)
return _apply_kernel_function_numexpr(
distance_matrix,
expr="1.0 / sqrt(1.0 / (2*eps) * D + 1.0)",
expr_dict={"eps": self.epsilon},
)
class CubicKernel(RadialBasisKernel):
r"""Cubic radial basis kernel.
.. math::
K= D^{3}
where :math:`D` is the Euclidean distance matrix.
See also super classes :class:`RadialBasisKernel` and :class:`PCManifoldKernel`
for more functionality and documentation.
"""
def __init__(self):
super(CubicKernel, self).__init__(distance_metric="euclidean")
def eval(
self, distance_matrix: Union[np.ndarray, scipy.sparse.csr_matrix]
) -> Union[np.ndarray, scipy.sparse.csr_matrix]:
"""Evaluate the kernel on pre-computed distance matrix.
Parameters
----------
distance_matrix
Matrix of pairwise distances of shape `(n_samples_Y, n_samples_X)`.
Returns
-------
Union[np.ndarray, scipy.sparse.csr_matrix]
Kernel matrix of same shape and type as `distance_matrix`.
"""
# return r ** 3
return _apply_kernel_function_numexpr(distance_matrix, expr="D ** 3")
class QuinticKernel(RadialBasisKernel):
r"""Quintic radial basis kernel.
.. math::
K= D^{5}
where :math:`D` is the Euclidean distance matrix.
See also super classes :class:`RadialBasisKernel` and :class:`PCManifoldKernel`
for more functionality and documentation.
"""
def __init__(self):
super(QuinticKernel, self).__init__(distance_metric="euclidean")
def eval(
self, distance_matrix: Union[np.ndarray, scipy.sparse.csr_matrix]
) -> Union[np.ndarray, scipy.sparse.csr_matrix]:
"""Evaluate the kernel on pre-computed distance matrix.
Parameters
----------
distance_matrix
Matrix of pairwise distances of shape `(n_samples_Y, n_samples_X)`.
Returns
-------
Union[np.ndarray, scipy.sparse.csr_matrix]
Kernel matrix of same shape and type as `distance_matrix`.
"""
# r**5
return _apply_kernel_function_numexpr(distance_matrix, "D ** 5")
class ContinuousNNKernel(PCManifoldKernel):
"""Compute the continuous `k` nearest-neighbor adjacency graph.
The continuous `k` nearest neighbor (C-kNN) graph is an adjacency (i.e. unweighted)
graph for which the (un-normalized) graph Laplacian converges spectrally to a
Laplace-Beltrami operator on the manifold in the large data limit.
Parameters
----------
k_neighbor
For each point the distance to the `k_neighbor` nearest neighbor is computed.
If a sparse matrix is computed (with cut-off distance), then each point must
have a minimum of `k` stored neighbors. (see `kmin` parameter in
:meth:`pcfold.distance.compute_distance_matrix`).
delta
Unit-less scale parameter.
References
----------
:cite:`berry_consistent_2019`
"""
def __init__(self, k_neighbor: int, delta: float):
if not is_integer(k_neighbor):
raise TypeError("n_neighbors must be an integer")
else:
# make sure to only use Python built-in
self.k_neighbor = int(k_neighbor)
if not is_float(delta):
if is_integer(delta):
self.delta = float(delta)
else:
raise TypeError("delta must be of type float")
else:
# make sure to only use Python built-in
self.delta = float(delta)
if self.k_neighbor < 1:
raise ValueError(
f"parameter 'k_neighbor={self.k_neighbor}' must be a positive integer"
)
if self.delta <= 0.0:
raise ValueError(
f"parrameter 'delta={self.delta}' must be a positive float"
)
super(ContinuousNNKernel, self).__init__()
def _validate_reference_dist_knn(self, is_pdist, reference_dist_knn):
        if not is_pdist and reference_dist_knn is None:
            raise ValueError("For the 'cdist' case 'reference_dist_knn' must be given")
def __call__(
self,
X: np.ndarray,
Y: Optional[np.ndarray] = None,
*,
dist_kwargs: Optional[Dict] = None,
**kernel_kwargs,
):
"""Compute (sparse) adjacency graph to describes a point neighborhood.
Parameters
----------
X
Reference point cloud of shape `(n_samples_X, n_features_X)`.
Y
Query point cloud of shape `(n_samples_Y, n_features_Y)`. If not given,
then `Y=X`.
dist_kwargs
Keyword arguments passed to the internal distance matrix computation. See
:py:meth:`datafold.pcfold.compute_distance_matrix` for parameter arguments.
**kernel_kwargs: Dict[str, object]
- reference_dist_knn: Optional[np.ndarray]
Distances to the `k`-th nearest neighbor for each point in `X`. The
parameter is mandatory if `Y` is not `None`.
Returns
-------
scipy.sparse.csr_matrix
Sparse adjacency matrix describing the unweighted, undirected continuous
nearest neighbor graph.
Optional[Dict[str, numpy.ndarray]]
For a pair-wise kernel evaluation, a dictionary with key
`reference_dist_knn`, containing the distance to the `k`-th nearest
neighbor for each point, is returned.
"""
is_pdist = Y is None
reference_dist_knn = self._read_kernel_kwargs(
attrs=["reference_dist_knn"], kernel_kwargs=kernel_kwargs
)
dist_kwargs = dist_kwargs or {}
# minimum number of neighbors required in sparse case!
dist_kwargs.setdefault("kmin", self.k_neighbor)
distance_matrix = compute_distance_matrix(
X, Y, metric="euclidean", **dist_kwargs
)
return self.eval(
distance_matrix, is_pdist=is_pdist, reference_dist_knn=reference_dist_knn
)
def _validate(self, distance_matrix, is_pdist, reference_dist_knn):
if distance_matrix.ndim != 2:
raise ValueError("distance_matrix must be a two-dimensional array")
n_samples_Y, n_samples_X = distance_matrix.shape
if is_pdist:
if n_samples_Y != n_samples_X:
raise ValueError(
"If is_pdist=True, the distance matrix must be square and symmetric."
)
if isinstance(distance_matrix, np.ndarray):
diagonal = np.diag(distance_matrix)
else:
diagonal = np.asarray(distance_matrix.diagonal(0))
if (diagonal != 0).any():
raise ValueError(
"If is_pdist=True, distance_matrix must have zeros on diagonal."
)
else:
if reference_dist_knn is None:
raise ValueError(
"If is_pdist=False, 'reference_dist_knn' (=None) must be provided."
)
if not isinstance(reference_dist_knn, np.ndarray):
raise TypeError("reference_dist_knn must be of type numpy.ndarray")
if reference_dist_knn.ndim != 1:
raise ValueError("reference_dist_knn must be one-dimensional")
if reference_dist_knn.shape[0] != n_samples_X:
raise ValueError(
f"len(reference_dist_knn)={reference_dist_knn.shape[0]} "
f"must match distance_matrix.shape[1]={n_samples_X}"
)
if self.k_neighbor < 1 or self.k_neighbor > n_samples_X - 1:
raise ValueError(
"'k_neighbor' must be in the range between 1 and the number of "
"samples minus one."
)
def eval(
self, distance_matrix, is_pdist=False, reference_dist_knn=None
) -> Tuple[scipy.sparse.csr_matrix, Optional[Dict[Any, np.ndarray]]]:
"""Evaluate kernel on pre-computed distance matrix.
For return values see :meth:`.__call__`.
Parameters
----------
distance_matrix
Pre-computed matrix.
is_pdist
If True, the `distance_matrix` is assumed to be symmetric and with zeros on
the diagonal (self distances). Note, that there are no checks to validate
the distance matrix.
reference_dist_knn
An input is required for a component-wise evaluation of the kernel. This is
the case if the distance matrix is rectangular or non-symmetric (i.e.,
``is_pdist=False``). The required values are returned for a pre-evaluation
of the pair-wise evaluation.
"""
self._validate(
distance_matrix=distance_matrix,
is_pdist=is_pdist,
reference_dist_knn=reference_dist_knn,
)
dist_knn = _kth_nearest_neighbor_dist(distance_matrix, self.k_neighbor)
distance_factors = _symmetric_matrix_division(
distance_matrix,
vec=np.sqrt(dist_knn),
vec_right=np.sqrt(reference_dist_knn)
if reference_dist_knn is not None
else None,
)
if isinstance(distance_factors, np.ndarray):
kernel_matrix = scipy.sparse.csr_matrix(
distance_factors < self.delta, dtype=bool
)
else:
assert isinstance(distance_factors, scipy.sparse.csr_matrix)
distance_factors.data = (distance_factors.data < self.delta).astype(bool)
distance_factors.eliminate_zeros()
kernel_matrix = distance_factors
# return dist_knn, which is required for cdist_k_nearest_neighbor in
# order to do a follow-up cdist request (then as reference_dist_knn as input).
if is_pdist:
ret_cdist: Optional[Dict[str, np.ndarray]] = dict(
reference_dist_knn=dist_knn
)
else:
ret_cdist = None
return kernel_matrix, ret_cdist
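# Usage sketch (editor's addition; array shapes and variable names are
# illustrative):
#
#   import numpy as np
#   rng = np.random.default_rng(1)
#   X = rng.normal(size=(200, 3))          # reference point cloud
#   cknn = ContinuousNNKernel(k_neighbor=10, delta=1.0)
#   adjacency, ret = cknn(X)               # pdist case; 'ret' stores the
#                                          # k-th neighbor distances
#   Y = rng.normal(size=(50, 3))           # query point cloud
#   adjacency_Y, _ = cknn(X, Y, **ret)     # cdist case reuses 'reference_dist_knn'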
class DmapKernelFixed(BaseManifoldKernel):
"""Diffusion map kernel with fixed kernel bandwidth.
This kernel wraps an internal kernel to describe a diffusion process.
Parameters
----------
internal_kernel
Kernel that describes the proximity between data points.
is_stochastic
If True, the kernel matrix is row-normalized.
alpha
Degree of re-normalization of sampling density in point cloud. `alpha` must be
inside the interval [0, 1] (inclusive).
symmetrize_kernel
If True, performs a conjugate transformation which can improve numerical
stability for matrix operations (such as computing eigenpairs). The matrix to
change the basis back is provided as a quantity of interest (see
possible return values in :meth:`PCManifoldKernel.__call__`).
See Also
--------
:py:class:`DiffusionMaps`
References
----------
:cite:`coifman_diffusion_2006`
"""
def __init__(
self,
internal_kernel: PCManifoldKernel = GaussianKernel(epsilon=1.0),
is_stochastic: bool = True,
alpha: float = 1.0,
symmetrize_kernel: bool = True,
):
self.is_stochastic = is_stochastic
if not (0 <= alpha <= 1):
raise ValueError(f"alpha has to be between [0, 1]. Got alpha={alpha}")
self.alpha = alpha
self.symmetrize_kernel = symmetrize_kernel
self.internal_kernel = internal_kernel
# If the kernel is not stochastic, it stays symmetric anyway.
# `symmetrize_kernel` indicates whether the user wants the kernel to use a
# similarity transformation to solve the eigenproblem on a symmetric kernel
# (if required).
# NOTE: a necessary condition to symmetrize the kernel is that the kernel is
# evaluated pairwise (i.e. is_pdist=True).
self.row_sums_init = None
super(DmapKernelFixed, self).__init__()
@property
def is_symmetric(self):
return self.symmetrize_kernel or not self.is_stochastic
def is_symmetric_transform(self) -> bool:
"""Indicates whether a symmetric conjugate kernel matrix was computed.
Returns
-------
bool
"""
# If the kernel is made stochastic it loses its symmetry; if
# `symmetrize_kernel` is set to True, the symmetry transformation is applied.
return self.is_stochastic and self.is_symmetric
def _normalize_sampling_density(
self,
kernel_matrix: Union[np.ndarray, scipy.sparse.csr_matrix],
row_sums_alpha_fit: np.ndarray,
) -> Tuple[Union[np.ndarray, scipy.sparse.csr_matrix], Optional[np.ndarray]]:
"""Normalize (sparse/dense) kernels with positive `alpha` value. This is also
referred to a 'renormalization' of sampling density."""
if row_sums_alpha_fit is None:
assert is_symmetric_matrix(kernel_matrix)
else:
assert row_sums_alpha_fit.shape[0] == kernel_matrix.shape[1]
row_sums = kernel_matrix.sum(axis=1)
if scipy.sparse.issparse(kernel_matrix):
# np.matrix to np.ndarray
# (np.matrix is deprecated but still used in scipy.sparse)
row_sums = row_sums.A1
if self.alpha < 1:
if row_sums.dtype.kind != "f":
# This is required for case when 'row_sums' contains boolean or integer
# values; for inplace operations the type has to be the same
row_sums = row_sums.astype(float)
row_sums_alpha = np.power(row_sums, self.alpha, out=row_sums)
else: # no need to power with 1
row_sums_alpha = row_sums
normalized_kernel = _symmetric_matrix_division(
matrix=kernel_matrix,
vec=row_sums_alpha,
vec_right=row_sums_alpha_fit,
)
if row_sums_alpha_fit is not None:
# Set row_sums_alpha to None as a safeguard: in a cdist case (i.e. when
# row_sums_alpha_fit is given) row_sums_alpha needs no further processing.
row_sums_alpha = None
return normalized_kernel, row_sums_alpha
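# In formulas (editor's addition): with kernel row sums q_i = sum_j k(x_i, x_j),
# the density-renormalized kernel is
#     k^{(alpha)}(x_i, x_j) = k(x_i, x_j) / (q_i^alpha * q_j^alpha),
# which for alpha=1 removes the influence of the sampling density
# (cf. Coifman & Lafon, 2006).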
def _normalize(
self,
internal_kernel: KernelType,
row_sums_alpha_fit: np.ndarray,
is_pdist: bool,
):
# only required for symmetric kernel, return None if not used
basis_change_matrix = None
# required if alpha>0 and _normalize is called later for a cdist case
# set in the pdist, alpha > 0 case
row_sums_alpha = None
if self.is_stochastic:
if self.alpha > 0:
# if pdist: kernel is still symmetric after this function call
(internal_kernel, row_sums_alpha,) = self._normalize_sampling_density(
internal_kernel, row_sums_alpha_fit
)
if is_pdist and self.is_symmetric_transform():
# Increases numerical stability when solving the eigenproblem
# Note1: when using the (symmetric) conjugate matrix, the eigenvectors
# have to be transformed back to match the original
# Note2: the similarity transform only works for the is_pdist case
# (for cdist, there is no symmetric kernel in the first place,
# because it is generally rectangular and does not include self
# points)
(
internal_kernel,
basis_change_matrix,
) = _conjugate_stochastic_kernel_matrix(internal_kernel)
else:
internal_kernel = _stochastic_kernel_matrix(internal_kernel)
# check that if "is symmetric pdist" -> require basis change
# else no basis change
assert not (
(is_pdist and self.is_symmetric_transform())
^ (basis_change_matrix is not None)
)
if is_pdist and self.is_symmetric:
assert is_symmetric_matrix(internal_kernel)
return internal_kernel, basis_change_matrix, row_sums_alpha
def _validate_row_alpha_fit(self, is_pdist, row_sums_alpha_fit):
if (
self.is_stochastic
and self.alpha > 0
and not is_pdist
and row_sums_alpha_fit is None
):
raise ValueError(
"The cdist request cannot be carried out if 'row_sums_alpha_fit=None'. "
"Please consider reporting this bug."
)
def _eval(self, kernel_output, is_pdist, row_sums_alpha_fit):
self._validate_row_alpha_fit(
is_pdist=is_pdist, row_sums_alpha_fit=row_sums_alpha_fit
)
kernel_matrix, internal_ret_cdist, _ = PCManifoldKernel.read_kernel_output(
kernel_output=kernel_output
)
if isinstance(kernel_matrix, pd.DataFrame):
# store indices and cast to same type later
_type = type(kernel_matrix)
rows_idx, columns_idx = kernel_matrix.index, kernel_matrix.columns
kernel_matrix = kernel_matrix.to_numpy()
else:
_type, rows_idx, columns_idx = None, None, None
kernel_matrix, basis_change_matrix, row_sums_alpha = self._normalize(
kernel_matrix,
row_sums_alpha_fit=row_sums_alpha_fit,
is_pdist=is_pdist,
)
if rows_idx is not None and columns_idx is not None:
kernel_matrix = _type(kernel_matrix, index=rows_idx, columns=columns_idx)
if is_pdist:
ret_cdist = dict(
row_sums_alpha_fit=row_sums_alpha,
internal_kernel_kwargs=internal_ret_cdist,
)
ret_extra = dict(basis_change_matrix=basis_change_matrix)
else:
# no need for row_sums_alpha or the basis change matrix in the cdist case
ret_cdist = None
ret_extra = None
return kernel_matrix, ret_cdist, ret_extra
def __call__(
self,
X: np.ndarray,
Y: Optional[np.ndarray] = None,
*,
dist_kwargs: Optional[Dict] = None,
**kernel_kwargs,
) -> Tuple[
Union[np.ndarray, scipy.sparse.csr_matrix], Optional[Dict], Optional[Dict]
]:
"""Compute the diffusion map kernel.
Parameters
----------
X
Reference point cloud of shape `(n_samples_X, n_features_X)`.
Y
Query point cloud of shape `(n_samples_Y, n_features_Y)`. If not given,
then `Y=X`.
dist_kwargs
Keyword arguments passed to the internal distance matrix computation. See
:py:meth:`datafold.pcfold.compute_distance_matrix` for parameter arguments.
**kernel_kwargs: Dict[str, object]
- internal_kernel_kwargs: Optional[Dict]
Keyword arguments passed to the set internal kernel.
- row_sums_alpha_fit: Optional[np.ndarray]
Row sums from the re-normalization computed during the pair-wise kernel
computation. The parameter is mandatory for the component-wise kernel
computation if `alpha>0`.
Returns
-------
Union[numpy.ndarray, scipy.sparse.csr_matrix]
Kernel matrix (or its conjugate) with the same type and shape as the
computed distance matrix.
Optional[Dict[str, numpy.ndarray]]
Row sums from re-normalization in key 'row_sums_alpha_fit', only returned for
pairwise computations. The values are required for follow up out-of-sample
kernel evaluations (`Y is not None`).
Optional[Dict[str, scipy.sparse.dia_matrix]]
Basis change matrix (sparse diagonal) if `symmetrize_kernel=True` and only
returned if the kernel matrix is a symmetric conjugate of the true
diffusion kernel matrix. Required to recover the diffusion map eigenvectors
from the symmetric conjugate matrix.
"""
is_pdist = Y is None
internal_kernel_kwargs, row_sums_alpha_fit = self._read_kernel_kwargs(
attrs=["internal_kernel_kwargs", "row_sums_alpha_fit"],
kernel_kwargs=kernel_kwargs,
)
kernel_output = self.internal_kernel(
X, Y=Y, dist_kwargs=dist_kwargs or {}, **internal_kernel_kwargs or {}
)
return self._eval(
kernel_output=kernel_output,
is_pdist=is_pdist,
row_sums_alpha_fit=row_sums_alpha_fit,
)
def eval(
self,
distance_matrix: Union[np.ndarray, scipy.sparse.csr_matrix],
is_pdist=False,
row_sums_alpha_fit=None,
):
"""Evaluate kernel on pre-computed distance matrix.
For return values see :meth:`.__call__`.
Parameters
----------
distance_matrix
Matrix of shape `(n_samples_Y, n_samples_X)`.
is_pdist
If True, the distance matrix must be square with zeros on the diagonal.
Returns
-------
"""
kernel_output = self.internal_kernel.eval(distance_matrix)
return self._eval(
kernel_output=kernel_output,
is_pdist=is_pdist,
row_sums_alpha_fit=row_sums_alpha_fit,
)
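# Usage sketch (editor's addition; parameter values are illustrative):
#
#   import numpy as np
#   X = np.random.default_rng(2).normal(size=(300, 2))
#   dmap_kernel = DmapKernelFixed(
#       internal_kernel=GaussianKernel(epsilon=1.5),
#       is_stochastic=True, alpha=1.0, symmetrize_kernel=True,
#   )
#   K, ret_cdist, ret_extra = dmap_kernel(X)
#   # ret_extra["basis_change_matrix"] converts eigenvectors of the symmetric
#   # conjugate back to eigenvectors of the (non-symmetric) Markov kernel.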
class ConeKernel(TSCManifoldKernel):
r"""Compute a dynamics adapted cone kernel for time series collection data.
The equations below describe the kernel evaluation and are taken from the referenced
paper below.
A single kernel evaluation between samples :math:`x` and :math:`y` is computed with
.. math::
K(x, y) = \exp
\left(
-\frac{\vert\vert \omega_{ij}\vert\vert^2}
{\varepsilon \delta t^2 \vert\vert \xi_i \vert\vert \vert\vert \xi_j \vert\vert }
\left[ (1-\zeta \cos^2 \theta_i)(1-\zeta \cos^2 \theta_j) \right]^{0.5}
\right)
where,
.. math::
\cos \theta_i =
\frac{(\xi_i, \omega_{ij})}
{\vert\vert \xi_i \vert\vert \vert\vert \omega_{ij} \vert\vert}
is the angle between samples,
.. math::
\omega_{ij} = y - x
is a difference vector between the point pairs,
.. math::
\delta t
is the (constant) time sampling in the time series,
.. math::
\varepsilon
is an additional scaling parameter of the kernel bandwidth,
.. math::
\zeta
is the parameter to control the angular influence, and
.. math::
\xi_i = \delta_p x_i = \sum_{j=-p/2}^{p/2} w_j x_{i+j}
is the approximation of the dynamical vector field. The approximation is carried
out with :math:`\delta_p`, a :math:`p`-th order accurate central finite difference
(in a sense that :math:`\frac{\xi}{\delta t} + \mathcal{O}(\delta t^p)`) with
associated weights :math:`w`.
.. note::
In the centered finite difference the time values are shifted such that no
samples are taken from the future. For example, for the scheme
:math:`x_{t+1} - x_{t-1}` at time :math:`t`, the newly assigned time value
is `t+1`. See also :py:meth:`.TSCAccessor.time_derivative`.
Parameters
----------
zeta
A scalar in :math:`[0, 1)` that controls the angular influence. The
weight from one point to a neighboring point is increased if the relative
displacement vector is aligned with the dynamical flow. The special case of
`zeta=0`, corresponds to the so-called "Non-Linear Laplacian Spectral
Analysis" kernel (NLSA).
epsilon
An additional scaling parameter with which the kernel scale can be adapted to
the actual time sampling frequency.
fd_accuracy
The accuracy of the centered finite difference scheme (:math:`p`
in the description). Note that the higher the order, the more samples are
required in a warm-up phase, where the centered scheme cannot be evaluated with
the given accuracy. All samples from this warm-up phase are dropped in the
kernel evaluation.
References
----------
:cite:`giannakis_dynamics-adapted_2015` (the equations are taken from the
`arXiv version <https://arxiv.org/abs/1403.0361>`__)
"""
def __init__(self, zeta: float = 0.0, epsilon: float = 1.0, fd_accuracy: int = 4):
self.zeta = zeta
self.epsilon = epsilon
self.fd_accuracy = fd_accuracy
def _validate_setting(self, X, Y):
# cannot import in top of file, because this creates circular imports
from datafold.pcfold.timeseries.collection import TSCDataFrame, TSCException
check_scalar(
self.zeta,
name="zeta",
target_type=(float, np.floating, int, np.integer),
min_val=0.0,
max_val=1.0 - np.finfo(float).eps,
)
check_scalar(
self.epsilon,
name="epsilon",
target_type=(float, np.floating, int, np.integer),
min_val=np.finfo(float).eps,
max_val=None,
)
check_scalar(
self.fd_accuracy,
"fd_accuracy",
target_type=(int, np.integer),
min_val=1,
max_val=None,
)
# make sure to only deal with Python built-in types
self.zeta = float(self.zeta)
self.epsilon = float(self.epsilon)
self.fd_accuracy = int(self.fd_accuracy)
if not isinstance(X, TSCDataFrame):
raise TypeError(f"X must be a TSCDataFrame (got: {type(X)})")
if Y is not None and not isinstance(Y, TSCDataFrame):
raise TypeError(f"Y must be a TSCDataFrame (got: {type(X)}")
if Y is not None:
is_df_same_index(X, Y, check_index=False, check_column=True, handle="raise")
# check that the time sampling is constant; if so, obtain delta_time
if Y is None:
X_dt = X.tsc.check_const_time_delta()
else:
X_dt, _ = TSCAccessor.check_equal_delta_time(
X,
Y,
atol=1e-15,
require_const=True,
)
# return here to not compute delta_time again
return X_dt
def _compute_distance_and_cosinus_matrix(
self,
Y_numpy: np.ndarray,
timederiv_Y: np.ndarray,
norm_timederiv_Y: np.ndarray,
X_numpy: Optional[np.ndarray] = None,
distance_matrix=None,
):
if X_numpy is None:
X_numpy = Y_numpy
is_compute_distance = distance_matrix is None
# pre-allocate cosine- and distance-matrix
cos_matrix = np.zeros((Y_numpy.shape[0], X_numpy.shape[0]))
if is_compute_distance:
distance_matrix = np.zeros_like(cos_matrix)
# define names and initialize as None so they can be used as "out" buffers
diff_matrix, denominator, zero_mask = [None] * 3
for row_idx in range(cos_matrix.shape[0]):
diff_matrix = np.subtract(X_numpy, Y_numpy[row_idx, :], out=diff_matrix)
# distance matrix is not computed via "compute_distance_matrix" function
if is_compute_distance:
distance_matrix[row_idx, :] = scipy.linalg.norm(
diff_matrix, axis=1, check_finite=False
)
# norm of time_derivative * norm_difference
denominator = np.multiply(
norm_timederiv_Y[row_idx], distance_matrix[row_idx, :], out=denominator
)
# numerator: scalar product (time_derivative, differences)
# in paper: (\xi, \omega)
cos_matrix[row_idx, :] = np.dot(
timederiv_Y[row_idx, :],
diff_matrix.T,
out=cos_matrix[row_idx, :],
)
# special handling of (almost) duplicate points -> division by zero leads to nan
zero_mask = np.less_equal(denominator, 1e-14, out=zero_mask)
cos_matrix[row_idx, zero_mask] = 0.0 # -> np.cos(0) = 1 later
cos_matrix[row_idx, ~zero_mask] /= denominator[~zero_mask]
# memory and cache efficient solving with no intermediate memory allocations:
# cos_matrix = 1 - self.zeta * np.square(np.cos(cos_matrix))
cos_matrix = np.cos(cos_matrix, out=cos_matrix)
cos_matrix = np.square(cos_matrix, out=cos_matrix)
cos_matrix = np.multiply(self.zeta, cos_matrix, out=cos_matrix)
cos_matrix = np.subtract(1.0, cos_matrix, out=cos_matrix)
if not np.isfinite(cos_matrix).all():
raise ValueError("not all finite")
return cos_matrix, distance_matrix
def _approx_dynflow(self, X):
timederiv = X.tsc.time_derivative(
scheme="center", diff_order=1, accuracy=self.fd_accuracy, shift_index=True
)
norm_timederiv = df_type_and_indices_from(
timederiv,
values=np.linalg.norm(timederiv, axis=1),
except_columns=["fd_norm"],
)
return timederiv, norm_timederiv
def __call__(
self,
X: pd.DataFrame,
Y: Optional[pd.DataFrame] = None,
*,
dist_kwargs: Optional[Dict[str, object]] = None,
**kernel_kwargs,
):
"""Compute kernel matrix.
Parameters
----------
X
The reference time series collection of shape `(n_samples_X, n_features_X)`.
Y
The query time series collection of shape `(n_samples_Y, n_features_Y)`. If
`Y` is not provided, then ``Y=X``.
dist_kwargs
ignored `(the distance matrix is computed as part of the kernel
evaluation; for now it can only be a dense matrix)`.
**kernel_kwargs: Dict[str, object]
- timederiv_X
The time derivative from a finite difference scheme. Required for a
component-wise evaluation.
- norm_timederiv_X
Norm of the time derivative. Required for a component-wise evaluation.
Returns
-------
TSCDataFrame
The kernel matrix with time information.
"""
delta_time = self._validate_setting(X, Y)
timederiv_X, norm_timederiv_X = self._read_kernel_kwargs(
attrs=["timederiv_X", "norm_timederiv_X"], kernel_kwargs=kernel_kwargs
)
is_pdist = Y is None
if is_pdist:
timederiv_X, norm_timederiv_X = self._approx_dynflow(X=X)
else:
if timederiv_X is None or norm_timederiv_X is None:
raise ValueError(
"For a component-wise computation the parameters 'timederiv_X' "
"and 'norm_timederiv_X' must be provided."
)
# NOTE: samples at the time series boundaries are dropped here. How many
# depends on the accuracy of the finite difference scheme.
X_numpy = X.loc[timederiv_X.index].to_numpy()
if is_pdist:
timederiv_Y = timederiv_X
if self.zeta != 0.0:
(
cos_matrix_Y_X,
distance_matrix,
) = self._compute_distance_and_cosinus_matrix(
Y_numpy=X_numpy, # query (Y) = reference (X)
timederiv_Y=timederiv_X.to_numpy(),
norm_timederiv_Y=norm_timederiv_X.to_numpy(),
)
cos_matrix = np.multiply(
cos_matrix_Y_X, cos_matrix_Y_X.T, out=cos_matrix_Y_X
)
cos_matrix = np.sqrt(cos_matrix, out=cos_matrix)
# squared Euclidean metric
distance_matrix = np.square(distance_matrix, out=distance_matrix)
else:
distance_matrix = compute_distance_matrix(X_numpy, metric="sqeuclidean")
cos_matrix = np.ones((X_numpy.shape[0], X_numpy.shape[0]))
factor_matrix = _symmetric_matrix_division(
cos_matrix,
vec=norm_timederiv_X.to_numpy().ravel(),
vec_right=None,
scalar=(delta_time ** 2) * self.epsilon,
value_zero_division=0,
)
else:
assert isinstance(Y, pd.DataFrame) # mypy
timederiv_Y, norm_timederiv_Y = self._approx_dynflow(X=Y)
Y_numpy = Y.loc[timederiv_Y.index].to_numpy()
if self.zeta != 0.0:
(
cos_matrix_Y_X,
distance_matrix,
) = self._compute_distance_and_cosinus_matrix(
Y_numpy=Y_numpy,
timederiv_Y=timederiv_Y.to_numpy(),
norm_timederiv_Y=norm_timederiv_Y.to_numpy(),
X_numpy=X_numpy,
)
# because of the time derivative the cos_matrix is not symmetric between
# the reference / query set:
# cos_matrix(i,j) != cos_matrix.T(j,i)
# --> also compute it in the opposite direction
cos_matrix_X_Y, _ = self._compute_distance_and_cosinus_matrix(
X_numpy,
timederiv_Y=timederiv_X.to_numpy(),
norm_timederiv_Y=norm_timederiv_X.to_numpy(),
X_numpy=Y_numpy,
distance_matrix=distance_matrix.T,
)
cos_matrix = np.multiply(
cos_matrix_Y_X, cos_matrix_X_Y.T, out=cos_matrix_Y_X
)
cos_matrix = np.sqrt(cos_matrix, out=cos_matrix)
# squared Euclidean metric
distance_matrix = np.square(distance_matrix, out=distance_matrix)
else:
distance_matrix = compute_distance_matrix(
X_numpy, Y_numpy, metric="sqeuclidean"
)
cos_matrix = np.ones((Y_numpy.shape[0], X_numpy.shape[0]))
factor_matrix = _symmetric_matrix_division(
cos_matrix,
vec=norm_timederiv_Y.to_numpy().ravel(),
vec_right=norm_timederiv_X.to_numpy().ravel(),
scalar=(delta_time ** 2) * self.epsilon,
value_zero_division=0,
)
assert np.isfinite(factor_matrix).all()
kernel_matrix = _apply_kernel_function_numexpr(
distance_matrix=distance_matrix,
expr="exp(-1.0 * D * factor_matrix)",
expr_dict={"factor_matrix": factor_matrix},
)
kernel_matrix = df_type_and_indices_from(
indices_from=timederiv_Y,
values=kernel_matrix,
except_columns=[f"X{i}" for i in np.arange(X_numpy.shape[0])],
)
if is_pdist:
ret_cdist: Optional[Dict[str, Any]] = dict(
timederiv_X=timederiv_X, norm_timederiv_X=norm_timederiv_X
)
else:
ret_cdist = None
return kernel_matrix, ret_cdist
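# Usage sketch (editor's addition). The TSCDataFrame construction below is an
# assumption about datafold's time series API:
#
#   import numpy as np
#   import pandas as pd
#   from datafold.pcfold.timeseries.collection import TSCDataFrame
#   t = np.linspace(0, 10, 200)            # constant time sampling
#   df = pd.DataFrame({"x": np.sin(t), "y": np.cos(t)}, index=t)
#   X = TSCDataFrame.from_single_timeseries(df)
#   cone = ConeKernel(zeta=0.5, epsilon=1.0, fd_accuracy=4)
#   K, ret = cone(X)   # kernel matrix as TSCDataFrame; warm-up samples of the
#                      # finite difference scheme are dropped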
class DmapKernelVariable(BaseManifoldKernel): # pragma: no cover
"""Diffusion maps kernel with variable kernel bandwidth.
.. warning::
This class is not documented. Contributions are welcome
* documentation
* unit- or functional-testing
References
----------
:cite:`berry_nonparametric_2015`
:cite:`berry_variable_2016`
See Also
--------
:py:class:`DiffusionMapsVariable`
"""
def __init__(self, epsilon, k, expected_dim, beta, symmetrize_kernel):
if expected_dim <= 0 or not is_integer(expected_dim):
raise ValueError("expected_dim has to be a positive integer.")
if epsilon <= 0 or not is_float(epsilon):
raise ValueError("epsilon has to be a positive float.")
if k <= 0 or not is_integer(k):
raise ValueError("k has to be a positive integer.")
self.beta = beta
self.epsilon = epsilon
self.k = k
self.expected_dim = expected_dim # variable 'd' in paper
# allows a stochastic option to be included later on
self.is_symmetric = bool(symmetrize_kernel)
self.alpha = -self.expected_dim / 4
c2 = (
1 / 2
- 2 * self.alpha
+ 2 * self.expected_dim * self.alpha
+ self.expected_dim * self.beta / 2
+ self.beta
)
if c2 >= 0:
raise ValueError(
"Theory requires c2 to be negative:\n"
"c2 = 1/2 - 2 * alpha + 2 * expected_dim * alpha + expected_dim * "
f"beta/2 + beta \n but is {c2}"
)
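# Worked example (editor's addition): for expected_dim=2 and beta=-0.5,
# alpha = -2/4 = -0.5 and
# c2 = 1/2 - 2*(-0.5) + 2*2*(-0.5) + 2*(-0.5)/2 + (-0.5)
#    = 0.5 + 1 - 2 - 0.5 - 0.5 = -1.5 < 0,
# so this parameter combination is admissible.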
def is_symmetric_transform(self, is_pdist):
# If the kernel is made stochastic it loses its symmetry; if
# `symmetrize_kernel` is set to True, the symmetry transformation is applied.
return is_pdist and self.is_symmetric
def _compute_rho0(self, distance_matrix):
"""Ad hoc bandwidth function."""
nr_samples = distance_matrix.shape[1]
# both modes are equivalent; MODE=1 makes it easier to compare with ref3.
MODE = 1
if MODE == 1: # according to Berry code
# keep only nearest neighbors
distance_matrix = np.sort(np.sqrt(distance_matrix), axis=1)[
:, 1 : self.k + 1
]
rho0 = np.sqrt(np.mean(distance_matrix ** 2, axis=1))
else: # MODE == 2: , more performant if required
if self.k < nr_samples:
# TODO: have to revert setting the inf
# -> check that the diagonal is all zeros
# -> set to inf
# -> after computation set all infs back to zero
# -> this is also very similar to continuous-nn in PCManifold
# this makes it possible to ignore the trivial distance=0 to itself
np.fill_diagonal(distance_matrix, np.inf)
# more efficient than sorting everything
distance_matrix.partition(self.k, axis=1)
distance_matrix = np.sort(distance_matrix[:, : self.k], axis=1)
distance_matrix = distance_matrix * distance_matrix
else: # self.k == self.N
np.fill_diagonal(distance_matrix, np.nan)
bool_mask = ~np.diag(np.ones(nr_samples)).astype(bool)
distance_matrix = distance_matrix[bool_mask].reshape(
distance_matrix.shape[0], distance_matrix.shape[1] - 1
)
# experimental: --------------------------------------------------------------
# paper: in the var-bw paper (ref2) p. 7
# it is mentioned to IGNORE non-zero entries -- this is not detailed further.
# a consequence is that the NN and kernel lose symmetry, and so does (K+K^T)/2
# This is with a cut-off rate:
# val = 1E-2
# distance_matrix[distance_matrix < val] = np.nan
# experimental END -----------------------------------------------------------
# nanmean is only needed for the experimental part; without it, np.mean
# suffices
rho0 = np.sqrt(np.nanmean(distance_matrix, axis=1))
return rho0
def _compute_q0(self, distance_matrix, rho0):
"""The sampling density."""
meanrho0 = np.mean(rho0)
rho0tilde = rho0 / meanrho0
# TODO: eps0 could also be optimized (see Berry Code + paper ref2)
eps0 = meanrho0 ** 2
expon_matrix = _symmetric_matrix_division(
matrix=-distance_matrix, vec=rho0tilde, scalar=2 * eps0
)
nr_samples = distance_matrix.shape[0]
# according to eq. (10) in ref1
q0 = (
np.power(2 * np.pi * eps0, -self.expected_dim / 2)
/ (np.power(rho0, self.expected_dim) * nr_samples)
* np.sum(np.exp(expon_matrix), axis=1)
)
return q0
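# In formulas (eq. (10) in ref1; editor's addition, assuming that
# `_symmetric_matrix_division` divides element-wise by the outer product of
# its `vec` argument and scales by `scalar`):
#   q0_i = (2*pi*eps0)^(-d/2) / (rho0_i^d * N)
#          * sum_j exp(-D_ij / (2 * eps0 * rho0tilde_i * rho0tilde_j))
# with d = expected_dim, N the number of samples and D the squared distances.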
def _compute_rho(self, q0):
"""The bandwidth function for K_eps_s"""
rho = np.power(q0, self.beta)
# Division by rho-mean is not in papers, but in berry code (ref3)
return rho / np.mean(rho)
def _compute_kernel_eps_s(self, distance_matrix, rho):
expon_matrix = _symmetric_matrix_division(
matrix=distance_matrix, vec=rho, scalar=-4 * self.epsilon
)
kernel_eps_s = np.exp(expon_matrix, out=expon_matrix)
return kernel_eps_s
def _compute_q_eps_s(self, kernel_eps_s, rho):
rho_power_dim = np.power(rho, self.expected_dim)[:, np.newaxis]
q_eps_s = np.sum(kernel_eps_s / rho_power_dim, axis=1)
return q_eps_s
def _compute_kernel_eps_alpha_s(self, kernel_eps_s, q_eps_s):
kernel_eps_alpha_s = _symmetric_matrix_division(
matrix=kernel_eps_s, vec=np.power(q_eps_s, self.alpha)
)
return kernel_eps_alpha_s
def _compute_matrix_l(self, kernel_eps_alpha_s, rho):
rhosq = np.square(rho)[:, np.newaxis]
n_samples = rho.shape[0]
matrix_l = (kernel_eps_alpha_s - np.eye(n_samples)) / (self.epsilon * rhosq)
return matrix_l
def _compute_matrix_s_inv(self, rho, q_eps_alpha_s):
s_diag = np.reciprocal(rho * np.sqrt(q_eps_alpha_s))
return scipy.sparse.diags(s_diag)
def _compute_matrix_l_conjugate(self, kernel_eps_alpha_s, rho, q_eps_alpha_s):
basis_change_matrix = self._compute_matrix_s_inv(rho, q_eps_alpha_s)
p_sq_inv = scipy.sparse.diags(np.reciprocal(np.square(rho)))
matrix_l_hat = (
basis_change_matrix @ kernel_eps_alpha_s @ basis_change_matrix
- (p_sq_inv - scipy.sparse.diags(np.ones(kernel_eps_alpha_s.shape[0])))
)
import unittest
import sam
from math import log, sqrt
import numpy as np
from scipy.stats import multivariate_normal
from scipy.special import logit
def logProb1(x, gradient, getGradient):
if getGradient:
gradient[0] = sam.gammaDLDX(x[0], 20, 40)
gradient[1] = sam.normalDLDX(x[1], 5, 1)
return sam.gammaLogPDF(x[0], 20, 40) + sam.normalLogPDF(x[1], 5, 1)
def logProb2(x):
return sam.betaLogPDF(x[0], 15, 20)
def logProb3(x, gradient, getGradient):
assert not getGradient
return sam.betaLogPDF(x[0], 20, 40) + sam.normalLogPDF(x[1], 5, 1)
_logProb4_ = multivariate_normal(cov=[[1., .3], [.3, 1]]).logpdf
def logProb4(x):
return _logProb4_(x)
def raisesLogProb(x):
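# Note (editor's comment): for a scalar x the raise below is unreachable, since
# `x > np.inf` is always False. For a vector-valued x, however, `x > np.inf`
# yields a boolean array, and using it in an `if` raises "ValueError: The truth
# value of an array ... is ambiguous", which is what testErrorHandling below
# appears to rely on.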
if x > np.inf:
raise ValueError("x can never be good enough!")
return -1
class SamTester(unittest.TestCase):
def testErrorHandling(self):
a = sam.Sam(raisesLogProb, [.5, .5], [0., -np.inf])
self.assertIsNone(a.results)
self.assertIsNone(a.samples)
self.assertRaises(AssertionError, a.getStats)
self.assertRaises(AssertionError, a.summary)
self.assertRaises(ValueError, a.run, 1000, [.5, .5])
self.assertRaises(AttributeError, a.gradientDescent, [.5, .5])
self.assertRaises(ValueError, a.simulatedAnnealing, [.5, .5])
self.assertRaises(AssertionError, a.getSampler, 2)
self.assertRaises(OverflowError, a.getSampler, -3)
self.assertRaises(ValueError, sam.normalCDF, 1, 0, -1)
def testModelSelection(self):
# This is a roundabout way to test them, but it does work
def rightModel(x):
return sam.normalLogPDF(x, 0, 1.)
def wrongModel(x):
return sam.normalLogPDF(x, 0, 2.)
def flatPrior(x):
return 0.
a = sam.Sam(rightModel, .5)
a.run(100000, .5, showProgress=False)
b = sam.Sam(wrongModel, .5)
b.run(100000, .5, showProgress=False)
assert not any(np.isnan(a.resultsLogProb))
assert not any(np.isnan(b.resultsLogProb))
# DIC
right = a.getDIC(flatPrior)
wrong = b.getDIC(flatPrior)
self.assertLessEqual(right, wrong)
self.assertAlmostEqual(right, 3., delta=.2)
self.assertAlmostEqual(wrong, 4.4, delta=.2)
# AIC
right = a.getAIC(flatPrior)
wrong = b.getAIC(flatPrior)
self.assertLessEqual(right, wrong)
self.assertAlmostEqual(right, 3.837, delta=.01)
self.assertAlmostEqual(wrong, 5.224, delta=.01)
# BIC
right = a.getBIC(flatPrior, 1000)
wrong = b.getBIC(flatPrior, 1000)
self.assertLessEqual(right, wrong)
self.assertAlmostEqual(right, 8.74, delta=.01)
self.assertAlmostEqual(wrong, 10.13, delta=.01)
return
def testACF(self):
x = [np.pi]
for i in range(10000):
x.append(np.pi + .9*x[-1] + sam.normalRand())
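# The loop above simulates a stationary AR(1) process
# x_t = pi + 0.9 * x_{t-1} + eps_t, whose autocorrelation at lag k is 0.9**k;
# this is the theoretical curve compared against below.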
sampleACF = sam.acf(x, 30)
theoryACF = .9**np.arange(30)
self.assertTrue(np.allclose(sampleACF, theoryACF, .1, .1))
return
def testLogit(self):
x = [.234124, 1.-1e-13, 1e-13]
self.assertAlmostEqual(sam.logit(x[0]), logit(x[0]), 13)
self.assertAlmostEqual(sam.logit(x[1]), logit(x[1]), 13)
self.assertAlmostEqual(sam.logit(x[2]), logit(x[2]), 13)
return
def testGaussianProcess(self):
x = np.linspace(0, 10, 100)
y = np.sin(x)
y2 = np.cos(x)
f = sam.GaussianProcess(x, y, 'exp')
loglike = f.logLikelihood(np.array([10, .5, 0]))
gpMean, gpVar = f.predict(np.array([5.]))
gpVar = np.sqrt(np.diag(gpVar))
with self.assertRaises(ValueError):
f.gradient(3.5)
self.assertAlmostEqual(gpMean[0], -0.957698488, delta=.01)
self.assertAlmostEqual(gpVar[0], 0.0502516, delta=.01)
self.assertAlmostEqual(loglike, 109.90324, delta=.01)
f.setY(y2)
gpMean = f.predict(np.array([5.]), False)
self.assertAlmostEqual(gpMean[0], np.cos(5.), delta=.01)
def testGaussianProcess2D(self):
x = np.linspace(0, 1, 400).reshape(200, 2)
z = np.sin(np.sum(x, axis=-1))
f = sam.GaussianProcess(x, z, 'matern32')
loglike = f.logLikelihood(np.array([1, .5, 0]))
gpMean, gpVar = f.predict([[.5, .5]])
gpVar = np.sqrt(np.diag(gpVar))
grad = f.gradient([.5, .5])
self.assertAlmostEqual(grad[0], 0.537, delta=.01)
self.assertAlmostEqual(grad[1], 0.542, delta=.01)
self.assertAlmostEqual(loglike, 1107.363, delta=.01)
self.assertAlmostEqual(gpMean[0], 0.841, delta=.01)
self.assertAlmostEqual(gpVar[0], 0.00217, delta=.01)
def test1DMetropolis(self):
a = sam.Sam(logProb2, .5, 0., 1.)
samples = a.run(100000, 1, showProgress=False)
self.assertGreaterEqual(a.getAcceptance()[0], 0.)
self.assertLessEqual(a.getAcceptance()[0], 1.)
self.assertTrue((samples >= 0).all())
self.assertTrue((samples <= 1).all())
self.assertAlmostEqual(samples.mean(), sam.betaMean(15, 20), delta=.01)
self.assertAlmostEqual(samples.std(), sam.betaStd(15, 20), delta=.01)
def testSummary(self):
a = sam.Sam(logProb2, .5, 0., 1.)
with self.assertRaises(AssertionError):
a.summary()
a.run(100000, .5, showProgress=False)
self.assertGreaterEqual(len(a.summary(None, True)), 0)
def testGetCovar(self):
a = sam.Sam(logProb4, np.ones(2))
a.addMetropolis()
c = a.getProposalCov()
for i, j in zip(c.flatten(), [1, 0., 0., 1]):
self.assertAlmostEqual(i, j)
a.clearSamplers()
a.addMetropolis(np.array([[1, .1], [.1, 1.]])/2.)
c = a.getProposalCov(0)
for i, j in zip(c.flatten(), np.array([1, .1, .1, 1])/2.):
self.assertAlmostEqual(i, j)
a.clearSamplers()
a.addHMC(10, .1)
c = a.getProposalCov()
for i, j in zip(c.flatten(), [1, 0., 0., 1]):
self.assertAlmostEqual(i, j)
a.clearSamplers()
a.addAdaptiveMetropolis(np.array([[1, .1], [.1, 1.]])/2.)
c = a.getProposalCov(0)
# The covariance output is the sample covariance, which should be 0
for i, j in zip(c.flatten(), [0, 0, 0, 0.]):
self.assertAlmostEqual(i, j)
def test2DMetropolis(self):
a = sam.Sam(logProb1, [.5, .5], [0., -np.inf])
samples = a.run(100000, [.5, .5], 1000, showProgress=False)
self.assertGreaterEqual(a.getAcceptance()[0], 0.)
self.assertLessEqual(a.getAcceptance()[0], 1.)
self.assertGreaterEqual(a.getAcceptance()[1], 0.)
self.assertLessEqual(a.getAcceptance()[1], 1.)
self.assertTrue((samples[:, 0] >= 0).all())
self.assertAlmostEqual(samples[:, 0].mean(), sam.gammaMean(20, 40), delta=.01)
self.assertAlmostEqual(samples[:, 0].std(), sam.gammaStd(20, 40), delta=.01)
self.assertAlmostEqual(samples[:, 1].mean(), 5., delta=.1)
self.assertAlmostEqual(samples[:, 1].std(), 1., delta=.1)
for i in range(50000):
self.assertAlmostEqual(a.samplesLogProb[i], logProb1(a.samples[i], None, False))
def testThreading(self):
a = sam.Sam(logProb1, [.5, .5], lowerBounds=[0., -np.inf])
samples = a.run(100000, [.5, .5], 1000, threads=5, showProgress=False)
for i in a.getAcceptance():
self.assertGreaterEqual(i[0], 0.)
self.assertLessEqual(i[0], 1.)
self.assertGreaterEqual(i[1], 0.)
self.assertLessEqual(i[1], 1.)
self.assertEqual(len(a.results.shape), 2)
self.assertEqual(a.results.shape[0], 5*100000)
self.assertEqual(a.results.shape[1], 2)
self.assertEqual(len(a.samples.shape), 3)
self.assertEqual(a.samples.shape[0], 5)
self.assertEqual(a.samples.shape[1], 100000)
self.assertEqual(a.samples.shape[2], 2)
self.assertNotEqual(samples[0, -1, -1], samples[1, -1, -1])
samples = np.concatenate([samples[0], samples[1]], axis=0)
self.assertTrue((samples[:, 0] >= 0).all())
self.assertAlmostEqual(samples[:, 0].mean(), sam.gammaMean(20, 40), delta=.01)
self.assertAlmostEqual(samples[:, 0].std(), sam.gammaStd(20, 40), delta=.01)
self.assertAlmostEqual(samples[:, 1].mean(), 5., delta=.1)
self.assertAlmostEqual(samples[:, 1].std(), 1., delta=.1)
for i in range(100000):
for j in range(5):
self.assertAlmostEqual(a.samplesLogProb[j, i],
logProb1(a.samples[j, i], None, False))
def testThreading2(self):
a = sam.Sam(logProb1, [.5, .5], lowerBounds=[0., -np.inf])
samples = a.run(100000, np.random.rand(5, 2), 1000, threads=5, showProgress=False)
for i in a.getAcceptance():
self.assertGreaterEqual(i[0], 0.)
self.assertLessEqual(i[0], 1.)
self.assertGreaterEqual(i[1], 0.)
self.assertLessEqual(i[1], 1.)
with self.assertRaises(AttributeError):
a.samples = np.ones(5)
self.assertEqual(samples.shape[0], 5)
self.assertEqual(samples.shape[1], 100000)
self.assertEqual(samples.shape[2], 2)
self.assertNotEqual(samples[0, -1, -1], samples[1, -1, -1])
samples = np.concatenate([samples[0], samples[1]], axis=0)
self.assertTrue((samples[:, 0] >= 0).all())
self.assertAlmostEqual(samples[:, 0].mean(), sam.gammaMean(20, 40), delta=.01)
self.assertAlmostEqual(samples[:, 0].std(), sam.gammaStd(20, 40), delta=.01)
self.assertAlmostEqual(samples[:, 1].mean(), 5., delta=.1)
self.assertAlmostEqual(samples[:, 1].std(), 1., delta=.1)
for i in range(len(a.resultsLogProb)):
self.assertAlmostEqual(a.resultsLogProb[i], logProb1(a.results[i], None, False))
def test2DHMC(self):
a = sam.Sam(logProb1, [1, 1], lowerBounds=[0., -np.inf])
a.addHMC(10, .1)
samples = a.run(50000, [.5, .5], 10, showProgress=False)
self.assertTrue((samples[:, 0] >= 0).all())
self.assertAlmostEqual(samples[:, 0].mean(), sam.gammaMean(20, 40), delta=.05)
self.assertAlmostEqual(samples[:, 0].std(), sam.gammaStd(20, 40), delta=.05)
self.assertAlmostEqual(samples[:, 1].mean(), 5., delta=.2)
self.assertAlmostEqual(samples[:, 1].std(), 1., delta=.2)
def testCorrelatedMetropolis(self):
a = sam.Sam(logProb4, np.ones(2))
a.addMetropolis(np.array([[1, .1], [.1, 1.]])/2.)
samples = a.run(100000, 5*np.ones(2), 1000, showProgress=False)
self.assertAlmostEqual(samples[:, 0].mean(), 0., delta=.05)
self.assertAlmostEqual(samples[:, 0].std(), 1., delta=.1)
self.assertAlmostEqual(samples[:, 1].mean(), 0., delta=.05)
self.assertAlmostEqual(samples[:, 1].std(), 1., delta=.1)
def testAdaptiveMetropolis(self):
a = sam.Sam(logProb4, np.ones(2))
a.addAdaptiveMetropolis(np.array([[1, .1], [.1, 1.]])/2., scaling=4.)
samples = a.run(50000, 5*np.ones(2), 1000, showProgress=False)
self.assertAlmostEqual(samples[:, 0].mean(), 0., delta=.1)
self.assertAlmostEqual(samples[:, 0].std(), 1., delta=.1)
self.assertAlmostEqual(samples[:, 1].mean(), 0., delta=.1)
self.assertAlmostEqual(samples[:, 1].std(), 1., delta=.1)
def test2DGradientDescent(self):
a = sam.Sam(logProb1, [.5, .5], lowerBounds=[0., -np.inf])
posteriorMax = a.gradientDescent([.5, .5], step=.05)
self.assertAlmostEqual(posteriorMax[0], 19./40., delta=1e-4)
self.assertAlmostEqual(posteriorMax[1], 5., delta=1e-4)
def testRunningStats(self):
a = sam.Sam(logProb3, [.5, .5], lowerBounds=[0., -np.inf], upperBounds=[1., np.inf])
a.addMetropolis()
samples = a.run(100000, [.5, .5], 1000, recordStop=0, collectStats=True, showProgress=False)
self.assertEqual(samples.size, 0)
self.assertAlmostEqual(a.getStats()[0][0], sam.betaMean(20, 40), delta=.01)
self.assertAlmostEqual(a.getStats()[1][0], sam.betaStd(20, 40), delta=.01)
self.assertAlmostEqual(a.getStats()[0][1], 5, delta=.1)
self.assertAlmostEqual(a.getStats()[1][1], 1, delta=.1)
def testExceptionsRaised(self):
a = sam.Sam(None, np.ones(1))
with self.assertRaises(RuntimeError):
a(np.ones(1))
class DistributionTester(unittest.TestCase):
# ===== Special Functions =====
def testSpecialFunctions(self):
self.assertAlmostEqual(sam.incBeta(.8, 3.4, 2.1), .04811402)
self.assertAlmostEqual(sam.beta(.7, 2.5), 0.7118737432)
self.assertAlmostEqual(sam.gamma(2.5), 1.329340388)
self.assertAlmostEqual(sam.digamma(12.5), 2.4851956512)
# ===== Distributions =====
def testNormalDistribution(self):
with self.assertRaises(ValueError):
sam.normalPDF(0, 1, -3)
with self.assertRaises(ValueError):
sam.normalCDF(0., 1., 0.)
with self.assertRaises(ValueError):
sam.normalLogPDF(0, 1, -5.)
self.assertAlmostEqual(sam.normalPDF(1, 3, 4), 0.08801633)
self.assertAlmostEqual(sam.normalMean(2, 4), 2.)
self.assertAlmostEqual(sam.normalVar(2, 4), 16.)
self.assertAlmostEqual(sam.normalStd(2, 4), 4.)
self.assertAlmostEqual(sam.normalLogPDF(1, 3, 4), log(0.08801633))
a = [sam.normalRand(3, 2) for i in range(100000)]
self.assertAlmostEqual(np.mean(a), 3., delta=3*.01)
def testMvNormalDistribution(self):
targetCov = np.random.rand(3, 3)
targetCov = targetCov*targetCov.T/2. + np.eye(3)
a = np.empty((100000, 3))
a = np.array([sam.mvNormalRand(np.array([1., 5., -3.]), targetCov) for i in range(100000)])
self.assertAlmostEqual(np.mean(a[:, 0]), 1., delta=.05)
self.assertAlmostEqual(np.mean(a[:, 1]), 5., delta=.05)
self.assertAlmostEqual(np.mean(a[:, 2]), -3., delta=.05)
for i, c in enumerate(np.cov(a.T, ddof=0).flatten()):
self.assertAlmostEqual(targetCov.flatten()[i], c, delta=.05)
targetChol = np.linalg.cholesky(targetCov)
a = np.array([sam.mvNormalRand(np.array([1., 5., -3.]), targetChol, isChol=True)
for i in range(100000)])
self.assertAlmostEqual(np.mean(a[:, 0]), 1., delta=.05)
self.assertAlmostEqual(np.mean(a[:, 1]), 5., delta=.05)
self.assertAlmostEqual(np.mean(a[:, 2]), -3., delta=.05)
for i, c in enumerate(np.cov(a.T, ddof=0).flatten()):
self.assertAlmostEqual(targetCov.flatten()[i], c, delta=.2)
self.assertAlmostEqual(sam.mvNormalLogPDF(np.ones(3), np.zeros(3), targetCov.copy()),
multivariate_normal.logpdf(np.ones(3), np.zeros(3), targetCov))
self.assertAlmostEqual(sam.mvNormalPDF(np.ones(3), np.zeros(3), targetCov.copy()),
multivariate_normal.pdf(np.ones(3), np.zeros(3), targetCov))
def testUniformDistribution(self):
self.assertAlmostEqual(sam.uniformMean(2, 4), 3.)
self.assertAlmostEqual(sam.uniformVar(2, 4), 4./12.)
self.assertAlmostEqual(sam.uniformStd(2, 4), 2./sqrt(12.))
self.assertAlmostEqual(sam.uniformPDF(3, 2, 4), 0.5)
self.assertAlmostEqual(sam.uniformLogPDF(3, 2, 4), log(0.5))
self.assertAlmostEqual(sam.uniformCDF(2.5, 2, 4), 0.25)
a = [sam.uniformRand(3, 4) for i in range(100000)]
self.assertAlmostEqual(np.mean(a), 3.5, delta=3.5*.01)
def testGammaDistribution(self):
with self.assertRaises(ValueError):
sam.gammaPDF(4., 1, -3)
with self.assertRaises(ValueError):
sam.gammaCDF(2., 0., 1.)
with self.assertRaises(ValueError):
sam.gammaMode(10., -np.inf)
self.assertAlmostEqual(sam.gammaMean(3, 4), .75)
self.assertAlmostEqual(sam.gammaVar(3, 4), 3./16)
self.assertAlmostEqual(sam.gammaStd(3, 4), sqrt(3)/4.)
self.assertAlmostEqual(sam.gammaPDF(1, 3, 4), .586100444)
self.assertAlmostEqual(sam.gammaLogPDF(1, 3, 4), log(.586100444))
self.assertAlmostEqual(sam.gammaCDF(1, 3, 4), 0.7618966944464)
a = [sam.gammaRand(3, 4) for i in range(100000)]
self.assertAlmostEqual(np.mean(a), 3./4, delta=.75*.01)
def testInvGammaDistribution(self):
with self.assertRaises(ValueError):
sam.invGammaPDF(4., 1, -3)
with self.assertRaises(ValueError):
sam.invGammaCDF(2., 0., 1.)
with self.assertRaises(ValueError):
sam.invGammaMode(10., -np.inf)
self.assertAlmostEqual(sam.invGammaMean(3, 4), 2.)
self.assertAlmostEqual(sam.invGammaVar(3, 4), 4.)
self.assertAlmostEqual(sam.invGammaStd(3, 4), 2.)
self.assertAlmostEqual(sam.invGammaPDF(1, 3, 4), .0060843811)
self.assertAlmostEqual(sam.invGammaLogPDF(1, 3, 4), log(.0060843811))
self.assertAlmostEqual(sam.invGammaCDF(1, 3, 4), .002161, delta=.001)
a = [sam.invGammaRand(3, 4) for i in range(100000)]
self.assertAlmostEqual(np.mean(a), 2., delta=2*.01)
def testBetaDistribution(self):
with self.assertRaises(ValueError):
sam.betaPDF(.3, 1, -3)
with self.assertRaises(ValueError):
sam.betaCDF(2., 0., 1.)
with self.assertRaises(ValueError):
sam.betaMode(10., -np.inf)
self.assertAlmostEqual(sam.betaMean(3, 4), 3./7)
self.assertAlmostEqual(sam.betaVar(3, 4), .0306122)
self.assertAlmostEqual(sam.betaStd(3, 4), 0.17496355305)
self.assertAlmostEqual(sam.betaPDF(.5, 3, 4), 1.875)
self.assertAlmostEqual(sam.betaLogPDF(.5, 3, 4), log(1.875))
self.assertAlmostEqual(sam.betaCDF(.5, 3, 4), .65625)
a = [sam.betaRand(3, 4) for i in range(100000)]
self.assertAlmostEqual(np.mean(a), 3./7, delta=3./7.*.01)
def testPoissonDistribution(self):
with self.assertRaises(ValueError):
sam.poissonPDF(3, -1.5)
with self.assertRaises(ValueError):
sam.poissonStd(0.)
with self.assertRaises(ValueError):
sam.betaMode(-1., 3.)
self.assertAlmostEqual(sam.poissonMean(2.4), 2.4)
self.assertAlmostEqual(sam.poissonVar(2.4), 2.4)
self.assertAlmostEqual(sam.poissonStd(2.4), sqrt(2.4))
self.assertAlmostEqual(sam.poissonPDF(3, 2.4), .2090141643)
self.assertAlmostEqual(sam.poissonLogPDF(3, 2.4), log(.2090141643))
self.assertAlmostEqual(sam.poissonCDF(3.2, 2.4), 0.7787229)
a = [sam.poissonRand(3.4) for i in range(100000)]
self.assertAlmostEqual(np.mean(a), 3.4, delta=3.4*.01)
"""
Copyright Declaration (C)
From: https://github.com/leeykang/
Use and modification of information, comment(s) or code provided in this document
is granted if and only if this copyright declaration, located between lines 1 to
9 of this document, is preserved at the top of any document where such
information, comment(s) or code is/are used.
"""
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy.stats import poisson
from functools import reduce
from operator import mul
from collections import defaultdict
from mpl_toolkits.mplot3d import Axes3D
from seaborn import heatmap
from matplotlib import cm
from itertools import product
from multiprocessing import Pool
from copy import deepcopy
from time import time
class CarRental:
"""
Provides the definition of the car rental problem.
Parameter(s):
name: Name of the CarRental problem.
max_cars_list: A list containing the maximum number of cars that can be in
each location at any point in time.
transfer_dict: A dictionary containing information about transferring cars
from one location to another. Should be stored in the form of key:
(source location index, destination location index) and value: (maximum
number of cars that can be transferred from the source location to the
destination location, maximum number of cars that can be transferred from
the source location to the destination location for free, cost of
transferring a car from the source location to the destination location).
rental_fee: The cost of renting a vehicle at any location, used as a revenue
for the car rental problem.
add_storage_threshold_list: A list containing the maximum number of cars
that can be stored at each location before additional storage costs are
incurred.
add_storage_fee: The cost of additional storage at any location.
rental_lambda_list: A list containing the expected number of rentals at
each location, based on a Poisson distribution.
return_lambda_list: A list containing the expected number of returns at
each location, based on a Poisson distribution.
discount: The discount rate when considering the subsequent state.
use_multiprocessing: Boolean variable for deciding whether to use
multiprocessing to solve the car rental problem.
num_max_processes (optional, default 8): The maximum number of processes to
use for multiprocessing.
"""
def __init__(self,
name,
max_cars_list,
transfer_dict,
rental_fee,
add_storage_threshold_list,
add_storage_fee,
rental_lambda_list,
return_lambda_list,
discount,
use_multiprocessing,
num_max_processes=8):
# Initialises the car rental problem based on the given parameters.
self.name = name
self.max_cars_list = max_cars_list
self.transfer_dict = transfer_dict
self.rental_fee = rental_fee
self.add_storage_threshold_list = add_storage_threshold_list
self.add_storage_fee = add_storage_fee
self.rental_lambda_list = rental_lambda_list
self.return_lambda_list = return_lambda_list
self.discount = discount
self.use_multiprocessing = use_multiprocessing
self.num_max_processes = num_max_processes
# Computes the number of car rental locations.
self.num_locations = len(max_cars_list)
# Initialises the current available solving methods as a dictionary,
# with key being the method name and value being the specific function
# to call.
self.implemented_solve_methods = {'policy_iteration': self.policy_iteration,
'value_iteration': self.value_iteration}
# Computes values required for solving the car rental problem.
self.__compute_values()
def __compute_values(self):
"""
Computes values required for solving the CarRental problem.
"""
# Initialises the maximum transfer array (maximum number of car
# transfers between two locations), free transfer array (maximum number
# of free car transfers between two locations) and transfer cost array
# (cost of transferring a car from one location to another) with 0.
self.max_transfers_arr = np.zeros((self.num_locations, self.num_locations), int)
self.free_transfers_num_arr = np.zeros((self.num_locations, self.num_locations), int)
self.transfer_cost_arr = np.zeros((self.num_locations, self.num_locations), int)
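# The rental and return counts at each location follow Poisson distributions
# (see rental_lambda_list / return_lambda_list above). A minimal sketch of the
# truncated pmf typically used when demand is capped by the cars available
# (editor's addition; `lam` and `n_max` are hypothetical names):
#
#   def truncated_poisson_pmf(lam, n_max):
#       # P(N = n) for n = 0..n_max, with the tail mass P(N >= n_max)
#       # lumped into the last entry so the probabilities sum to one.
#       pmf = poisson.pmf(np.arange(n_max + 1), lam)
#       pmf[-1] = 1.0 - poisson.cdf(n_max - 1, lam)
#       return pmf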
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import yt
from yt.mods import *
import glob
import matplotlib
import os
from matplotlib import pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import numpy as np
np.set_printoptions(threshold=np.inf)
delta_i=10000
i=0
max_data=90000
j=0
every_nthfile=3
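# Sort the 'plt*' checkpoint directories numerically by the digits after the
# 'plt' prefix; a plain string sort would put 'plt10000' before 'plt9000'.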
profilefilelist = sorted(glob.glob('plt*'), key=lambda name: int(name[3:]))
number_plt=len(profilefilelist)
average_T=0
average_P=0
Dimension=2
old_time=0
time_difference_x_den=1000.0
time_difference_x_T=1000.0
time_difference_x_P=1000.0
time_difference_x_entropy=1000.0
time_difference_x_rad=1000.0
time_difference_T_P=1000.0
time_difference_x_opacity=1000.0
time_difference_x_Mach=1000.0
j=0
old_time=-time_difference_x_den
max_time=10000.0
for i in profilefilelist:
ds=load(i)
time=float(ds.current_time)
filename_time=str(int(time))
if time>=old_time+time_difference_x_den and time<max_time :
old_time=time
print('Adding lines at t='+filename_time+r's for $\rho$-height plot')
location1=ds.find_max('Temp')
location2=ds.find_min('Temp')
kappa=0.18*1e-9
SB_constant=5.6704*1e-5
ad=ds.all_data()
if Dimension==1:
my_ray = ds.ortho_ray(0,(0,0))
srt=np.argsort(my_ray['x'])
x_coord=np.array(my_ray['x'][srt])/1.e5
elif Dimension==2:
my_ray = ds.ortho_ray(1,(0,0))
srt=np.argsort(my_ray['y'])
x_coord=np.array(my_ray['y'][srt])/1.e5
temp=np.array(my_ray['Temp'][srt])
ent=np.array(my_ray['entropy'][srt])
dens=np.array(my_ray['density'][srt])
my_ray = ds.ortho_ray(1,(0,0))
plt.semilogy(x_coord,dens,label='t='+filename_time+'s')
plt.legend()
plt.ylabel(r'$\rho\/[\mathrm{g}/\mathrm{cm}^{3}]$')
plt.xlabel(r'$y\/[\mathrm{km}]$')
# plt.xscale('linear')
# plt.yscale('linear')
plt.title('Density vs height')
plt.savefig("figure_x_den.png")
j=j+1
print("density plot made")
plt.close()
j=0
old_time=-time_difference_T_P
for i in profilefilelist:
ds=load(i)
time=float(ds.current_time)
filename_time=str(int(time))
kappa=0.18*1e-9
SB_constant=5.6704*1e-5
if Dimension==1:
my_ray = ds.ortho_ray(0,(0,0))
srt=np.argsort(my_ray['x'])
x_coord=np.array(my_ray['x'][srt])/1.e5
elif Dimension==2:
my_ray = ds.ortho_ray(1,(0,0))
srt=np.argsort(my_ray['y'])
x_coord=np.array(my_ray['y'][srt])/1.e5
temp=np.array(my_ray['Temp'][srt])
ent=np.array(my_ray['entropy'][srt])
dens=np.array(my_ray['density'][srt])
press=np.array(my_ray['pressure'][srt])/1.e6
average_T=(average_T*float(j)+temp)/float(j+1)
average_P=(average_P*float(j)+press)/float(j+1)
if j==0 :
temp0=np.array(my_ray['Temp'][srt])
ent0=np.array(my_ray['entropy'][srt])
pressure0=np.array(my_ray['pressure'][srt])
density0=np.array(my_ray['density'][srt])
location1=np.amin(pressure0)/1e10
location2=np.amax(pressure0)
if time>=old_time+time_difference_T_P and time<max_time :
old_time=time
plt.plot(average_P,average_T,label='t='+filename_time+'s')
plt.legend()
print('Adding lines at t='+filename_time+'s')
plt.xlabel(r'$P\/[\mathrm{bar}=10^{6}\mathrm{dyne}/\mathrm{cm}^2$]')
plt.ylabel(r'$T\/[\mathrm{K}]$')
plt.xlim(0.001,2000)
plt.ylim(1000,2000)
plt.xscale('log')
#plt.yscale('log')
plt.title('<T> vs <P> (average up to t)')
plt.savefig("figure_T_P_average.png")
j=j+1
print("T-P plot average made")
plt.close()
j=0
old_time=-time_difference_T_P
for i in profilefilelist:
ds=load(i)
time=float(ds.current_time)
filename_time=str(int(time))
kappa=0.18*1e-9
SB_constant=5.6704*1e-5
if Dimension==1:
my_ray = ds.ortho_ray(0,(0,0))
srt=np.argsort(my_ray['x'])
x_coord=np.array(my_ray['x'][srt])/1.e5
elif Dimension==2:
my_ray = ds.ortho_ray(1,(0,0))
srt=np.argsort(my_ray['y'])
x_coord=np.array(my_ray['y'][srt])/1.e5
temp=np.array(my_ray['Temp'][srt])
ent=np.array(my_ray['entropy'][srt])
dens=np.array(my_ray['density'][srt])
press=np.array(my_ray['pressure'][srt])/1.e6
average_T=(average_T*float(j)+temp)/float(j+1)
average_P=(average_P*float(j)+press)/float(j+1)
if j==0 :
temp0=np.array(my_ray['Temp'][srt])
ent0=np.array(my_ray['entropy'][srt])
pressure0=np.array(my_ray['pressure'][srt])/1.e6
density0=np.array(my_ray['density'][srt])
location1=np.amin(pressure0)/1e10
location2=np.amax(pressure0)/1e10
diff_temp=(average_T-temp0)
diff_pressure=average_P-pressure0
if time>=old_time+time_difference_T_P and time<max_time :
old_time=time
plt.plot(average_P,diff_temp,label='t='+filename_time+'s')
plt.legend()
print('Adding lines at t='+filename_time+'s')
plt.xlabel(r'$P\/[\mathrm{bar}=10^{6}\mathrm{dyne}/\mathrm{cm}^2$]')
plt.ylabel(r'$\Delta T\/[\mathrm{K}]$')
plt.xlim(0.001,2000)
plt.ylim(-50,50)
plt.figtext(0.5,0.3,'$\Delta t_{\mathrm{damp}}=7000$s', fontsize=20)
plt.xscale('log')
#plt.yscale('log')
plt.title('<$\Delta$ T> vs <P> (average up to t)')
plt.savefig("figure_T_P_diff_average.png")
j=j+1
print("T-P difference average plot made")
plt.close()
j=0
old_time=-time_difference_x_P
for i in profilefilelist:
ds=load(i)
time=float(ds.current_time)
filename_time=str(int(time))
if time>=old_time+time_difference_x_P and time<max_time:
old_time=time
print('Adding lines at t='+filename_time+'s for P-height plot')
kappa=0.18*1e-9
SB_constant=5.6704*1e-5
if Dimension==1:
my_ray = ds.ortho_ray(0,(0,0))
srt=np.argsort(my_ray['x'])
x_coord=np.array(my_ray['x'][srt])/1.e5
elif Dimension==2:
my_ray = ds.ortho_ray(1,(0,0))
srt=np.argsort(my_ray['y'])
x_coord=np.array(my_ray['y'][srt])/1.e5
temp=np.array(my_ray['Temp'][srt])
ent=np.array(my_ray['entropy'][srt])
dens=np.array(my_ray['density'][srt])
press=np.array(my_ray['pressure'][srt])/1.e6
if j==0 :
temp0=np.array(my_ray['Temp'][srt])
ent0=np.array(my_ray['entropy'][srt])
pressure0=np.array(my_ray['pressure'][srt])
density0=np.array(my_ray['density'][srt])
location1=np.amin(pressure0)/1e10
location2=np.amax(pressure0)
plt.plot(x_coord,press,label='t='+filename_time+'s')
plt.legend()
plt.ylabel(r'$P\/[\mathrm{bar}=10^{6}\mathrm{dyne}/\mathrm{cm}^2$]')
plt.xlabel(r'$y\/[\mathrm{km}]$')
# plt.xscale('linear')
plt.title('P vs height')
plt.yscale('log')
plt.savefig("figure_x_P.png")
j=j+1
print("P plot made")
plt.close()
j=0
old_time=-time_difference_x_T
for i in profilefilelist:
ds=load(i)
time=float(ds.current_time)
filename_time=str(int(time))
if time>=old_time+time_difference_x_T and time<max_time:
old_time=time
print('Adding lines at t='+filename_time+'s for T-height plot')
location1=ds.find_max('Temp')
location2=ds.find_min('Temp')
kappa=0.18*1e-9
SB_constant=5.6704*1e-5
ad=ds.all_data()
if Dimension==1:
my_ray = ds.ortho_ray(0,(0,0))
srt=np.argsort(my_ray['x'])
x_coord=np.array(my_ray['x'][srt])/1.e5
elif Dimension==2:
my_ray = ds.ortho_ray(1,(0,0))
srt=np.argsort(my_ray['y'])
x_coord=np.array(my_ray['y'][srt])/1.e5
temp=np.array(my_ray['Temp'][srt])
ent=np.array(my_ray['entropy'][srt])
dens=np.array(my_ray['density'][srt])
press=np.array(my_ray['pressure'][srt])/1.e6
plt.semilogy(x_coord,temp,label='t='+filename_time+'s')
plt.legend()
plt.ylabel(r'$T$ [K]')
plt.xlabel(r'$y\/[\mathrm{km}]$')
plt.title('T vs height')
#plt.legend(loc=3)
#plt.yscale('linear')
plt.savefig("figure_x_T.png")
j=j+1
print("T plot made")
plt.close()
j=0
old_time=-time_difference_x_opacity
for i in profilefilelist:
ds=load(i)
time=float(ds.current_time)
filename_time=str(int(time))
if time>=old_time+time_difference_x_opacity and time<max_time:
old_time=time
print('Adding lines at t='+filename_time+'s for $\kappa$-height plot')
location1=ds.find_max('Temp')
location2=ds.find_min('Temp')
kappa=0.18*1e-9
SB_constant=5.6704*1e-5
ad=ds.all_data()
if Dimension==1:
my_ray = ds.ortho_ray(0,(0,0))
srt=np.argsort(my_ray['x'])
x_coord=np.array(my_ray['x'][srt])/1.e5
elif Dimension==2:
my_ray = ds.ortho_ray(1,(0,0))
srt=np.argsort(my_ray['y'])
x_coord=np.array(my_ray['y'][srt])/1.e5
temp=np.array(my_ray['Temp'][srt])
ent=np.array(my_ray['entropy'][srt])
dens=np.array(my_ray['density'][srt])
press=np.array(my_ray['pressure'][srt])/1.e6
opacity=np.array(my_ray['pressure'][srt])*0.18/1.e9
plt.semilogy(x_coord,opacity,label='t='+filename_time+'s')
plt.legend()
plt.ylabel(r'$\kappa$ [$\mathrm{cm}^{2}\mathrm{g}^{-1}$]')
plt.xlabel(r'$y\/[\mathrm{km}]$')
plt.title('Opacity $\kappa$ vs height')
#plt.legend(loc=3)
#plt.yscale('linear')
plt.savefig("figure_x_opacity.png")
j=j+1
print("opacity plot made")
plt.close()
j=0
old_time=-time_difference_x_Mach
for i in profilefilelist:
ds=load(i)
time=float(ds.current_time)
filename_time=str(int(time))
if time>=old_time+time_difference_x_Mach and time<max_time:
old_time=time
print('Adding lines at t='+filename_time+'s for $\mathcal{M}$-P plot')
kappa=0.18*1e-9
SB_constant=5.6704*1e-5
if Dimension==1:
my_ray = ds.ortho_ray(0,(0,0))
srt=np.argsort(my_ray['x'])
x_coord=np.array(my_ray['x'][srt])/1.e5
elif Dimension==2:
my_ray = ds.ortho_ray(1,(0,0))
srt=np.argsort(my_ray['y'])
x_coord=np.array(my_ray['y'][srt])/1.e5
temp=np.array(my_ray['Temp'][srt])
ent=np.array(my_ray['entropy'][srt])
dens=np.array(my_ray['density'][srt])
press=np.array(my_ray['pressure'][srt])/1.e6
Mach_number=np.array(my_ray['MachNumber'][srt])
if j==0 :
temp0=np.array(my_ray['Temp'][srt])
ent0=np.array(my_ray['entropy'][srt])
pressure0=np.array(my_ray['pressure'][srt])
density0=np.array(my_ray['density'][srt])
location1=np.amin(pressure0)/1e10
location2=np.amax(pressure0)
plt.plot(press,Mach_number,label='t='+filename_time+'s')
plt.legend()
plt.ylabel(r'Mach number')
plt.xlabel(r'$P\/[\mathrm{bar}]$')
#plt.ylim(location1,location2)
# plt.xscale('linear')
plt.xlim(0.001,2000)
plt.ylim(0,5)
plt.yscale('linear')
plt.xscale('log')
plt.title('Mach number $\mathcal{M}$ vs P')
plt.savefig("figure_P_Mach.png")
j=j+1
print("Mach plot made")
plt.close()
j=0
old_time=-time_difference_x_opacity
for i in profilefilelist:
ds=load(i)
time=float(ds.current_time)
filename_time=str(int(time))
if time>=old_time+time_difference_x_opacity and time<max_time:
old_time=time
print('Adding lines at t='+filename_time+'s for $\kappa$-T plot')
location1=ds.find_max('Temp')
location2=ds.find_min('Temp')
kappa=0.18*1e-9
SB_constant=5.6704*1e-5
ad=ds.all_data()
if Dimension==1:
my_ray = ds.ortho_ray(0,(0,0))
srt=np.argsort(my_ray['x'])
x_coord=np.array(my_ray['x'][srt])/1.e5
elif Dimension==2:
my_ray = ds.ortho_ray(1,(0,0))
srt=np.argsort(my_ray['y'])
x_coord=np.array(my_ray['y'][srt])/1.e5
temp=np.array(my_ray['Temp'][srt])
ent=np.array(my_ray['entropy'][srt])
dens=np.array(my_ray['density'][srt])
press=np.array(my_ray['pressure'][srt])/1.e6
opacity=np.array(my_ray['pressure'][srt])*0.18/1.e9
plt.semilogy(temp,opacity,label='t='+filename_time+'s')
plt.legend()
plt.ylabel(r'$\kappa$ [$\mathrm{cm}^{2}\mathrm{g}^{-1}$]')
plt.xlabel(r'$T\/[\mathrm{K}]$')
#plt.legend(loc=3)
plt.xscale('log')
plt.yscale('log')
plt.xlim(400,10000)
plt.ylim(1e-7,1000) # a log axis requires a positive lower bound
plt.title('Opacity $\kappa$ vs T')
plt.savefig("figure_T_opacity.png")
j=j+1
print("opacity-T plot made")
plt.close()
j=0
old_time=-time_difference_x_entropy
for i in profilefilelist:
ds=load(i)
time=float(ds.current_time)
filename_time=str(int(time))
if time>=old_time+time_difference_x_entropy and time<max_time:
old_time=time
print('Adding lines at t='+filename_time+'s for entropy-height plot')
location1=ds.find_max('Temp')
location2=ds.find_min('Temp')
kappa=0.18*1e-9
SB_constant=5.6704*1e-5
ad=ds.all_data()
if Dimension==1:
my_ray = ds.ortho_ray(0,(0,0))
srt=np.argsort(my_ray['x'])
x_coord=np.array(my_ray['x'][srt])/1.e5
elif Dimension==2:
my_ray = ds.ortho_ray(1,(0,0))
srt=np.argsort(my_ray['y'])
x_coord=np.array(my_ray['y'][srt])/1.e5
temp=np.array(my_ray['Temp'][srt])
ent=np.array(my_ray['entropy'][srt])
dens=np.array(my_ray['density'][srt])
press=np.array(my_ray['pressure'][srt])/1.e6
plt.semilogy(x_coord,ent,label='t='+filename_time+'s')
plt.legend()
plt.ylabel(r'$\mathrm{Entropy}$')
plt.xlabel(r'$y\/[\mathrm{km}]$')
#plt.legend(loc=3)
#plt.yscale('linear')
plt.title('Entropy vs height')
plt.savefig("figure_x_entropy.png")
j=j+1
print("entropy plot made")
plt.close()
j=0
old_time=-time_difference_x_rad
for i in profilefilelist:
ds=load(i)
time=float(ds.current_time)
filename_time=str(int(time))
if time>=old_time+time_difference_x_rad and time<max_time:
old_time=time
print('Adding lines at t='+filename_time+'s for $U$-height plot')
location1=ds.find_max('Temp')
location2=ds.find_min('Temp')
kappa=0.18*1e-9
SB_constant=5.6704*1e-5
ad=ds.all_data()
if Dimension==1:
my_ray = ds.ortho_ray(0,(0,0))
srt=np.argsort(my_ray['x'])
x_coord=np.array(my_ray['x'][srt])/1.e5
elif Dimension==2:
my_ray = ds.ortho_ray(1,(0,0))
srt=np.argsort(my_ray['y'])
x_coord=np.array(my_ray['y'][srt])/1.e5
temp=np.array(my_ray['Temp'][srt])
ent=np.array(my_ray['entropy'][srt])
dens=np.array(my_ray['density'][srt])
press=np.array(my_ray['pressure'][srt])/1.e6
radiation=np.array(my_ray['rad'][srt])
plt.semilogy(x_coord,radiation,label='t='+filename_time+'s')
plt.legend()
plt.ylabel(r'$U [{\rm erg} {\rm cm}^{-3}]$')
plt.xlabel(r'$y\/[\mathrm{km}]$')
#plt.legend(loc=3)
#plt.yscale('linear')
plt.figtext(0.5,0.3,'$\Delta t_{\mathrm{damp}}=7000$s', fontsize=20)
plt.title('Radiation energy density $U$ vs height')
plt.savefig("figure_x_radiation.png")
j=j+1
print("radiation plot made")
plt.close()
j=0
old_time=-time_difference_T_P
for i in profilefilelist:
ds=load(i)
time=float(ds.current_time)
filename_time=str(int(time))
if time>=old_time+time_difference_T_P and time<max_time:
old_time=time
print('Adding lines at t='+filename_time+'s for $T-P$ plot')
kappa=0.18*1e-9
SB_constant=5.6704*1e-5
if Dimension==1:
my_ray = ds.ortho_ray(0,(0,0))
srt=np.argsort(my_ray['x'])
x_coord=np.array(my_ray['x'][srt])/1.e5
elif Dimension==2:
my_ray = ds.ortho_ray(1,(0,0))
srt=np.argsort(my_ray['y'])
x_coord=np.array(my_ray['y'][srt])/1.e5
temp=np.array(my_ray['Temp'][srt])
ent=np.array(my_ray['entropy'][srt])
dens=np.array(my_ray['density'][srt])
press=np.array(my_ray['pressure'][srt])/1.e6
if j==0 :
temp0=np.array(my_ray['Temp'][srt])
ent0=np.array(my_ray['entropy'][srt])
pressure0=np.array(my_ray['pressure'][srt])
density0=np.array(my_ray['density'][srt])
location1=np.amin(pressure0)/1e10
location2=np.amax(pressure0)
plt.plot(press,temp,label='t='+filename_time+'s')
plt.legend()
plt.xlabel(r'$P\/[\mathrm{bar}=10^{6}\mathrm{dyne}/\mathrm{cm}^2]$')
plt.ylabel(r'$T\/[\mathrm{K}]$')
plt.xlim(0.001,2000)
plt.ylim(1000,2000)
plt.xscale('log')
#plt.yscale('log')
plt.title('T vs P')
plt.savefig("figure_T_P.png")
j=j+1
print("T-P plot made")
plt.close()
j=0
old_time=-time_difference_T_P
for i in profilefilelist:
ds=load(i)
time=float(ds.current_time)
filename_time=str(int(time))
if time>=old_time+time_difference_T_P and time<max_time:
old_time=time
print('Adding lines at t='+filename_time+'s for $\Delta T - P$ plot')
kappa=0.18*1e-9
SB_constant=5.6704*1e-5
if Dimension==1:
my_ray = ds.ortho_ray(0,(0,0))
srt=np.argsort(my_ray['x'])
x_coord=np.array(my_ray['x'][srt])/1.e5
elif Dimension==2:
my_ray = ds.ortho_ray(1,(0,0))
srt=np.argsort(my_ray['y'])
x_coord=np.array(my_ray['y'][srt])/1.e5
temp=np.array(my_ray['Temp'][srt])
ent=np.array(my_ray['entropy'][srt])
dens=np.array(my_ray['density'][srt])
press=np.array(my_ray['pressure'][srt])/1.e6
if j==0 :
temp0=np.array(my_ray['Temp'][srt])
ent0=np.array(my_ray['entropy'][srt])
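pressure0=np.array(my_ray['pressure'][srt])
density0=np.array(my_ray['density'][srt])
location1=np.amin(pressure0)/1e10
location2=np.amax(pressure0)
# Assumed tail of this loop, reconstructed to mirror the T-P loop above and
# the averaged Delta T-P block at the top of this section (filename assumed):
plt.plot(press,temp-temp0,label='t='+filename_time+'s')
plt.legend()
plt.xlabel(r'$P\/[\mathrm{bar}=10^{6}\mathrm{dyne}/\mathrm{cm}^2]$')
plt.ylabel(r'$\Delta T\/[\mathrm{K}]$')
plt.xlim(0.001,2000)
plt.ylim(-50,50)
plt.xscale('log')
plt.title(r'$\Delta T$ vs P')
plt.savefig("figure_T_P_diff.png")
j=j+1
print("T-P difference plot made")
plt.close()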
#!/usr/bin/env python3
import numpy as np
import time
import glob, os
from scripts.laserscan import SemLaserScan, LaserScan
import argparse
from depth_cluster.build import Depth_Cluster
import cv2
import open3d as o3d
parser = argparse.ArgumentParser()
parser.add_argument('--sequence',dest= "sequence_in", default='00', help='')
parser.add_argument('--dataset',dest= "dataset", default='semanticKITTI', help='')
parser.add_argument('--root', dest= "root", default='./Dataset/semanticKITTI/',help="./Dataset/semanticKITTI/")
parser.add_argument('--range_y', dest= "range_y", type=int, default=64, help="64")
parser.add_argument('--range_x', dest= "range_x", type=int, default=2048, help="2048")
parser.add_argument('--minimum_points', dest= "minimum_points", type=int, default=40, help="minimum_points of each class")
parser.add_argument('--which_cluster', dest= "which_cluster", type=int, default=1, help="4: ScanLineRun clustering; 3: superVoxel clustering; 2: euclidean; 1: depth_cluster; ")
args = parser.parse_args()
sequence_in = args.sequence_in
if args.which_cluster == 1:
cluster = Depth_Cluster.Depth_Cluster(0.15,9) #angle threshold 0.15 (smaller th less clusters), search steps 9
def key_func(x):
return os.path.split(x)[-1]
def full_scan():
Scan = LaserScan(project=True, flip_sign=False, H=args.range_y, W=args.range_x, fov_up=3.0, fov_down=-25.0)
# load data
lidar_data = sorted(glob.glob('/home/alvari/Desktop/semanticKITTI/dataset/sequences/{0}/velodyne/*.bin'.format(sequence_in)), key=key_func)
label_data = sorted(glob.glob('/home/alvari/Desktop/semanticKITTI/dataset/sequences/{0}/labels/*.label'.format(sequence_in)), key=key_func)
for i in range(len(lidar_data)):
Scan.open_scan(lidar_data[i])
xyz_list = Scan.points
# organize pc
range_img_pre = Scan.proj_range
xyz = Scan.proj_xyz
semantic_label = np.fromfile(label_data[i], dtype=np.uint32)
semantic_label = semantic_label.reshape((-1))
semantic_label = semantic_label & 0xFFFF
semantic_label_img = np.zeros((64,2048))
for jj in range(len(Scan.proj_x)):
y_range, x_range = Scan.proj_y[jj], Scan.proj_x[jj]
if (semantic_label_img[y_range, x_range] == 0):
semantic_label_img[y_range, x_range] = semantic_label[jj]
# create gt ground plane mask #label numbers for ground plane: 40,44,48,49,60,72
gt_i = np.zeros((64, 2048))
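# A minimal sketch of the mask fill, assuming the ground-plane label ids
# listed in the comment above (40, 44, 48, 49, 60, 72) mark ground pixels:
for ground_label in [40, 44, 48, 49, 60, 72]:
    gt_i[semantic_label_img == ground_label] = 1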
"""
DaSiamRPN tracker.
Original paper: https://arxiv.org/abs/1808.06048
Link to original repo: https://github.com/foolwood/DaSiamRPN
Links to onnx models:
network: https://www.dropbox.com/s/rr1lk9355vzolqv/dasiamrpn_model.onnx?dl=0
kernel_r1: https://www.dropbox.com/s/999cqx5zrfi7w4p/dasiamrpn_kernel_r1.onnx?dl=0
kernel_cls1: https://www.dropbox.com/s/qvmtszx5h339a0w/dasiamrpn_kernel_cls1.onnx?dl=0
"""
import numpy as np
import cv2 as cv
import argparse
import sys
class DaSiamRPNTracker:
#initialization of used values, initial bounding box, used network
def __init__(self, im, target_pos, target_sz, net, kernel_r1, kernel_cls1):
self.windowing = "cosine"
self.exemplar_size = 127
self.instance_size = 271
self.total_stride = 8
self.score_size = (self.instance_size - self.exemplar_size) // self.total_stride + 1
self.context_amount = 0.5
self.ratios = [0.33, 0.5, 1, 2, 3]
self.scales = [8, ]
self.anchor_num = len(self.ratios) * len(self.scales)
self.penalty_k = 0.055
self.window_influence = 0.42
self.lr = 0.295
self.im_h = im.shape[0]
self.im_w = im.shape[1]
self.target_pos = target_pos
self.target_sz = target_sz
self.avg_chans = np.mean(im, axis=(0, 1))
self.net = net
self.score = []
if ((self.target_sz[0] * self.target_sz[1]) / float(self.im_h * self.im_w)) < 0.004:
raise AssertionError("Initializing BB is too small-try to restart tracker with larger BB")
self.anchor = self.__generate_anchor()
wc_z = self.target_sz[0] + self.context_amount * sum(self.target_sz)
hc_z = self.target_sz[1] + self.context_amount * sum(self.target_sz)
s_z = round(np.sqrt(wc_z * hc_z))
z_crop = self.__get_subwindow_tracking(im, self.exemplar_size, s_z)
z_crop = z_crop.transpose(2, 0, 1).reshape(1, 3, 127, 127).astype(np.float32)
self.net.setInput(z_crop)
z_f = self.net.forward('63')
kernel_r1.setInput(z_f)
r1 = kernel_r1.forward()
kernel_cls1.setInput(z_f)
cls1 = kernel_cls1.forward()
r1 = r1.reshape(20, 256, 4, 4)
cls1 = cls1.reshape(10, 256 , 4, 4)
self.net.setParam(self.net.getLayerId('65'), 0, r1)
self.net.setParam(self.net.getLayerId('68'), 0, cls1)
if self.windowing == "cosine":
self.window = np.outer(np.hanning(self.score_size), np.hanning(self.score_size))
elif self.windowing == "uniform":
self.window = np.ones((self.score_size, self.score_size))
self.window = np.tile(self.window.flatten(), self.anchor_num)
#creating anchor for tracking bounding box
def __generate_anchor(self):
self.anchor = np.zeros((self.anchor_num, 4), dtype = np.float32)
size = self.total_stride * self.total_stride
count = 0
for ratio in self.ratios:
ws = int(np.sqrt(size / ratio))
hs = int(ws * ratio)
for scale in self.scales:
wws = ws * scale
hhs = hs * scale
self.anchor[count] = [0, 0, wws, hhs]
count += 1
score_sz = int(self.score_size)
self.anchor = np.tile(self.anchor, score_sz * score_sz).reshape((-1, 4))
ori = - (score_sz / 2) * self.total_stride
xx, yy = np.meshgrid([ori + self.total_stride * dx for dx in range(score_sz)], [ori + self.total_stride * dy for dy in range(score_sz)])
xx, yy = np.tile(xx.flatten(), (self.anchor_num, 1)).flatten(), np.tile(yy.flatten(), (self.anchor_num, 1)).flatten()
self.anchor[:, 0], self.anchor[:, 1] = xx.astype(np.float32), yy.astype(np.float32)
return self.anchor
#track function
def track(self, im):
wc_z = self.target_sz[1] + self.context_amount * sum(self.target_sz)
hc_z = self.target_sz[0] + self.context_amount * sum(self.target_sz)
s_z = np.sqrt(wc_z * hc_z)
scale_z = self.exemplar_size / s_z
d_search = (self.instance_size - self.exemplar_size) / 2
pad = d_search / scale_z
s_x = round(s_z + 2 * pad)
#region preprocessing
x_crop = self.__get_subwindow_tracking(im, self.instance_size, s_x)
x_crop = x_crop.transpose(2, 0, 1).reshape(1, 3, 271, 271).astype(np.float32)
self.score = self.__tracker_eval(x_crop, scale_z)
self.target_pos[0] = max(0, min(self.im_w, self.target_pos[0]))
self.target_pos[1] = max(0, min(self.im_h, self.target_pos[1]))
self.target_sz[0] = max(10, min(self.im_w, self.target_sz[0]))
self.target_sz[1] = max(10, min(self.im_h, self.target_sz[1]))
#update bounding box position
def __tracker_eval(self, x_crop, scale_z):
target_size = self.target_sz * scale_z
self.net.setInput(x_crop)
outNames = ['66', '68'] # fixed output layer names; the getUnconnectedOutLayersNames() result was unused
delta, score = self.net.forward(outNames)
delta = np.transpose(delta, (1, 2, 3, 0))
delta = np.ascontiguousarray(delta, dtype = np.float32)
delta = np.reshape(delta, (4, -1))
score = np.transpose(score, (1, 2, 3, 0))
score = np.ascontiguousarray(score, dtype = np.float32)
score = np.reshape(score, (2, -1))
score = self.__softmax(score)[1, :]
delta[0, :] = delta[0, :] * self.anchor[:, 2] + self.anchor[:, 0]
delta[1, :] = delta[1, :] * self.anchor[:, 3] + self.anchor[:, 1]
delta[2, :] = np.exp(delta[2, :]) * self.anchor[:, 2]
delta[3, :] = np.exp(delta[3, :]) * self.anchor[:, 3]
def __change(r):
return np.maximum(r, 1./r)
def __sz(w, h):
pad = (w + h) * 0.5
sz2 = (w + pad) * (h + pad)
return np.sqrt(sz2)
def __sz_wh(wh):
pad = (wh[0] + wh[1]) * 0.5
sz2 = (wh[0] + pad) * (wh[1] + pad)
return np.sqrt(sz2)
s_c = __change(__sz(delta[2, :], delta[3, :]) / (__sz_wh(target_size)))
r_c = __change((target_size[0] / target_size[1]) / (delta[2, :] / delta[3, :]))
penalty = np.exp(-(r_c * s_c - 1.) * self.penalty_k)
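        # Assumed continuation, following the original OpenCV DaSiamRPN sample:
        pscore = penalty * score
        pscore = pscore * (1 - self.window_influence) + self.window * self.window_influence
        best_pscore_id = np.argmax(pscore)
        target = delta[:, best_pscore_id] / scale_z
        target_size /= scale_z
        lr = penalty[best_pscore_id] * score[best_pscore_id] * self.lr
        res_x = target[0] + self.target_pos[0]
        res_y = target[1] + self.target_pos[1]
        res_w = target_size[0] * (1 - lr) + target[2] * lr
        res_h = target_size[1] * (1 - lr) + target[3] * lr
        self.target_pos = np.array([res_x, res_y])
        self.target_sz = np.array([res_w, res_h])
        return score[best_pscore_id]
    #numerically stable softmax over the two class rows (used above; assumed helper)
    def __softmax(self, x):
        x_max = x.max(0)
        e_x = np.exp(x - x_max)
        return e_x / e_x.sum(axis=0)
# A minimal usage sketch (model filenames assumed from the links above):
# net = cv.dnn.readNet('dasiamrpn_model.onnx')
# kernel_r1 = cv.dnn.readNet('dasiamrpn_kernel_r1.onnx')
# kernel_cls1 = cv.dnn.readNet('dasiamrpn_kernel_cls1.onnx')
# tracker = DaSiamRPNTracker(first_frame, target_pos, target_sz, net, kernel_r1, kernel_cls1)
# tracker.track(next_frame)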
"""Multiclass predictions.
``y_pred`` should be two dimensional (n_samples x n_classes).
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import numpy as np
import warnings
from .base import BasePrediction
def _multiclass_init(self, y_pred=None, y_true=None, n_samples=None):
if y_pred is not None:
self.y_pred = np.array(y_pred)
elif y_true is not None:
self._init_from_pred_labels(y_true)
elif n_samples is not None:
self.y_pred = np.empty((n_samples, self.n_columns), dtype=float)
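        self.y_pred.fill(np.nan)
    else:
        # assumed closing branch, mirroring other ramp-workflow prediction types
        raise ValueError(
            'Missing init argument: y_pred, y_true, or n_samples')
    self.check_y_pred_dimensions()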
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
# -----------------------------------------------------------------------------
from guidedprojectionbase import GuidedProjectionBase
# -----------------------------------------------------------------------------
# try:
from constraints_basic import columnnew,con_planarity_constraints,\
con_isometry
from constraints_net import con_unit_edge,con_orthogonal_midline,\
con_isogonal,con_isogonal_diagnet,\
con_anet,con_anet_diagnet,con_gnet,con_gnet_diagnet,\
con_anet_geodesic,con_polyline_ruling,con_osculating_tangents,\
con_planar_1familyof_polylines,con_nonsquare_quadface,\
con_singular_Anet_diag_geodesic,con_gonet, \
con_diag_1_asymptotic_or_geodesic,\
con_ctrlnet_symmetric_1_diagpoly, con_AGnet
from singularMesh import quadmesh_with_1singularity
from constraints_glide import con_alignment,con_alignments,con_fix_vertices
# -----------------------------------------------------------------------------
__author__ = '<NAME>'
# -----------------------------------------------------------------------------
class GuidedProjection_AGNet(GuidedProjectionBase):
_N1 = 0
_N5 = 0
_N6 = 0
_Nanet = 0
_Ndgeo = 0
_Ndgeoliou = 0
_Ndgeopc = 0
_Nruling = 0
_Noscut = 0
_Nnonsym = 0
_Npp = 0
_Ncd = _Ncds = 0
_Nag = 0
def __init__(self):
GuidedProjectionBase.__init__(self)
weights = {
## Commen setting:
'geometric' : 0, ##NOTE SHOULD BE 1 ONCE planarity=1
'planarity' : 0,
## shared used:
'unit_edge' : 0,
'unit_diag_edge' : 0,
'orthogonal' :0,
'isogonal' : 0,
'isogonal_diagnet' :0,
'Anet' : 0,
'Anet_diagnet' : 0,
'Gnet' : 0,
'Gnet_diagnet' : 0,
'GOnet' : 0,
'diag_1_asymptotic': 0,
'diag_1_geodesic': 0,
'ctrlnet_symmetric_1diagpoly': 0,
'nonsymmetric' :0,
'isometry' : 0,
'z0' : 0,
'boundary_glide' :0, #Hui in gpbase.py doesn't work, replace here.
'i_boundary_glide' :0,
'fix_point' :0,
## only for AGNet:
'GGGnet': 0,
'GGG_diagnet': 0, #TODO
'AGnet': 0,
'AAGnet': 0,
'GAAnet': 0,
'GGAnet': 0,
'AGGnet': 0,
'AAGGnet': 0,
'GGAAnet': 0,
'planar_geodesic' : 0,
'agnet_liouville': 0,
'ruling': 0,# opt for ruling quadratic mesh, straight lines-mesh
'oscu_tangent' :0,
'AAG_singular' :0,
'planar_ply1' : 0,
'planar_ply2' : 0,
'planar_plys' : 0, ## referenced by max_weight and the planar-strip helpers below
}
self.add_weights(weights)
self.switch_diagmeth = False
self.is_initial = True
self.if_angle = False
self._angle = 90
self._glide_reference_polyline = None
self.i_glide_bdry_crv, self.i_glide_bdry_ver = [],[]
self.ind_fixed_point, self.fixed_value = None,None
self.set_another_polyline = 0
self._ver_poly_strip1,self._ver_poly_strip2 = None,None
self.nonsym_eps = 0.01
self.ind_nonsym_v124,self.ind_nonsym_l12 = None,None
self.is_singular = False
self._singular_polylist = None
self._ind_rr_vertex = None
self.weight_checker = 1
### isogonal AGnet:
self.is_AG_or_GA = True
self.opt_AG_ortho = False
self.opt_AG_const_rii = False
self.opt_AG_const_r0 = False
#--------------------------------------------------------------------------
#
#--------------------------------------------------------------------------
@property
def mesh(self):
return self._mesh
@mesh.setter
def mesh(self, mesh):
self._mesh = mesh
self.initialization()
@property
def max_weight(self):
return max(self.get_weight('boundary_glide'),
self.get_weight('i_boundary_glide'),
self.get_weight('geometric'),
self.get_weight('planarity'),
self.get_weight('unit_edge'),
self.get_weight('unit_diag_edge'),
self.get_weight('orthogonal'),
self.get_weight('isometry'),
self.get_weight('oscu_tangent'),
self.get_weight('Anet'),
self.get_weight('Anet_diagnet'),
self.get_weight('diag_1_asymptotic'), #n defined from only ctrl-net
self.get_weight('diag_1_geodesic'),
self.get_weight('ctrlnet_symmetric_1diagpoly'),
self.get_weight('nonsymmetric'),
self.get_weight('AAG_singular'),
self.get_weight('planar_plys'),
1)
@property
def angle(self):
return self._angle
@angle.setter
def angle(self,angle):
if angle != self._angle:
self.mesh.angle=angle
self._angle = angle
@property
def glide_reference_polyline(self):
if self._glide_reference_polyline is None:
polylines = self.mesh.boundary_curves(corner_split=False)[0]
N = 5
for polyline in polylines:
polyline.refine(N)
self._glide_reference_polyline = polyline
return self._glide_reference_polyline
# @glide_reference_polyline.setter##NOTE: used by reference-mesh case
# def glide_reference_polyline(self,polyline):
# self._glide_reference_polyline = polyline
@property
def ver_poly_strip1(self):
if self._ver_poly_strip1 is None:
if self.get_weight('planar_ply1') or self.opt_AG_const_rii:
self.index_of_mesh_polylines()
else:
self.index_of_strip_along_polyline()
return self._ver_poly_strip1
@property
def ver_poly_strip2(self):
if self._ver_poly_strip2 is None:
if self.get_weight('planar_ply2'):
self.index_of_mesh_polylines()
return self._ver_poly_strip2
@property
def singular_polylist(self):
if self._singular_polylist is None:
self.get_singularmesh_diagpoly()
return self._singular_polylist
@property
def ind_rr_vertex(self):
if self._ind_rr_vertex is None:
self.get_singularmesh_diagpoly()
return self._ind_rr_vertex
#--------------------------------------------------------------------------
# Initialization
#--------------------------------------------------------------------------
def set_weights(self):
#------------------------------------
if self.get_weight('isogonal'):
self.set_weight('unit_edge', 1*self.get_weight('isogonal'))
elif self.get_weight('isogonal_diagnet'):
self.set_weight('unit_diag_edge', 1*self.get_weight('isogonal_diagnet'))
if self.get_weight('Gnet') or self.get_weight('GOnet'):
self.set_weight('unit_edge', 1)
elif self.get_weight('Gnet_diagnet'):
self.set_weight('unit_diag_edge', 1)
if self.get_weight('GGGnet'):
self.set_weight('Gnet', 1)
self.set_weight('diag_1_geodesic',1)
if self.get_weight('AAGnet'):
self.set_weight('Anet', 1)
elif self.get_weight('GAAnet'):
self.set_weight('Anet_diagnet', 1)
elif self.get_weight('GGAnet'):
self.set_weight('Gnet', 1)
elif self.get_weight('AGGnet'):
self.set_weight('Gnet_diagnet', 1)
elif self.get_weight('AAGGnet'):
self.set_weight('Anet', 1)
self.set_weight('Gnet_diagnet', 1)
elif self.get_weight('GGAAnet'):
self.set_weight('Gnet', 1)
self.set_weight('Anet_diagnet', 1)
if self.get_weight('AGnet'):
self.set_weight('oscu_tangent', self.get_weight('AGnet'))
if self.get_weight('AAG_singular'):
self.set_weight('Anet', 1*self.get_weight('AAG_singular'))
if self.get_weight('diag_1_asymptotic') or self.get_weight('diag_1_geodesic'):
self.set_weight('unit_edge',1)
if self.get_weight('ctrlnet_symmetric_1diagpoly'):
pass
#--------------------------------------
def set_dimensions(self): # Huinote: be used in guidedprojectionbase
V = self.mesh.V
F = self.mesh.F
num_regular = self.mesh.num_regular
N = 3*V
N1 = N5 = N6 = N
Nanet = N
Ndgeo = Ndgeoliou = Ndgeopc = Nruling = Noscut = N
Nnonsym = N
Npp = N
Ncd = Ncds = N
Nag = N
#---------------------------------------------
if self.get_weight('planarity'):
N += 3*F
N1 = N
if self.get_weight('unit_edge'): #Gnet
"le1,le2,le3,le4,ue1,ue2,ue3,ue4 "
if self.get_weight('isogonal'):
N += 16*num_regular
else:
"for Anet"
N += 16*len(self.mesh.ind_rr_star_v4f4)
N5 = N
elif self.get_weight('unit_diag_edge'): #Gnet_diagnet
"le1,le2,le3,le4,ue1,ue2,ue3,ue4 "
N += 16*len(self.mesh.ind_rr_star_v4f4)
N5 = N
if self.get_weight('isogonal'):
"lt1,lt2, ut1,ut2, cos0"
N += 8*num_regular+1
N6 = N
elif self.get_weight('isogonal_diagnet'):
"lt1,lt2, ut1,ut2, cos0"
N += 8*len(self.mesh.ind_rr_star_v4f4)+1
N6 = N
if self.get_weight('Anet') or self.get_weight('Anet_diagnet'):
N += 3*len(self.mesh.ind_rr_star_v4f4)#3*num_regular
Nanet = N
if self.get_weight('AAGnet') or self.get_weight('GAAnet'):
N += 3*len(self.mesh.ind_rr_star_v4f4)
Ndgeo = N
elif self.get_weight('GGAnet') or self.get_weight('AGGnet'):
N += 9*len(self.mesh.ind_rr_star_v4f4)
Ndgeo = N
elif self.get_weight('AAGGnet') or self.get_weight('GGAAnet'):
N += 6*len(self.mesh.ind_rr_star_v4f4)
Ndgeo = N
if self.get_weight('oscu_tangent'):
"X +=[ll1,ll2,ll3,ll4,lu1,lu2,u1,u2]"
N += 12*len(self.mesh.ind_rr_star_v4f4)
Noscut = N
if self.get_weight('AGnet'):
"osculating tangents; X += [surfN; ogN]; if const.ri, X+=[Ri]"
N += 6*len(self.mesh.ind_rr_star_v4f4)
if self.opt_AG_const_rii:
"const rii for each geodesic polylines, default v2-v-v4"
N += len(self.ver_poly_strip1)#TODO
elif self.opt_AG_const_r0:
"unique r"
N += 1
Nag = N
if self.get_weight('agnet_liouville'):
"X +=[lu1,tu1; lla,llc,g1, lg1,tg1, c]"
N += 13*len(self.mesh.ind_rr_star_v4f4) +1
Ndgeoliou = N
if self.get_weight('planar_geodesic'):
N += 3*len(self.ver_poly_strip1[0])
Ndgeopc = N
if self.get_weight('ruling'):
N += 3*len(self.mesh.get_both_isopolyline(self.switch_diagmeth))
Nruling = N
if self.get_weight('nonsymmetric'):
"X += [E,s]"
N += self.mesh.E + len(self.ind_nonsym_v124[0]) ##self.mesh.num_rrf ##len=self.rr_quadface
Nnonsym = N
if self.get_weight('AAG_singular'):
"X += [on]"
N += 3*len(self.singular_polylist[1])
Ndgeo = N
### PPQ-project:
if self.get_weight('planar_ply1'):
N += 3*len(self.ver_poly_strip1)
## only for \obj_cheng\every_5_PPQ.obj'
##matrix = self.ver_poly_strip1
#matrix = self.mesh.rot_patch_matrix[:,::5].T
#N += 3*len(matrix)
Nppq = N
if self.get_weight('planar_ply2'):
N += 3*len(self.ver_poly_strip2)
Nppo = N
### CG / CA project:
if self.get_weight('diag_1_asymptotic') or self.get_weight('diag_1_geodesic'):
if self.get_weight('diag_1_asymptotic'):
"[ln,v_N]"
N += 4*len(self.mesh.ind_rr_star_v4f4)
elif self.get_weight('diag_1_geodesic'):
if self.is_singular:
"[ln,v_N;la[ind],lc[ind],ea[ind],ec[ind]]"
N += (1+3)*len(self.mesh.ind_rr_star_v4f4)+8*len(self.ind_rr_vertex)
else:
"[ln,v_N;la,lc,ea,ec]"
N += (1+3+3+3+1+1)*len(self.mesh.ind_rr_star_v4f4)
Ncd = N #ctrl-diag net
if self.get_weight('ctrlnet_symmetric_1diagpoly'):
N += (1+1+3+3+1+3)*len(self.mesh.ind_rr_star_v4f4) #[e_ac,l_ac]
Ncds = N
#---------------------------------------------
if N1 != self._N1:
self.reinitialize = True
if N5 != self._N5 or N6 != self._N6:
self.reinitialize = True
if Nanet != self._Nanet:
self.reinitialize = True
if Ndgeo != self._Ndgeo:
self.reinitialize = True
if Nag != self._Nag:
self.reinitialize = True
if Ndgeoliou != self._Ndgeoliou:
self.reinitialize = True
if Ndgeopc != self._Ndgeopc:
self.reinitialize = True
if Nruling != self._Nruling:
self.reinitialize = True
if Noscut != self._Noscut:
self.reinitialize = True
if Nnonsym != self._Nnonsym:
self.reinitialize = True
if Npp != self._Npp:
self.reinitialize = True
if Ncd != self._Ncd:
self.reinitialize = True
if Ncds != self._Ncds:
self.reinitialize = True
#----------------------------------------------
self._N = N
self._N1 = N1
self._N5 = N5
self._N6 = N6
self._Nanet = Nanet
self._Ndgeo = Ndgeo
self._Ndgeoliou = Ndgeoliou
self._Ndgeopc = Ndgeopc
self._Nruling = Nruling
self._Noscut = Noscut
self._Nnonsym = Nnonsym
self._Npp = Npp
self._Ncd = Ncd
self._Ncds = Ncds
self._Nag = Nag
self.build_added_weight() # Hui add
def initialize_unknowns_vector(self):
X = self.mesh.vertices.flatten('F')
if self.get_weight('planarity'):
normals = self.mesh.face_normals()
normals = normals.flatten('F')
X = np.hstack((X, normals))
if self.get_weight('unit_edge'):
if True:
"self.get_weight('Gnet')"
rr=True
l1,l2,l3,l4,E1,E2,E3,E4 = self.mesh.get_v4_unit_edge(rregular=rr)
X = np.r_[X,l1,l2,l3,l4]
X = np.r_[X,E1.flatten('F'),E2.flatten('F'),E3.flatten('F'),E4.flatten('F')]
elif self.get_weight('unit_diag_edge'):
l1,l2,l3,l4,E1,E2,E3,E4 = self.mesh.get_v4_diag_unit_edge()
X = np.r_[X,l1,l2,l3,l4]
X = np.r_[X,E1.flatten('F'),E2.flatten('F'),E3.flatten('F'),E4.flatten('F')]
if self.get_weight('isogonal'):
lt1,lt2,ut1,ut2,_,_ = self.mesh.get_v4_unit_tangents()
cos0 = np.mean(np.einsum('ij,ij->i', ut1, ut2))
X = np.r_[X,lt1,lt2,ut1.flatten('F'),ut2.flatten('F'),cos0]
elif self.get_weight('isogonal_diagnet'):
lt1,lt2,ut1,ut2,_,_ = self.mesh.get_v4_diag_unit_tangents()
cos0 = np.mean(np.einsum('ij,ij->i', ut1, ut2))
X = np.r_[X,lt1,lt2,ut1.flatten('F'),ut2.flatten('F'),cos0]
if self.get_weight('Anet'):
if True:
"only r-regular vertex"
v = self.mesh.rr_star[self.mesh.ind_rr_star_v4f4][:,0]
else:
v = self.mesh.ver_regular
V4N = self.mesh.vertex_normals()[v]
X = np.r_[X,V4N.flatten('F')]
elif self.get_weight('Anet_diagnet'):
v = self.mesh.rr_star_corner[0]
V4N = self.mesh.vertex_normals()[v]
X = np.r_[X,V4N.flatten('F')]
if self.get_weight('AAGnet'):
on = self.get_agweb_initial(diagnet=False,
another_poly_direction=self.set_another_polyline,
AAG=True)
X = np.r_[X,on]
elif self.get_weight('GAAnet'):
on = self.get_agweb_initial(diagnet=True,
another_poly_direction=self.set_another_polyline,
AAG=True)
X = np.r_[X,on]
elif self.get_weight('GGAnet'):
vNoN1oN2 = self.get_agweb_initial(diagnet=False,
another_poly_direction=self.set_another_polyline,
GGA=True)
X = np.r_[X,vNoN1oN2]
elif self.get_weight('AGGnet'):
vNoN1oN2 = self.get_agweb_initial(diagnet=True,
another_poly_direction=self.set_another_polyline,
GGA=True)
X = np.r_[X,vNoN1oN2]
elif self.get_weight('AAGGnet'):
oN1oN2 = self.get_agweb_initial(diagnet=False,
another_poly_direction=self.set_another_polyline,
AAGG=True)
X = np.r_[X,oN1oN2]
elif self.get_weight('GGAAnet'):
oN1oN2 = self.get_agweb_initial(diagnet=True,
another_poly_direction=self.set_another_polyline,
AAGG=True)
X = np.r_[X,oN1oN2]
if self.get_weight('oscu_tangent'):
"X +=[ll1,ll2,ll3,ll4,lu1,lu2,u1,u2]"
if self.get_weight('GAAnet') or self.get_weight('AGGnet') or self.get_weight('GGAAnet'):
diag=True
else:
diag=False
l,t,lt1,lt2 = self.mesh.get_net_osculating_tangents(diagnet=diag)
[ll1,ll2,ll3,ll4],[lt1,t1],[lt2,t2] = l,lt1,lt2
X = np.r_[X,ll1,ll2,ll3,ll4]
X = np.r_[X,lt1,lt2,t1.flatten('F'),t2.flatten('F')]
if self.get_weight('AGnet'):
"osculating tangent"
v,v1,v2,v3,v4 = self.mesh.rr_star[self.mesh.ind_rr_star_v4f4].T
V = self.mesh.vertices
_,_,lt1,lt2 = self.mesh.get_net_osculating_tangents()
srfN = np.cross(lt1[1],lt2[1])
srfN = srfN / np.linalg.norm(srfN,axis=1)[:,None]
if not self.is_AG_or_GA:
v2,v4 = v1,v3
biN = np.cross(V[v2]-V[v], V[v4]-V[v])
ogN = biN / np.linalg.norm(biN,axis=1)[:,None]
X = np.r_[X,srfN.flatten('F'),ogN.flatten('F')]
if self.opt_AG_const_rii:
"const rii for each geodesic polylines, default v2-v-v4"
pass #TODO
elif self.opt_AG_const_r0:
"unique r"
from frenet_frame import FrenetFrame
allr = FrenetFrame(V[v],V[v2],V[v4]).radius
X = np.r_[X,np.mean(allr)]
if self.get_weight('agnet_liouville'): # no need now
"X +=[lu1,tu1; lla,llc,g1, lg1,tg1, c]"
lulg = self.get_agweb_liouville(diagnet=True)
X = np.r_[X,lulg]
if self.get_weight('planar_geodesic'):
sn = self.get_poly_strip_normal()
X = np.r_[X,sn.flatten('F')]
if self.get_weight('ruling'): # no need now
sn = self.get_poly_strip_ruling_tangent()
X = np.r_[X,sn.flatten('F')]
if self.get_weight('nonsymmetric'):
E, s = self.get_nonsymmetric_edge_ratio(diagnet=False)
X = np.r_[X, E, s]
if self.get_weight('AAG_singular'):
"X += [on]"
on = self.get_initial_singular_diagply_normal(is_init=True)
X = np.r_[X,on.flatten('F')]
if self.get_weight('planar_ply1'):
sn = self.get_poly_strip_normal(pl1=True)
X = np.r_[X,sn.flatten('F')]
if self.get_weight('planar_ply2'):
sn = self.get_poly_strip_normal(pl2=True)
X = np.r_[X,sn.flatten('F')]
### CG / CA project:
if self.get_weight('diag_1_asymptotic') or self.get_weight('diag_1_geodesic'):
"X += [ln,uN;la,lc,ea,ec]"
v,v1,v2,v3,v4 = self.mesh.rr_star[self.mesh.ind_rr_star_v4f4].T
V = self.mesh.vertices
v4N = np.cross(V[v3]-V[v1], V[v4]-V[v2])
ln = np.linalg.norm(v4N,axis=1)
un = v4N / ln[:,None]
if self.get_weight('diag_1_asymptotic'):
"X += [ln,un]"
X = np.r_[X,ln,un.flatten('F')]
elif self.get_weight('diag_1_geodesic'):
"X += [ln,un; la,lc,ea,ec]"
if self.is_singular:
"new, different from below"
vl,vc,vr = self.singular_polylist
la = np.linalg.norm(V[vl]-V[vc],axis=1)
lc = np.linalg.norm(V[vr]-V[vc],axis=1)
ea = (V[vl]-V[vc]) / la[:,None]
ec = (V[vr]-V[vc]) / lc[:,None]
X = np.r_[X,ln,un.flatten('F'),la,lc,ea.flatten('F'),ec.flatten('F')]
else:
"no singular case"
l1,l2,l3,l4,E1,E2,E3,E4 = self.mesh.get_v4_diag_unit_edge()
if self.set_another_polyline:
"switch to another diagonal polyline"
ea,ec,la,lc = E2,E4,l2,l4
else:
ea,ec,la,lc = E1,E3,l1,l3
X = np.r_[X,ln,un.flatten('F'),la,lc,ea.flatten('F'),ec.flatten('F')]
if self.get_weight('ctrlnet_symmetric_1diagpoly'):
"X += [lt1,lt2,ut1,ut2; lac,ud1]"
lt1,lt2,ut1,ut2,_,_ = self.mesh.get_v4_unit_tangents()
ld1,ld2,ud1,ud2,_,_ = self.mesh.get_v4_diag_unit_tangents()
if self.set_another_polyline:
"switch to another diagonal polyline"
eac,lac = ud2,ld2
else:
eac,lac = ud1,ld1
X = np.r_[X,lt1,lt2,ut1.flatten('F'),ut2.flatten('F')]
X = np.r_[X,lac,eac.flatten('F')]
self._X = X
self._X0 = np.copy(X)
self.build_added_weight() # Hui add
#--------------------------------------------------------------------------
# Errors strings
#--------------------------------------------------------------------------
def make_errors(self):
self.planarity_error()
self.isogonal_error()
self.isogonal_diagnet_error()
self.anet_error()
self.gnet_error()
self.gonet_error()
#self.oscu_tangent_error() # good enough: mean=meax=90
#self.liouville_error()
def planarity_error(self):
if self.get_weight('planarity') == 0:
return None
P = self.mesh.face_planarity()
Emean = np.mean(P)
Emax = np.max(P)
self.add_error('planarity', Emean, Emax, self.get_weight('planarity'))
def isogonal_error(self):
if self.get_weight('isogonal') == 0:
return None
cos,cos0 = self.unit_tangent_vectors()
err = np.abs(cos-cos0) # no divided by cos
emean = np.mean(err)
emax = np.max(err)
self.add_error('isogonal', emean, emax, self.get_weight('isogonal'))
def isogonal_diagnet_error(self):
if self.get_weight('isogonal_diagnet') == 0:
return None
cos,cos0 = self.unit_tangent_vectors_diagnet()
err = np.abs(cos-cos0) # no divided by cos
emean = np.mean(err)
emax = np.max(err)
self.add_error('isogonal_diagnet', emean, emax, self.get_weight('isogonal_diagnet'))
def isometry_error(self): # Hui
"compare all edge_lengths"
if self.get_weight('isometry') == 0:
return None
L = self.edge_lengths_isometry()
L0 = self.edge_lengths_isometry(initialized=True)
norm = np.mean(L)
Err = np.abs(L-L0) / norm
Emean = np.mean(Err)
Emax = np.max(Err)
self.add_error('isometry', Emean, Emax, self.get_weight('isometry'))
def anet_error(self):
if self.get_weight('Anet') == 0 and self.get_weight('Anet_diagnet')==0:
return None
if self.get_weight('Anet'):
name = 'Anet'
if True:
star = self.mesh.rr_star
v,v1,v2,v3,v4 = star[self.mesh.ind_rr_star_v4f4].T
else:
v,v1,v2,v3,v4 = self.mesh.ver_regular_star.T
elif self.get_weight('Anet_diagnet'):
name = 'Anet_diagnet'
v,v1,v2,v3,v4 = self.mesh.rr_star_corner
if self.is_initial:
Nv = self.mesh.vertex_normals()[v]
else:
num = len(v)
c_n = self._Nanet-3*num+np.arange(3*num)
Nv = self.X[c_n].reshape(-1,3,order='F')
V = self.mesh.vertices
err1 = np.abs(np.einsum('ij,ij->i',Nv,V[v1]-V[v]))
err2 = np.abs(np.einsum('ij,ij->i',Nv,V[v2]-V[v]))
err3 = np.abs(np.einsum('ij,ij->i',Nv,V[v3]-V[v]))
err4 = np.abs(np.einsum('ij,ij->i',Nv,V[v4]-V[v]))
Err = err1+err2+err3+err4
Emean = np.mean(Err)
Emax = np.max(Err)
self.add_error(name, Emean, Emax, self.get_weight(name))
def gnet_error(self):
if self.get_weight('Gnet') == 0 and self.get_weight('Gnet_diagnet')==0:
return None
if self.get_weight('Gnet'):
name = 'Gnet'
if True:
star = self.mesh.rr_star
v,v1,v2,v3,v4 = star[self.mesh.ind_rr_star_v4f4].T
else:
v,v1,v2,v3,v4 = self.mesh.ver_regular_star.T
elif self.get_weight('Gnet_diagnet'):
name = 'Gnet_diagnet'
v,v1,v2,v3,v4 = self.mesh.rr_star_corner
V = self.mesh.vertices
E1 = (V[v1]-V[v]) / np.linalg.norm(V[v1]-V[v],axis=1)[:,None]
E2 = (V[v2]-V[v]) / np.linalg.norm(V[v2]-V[v],axis=1)[:,None]
E3 = (V[v3]-V[v]) / np.linalg.norm(V[v3]-V[v],axis=1)[:,None]
E4 = (V[v4]-V[v]) / np.linalg.norm(V[v4]-V[v],axis=1)[:,None]
err1 = np.abs(np.einsum('ij,ij->i',E1,E2)-np.einsum('ij,ij->i',E3,E4))
err2 = np.abs(np.einsum('ij,ij->i',E2,E3)-np.einsum('ij,ij->i',E4,E1))
Err = err1+err2
Emean = np.mean(Err)
Emax = np.max(Err)
self.add_error(name, Emean, Emax, self.get_weight(name))
def gonet_error(self):
if self.get_weight('GOnet') == 0:
return None
name = 'GOnet'
if True:
star = self.mesh.rr_star
v,v1,v2,v3,v4 = star[self.mesh.ind_rr_star_v4f4].T
else:
v,v1,v2,v3,v4 = self.mesh.ver_regular_star.T
V = self.mesh.vertices
E1 = (V[v1]-V[v]) / np.linalg.norm(V[v1]-V[v],axis=1)[:,None]
E2 = (V[v2]-V[v]) / np.linalg.norm(V[v2]-V[v],axis=1)[:,None]
E3 = (V[v3]-V[v]) / np.linalg.norm(V[v3]-V[v],axis=1)[:,None]
E4 = (V[v4]-V[v]) / np.linalg.norm(V[v4]-V[v],axis=1)[:,None]
if self.is_AG_or_GA:
err1 = np.abs(np.einsum('ij,ij->i',E1,E2)-np.einsum('ij,ij->i',E2,E3))
err2 = np.abs(np.einsum('ij,ij->i',E3,E4)-np.einsum('ij,ij->i',E4,E1))
else:
err1 = np.abs(np.einsum('ij,ij->i',E1,E2)-np.einsum('ij,ij->i',E1,E4))
err2 = np.abs(np.einsum('ij,ij->i',E2,E3)-np.einsum('ij,ij->i',E3,E4))
Err = err1+err2
Emean = np.mean(Err)
Emax = np.max(Err)
self.add_error(name, Emean, Emax, self.get_weight(name))
def oscu_tangent_error(self):
if self.get_weight('oscu_tangent') == 0:
return None
if self.get_weight('GAAnet') or self.get_weight('AGGnet') or self.get_weight('GGAAnet'):
diag=True
else:
diag=False
angle = self.mesh.get_net_osculating_tangents(diagnet=diag,printerr=True)
emean = '%.2f' % np.mean(angle)
emax = '%.2f' % np.max(angle)
print('ortho:',emean,emax)
#self.add_error('orthogonal', emean, emax, self.get_weight('oscu_tangent'))
def liouville_error(self):
if self.get_weight('agnet_liouville') == 0:
return None
cos,cos0 = self.agnet_liouville_const_angle()
err = np.abs(cos-cos0) # no divided by cos
emean = np.mean(err)
emax = np.max(err)
self.add_error('Liouville', emean, emax, self.get_weight('agnet_liouville'))
def planarity_error_string(self):
return self.error_string('planarity')
def isogonal_error_string(self):
return self.error_string('isogonal')
def isogonal_diagnet_error_string(self):
return self.error_string('isogonal_diagnet')
def isometry_error_string(self):
return self.error_string('isometry')
def anet_error_string(self):
return self.error_string('Anet')
def liouville_error_string(self):
return self.error_string('agnet_liouville')
#--------------------------------------------------------------------------
# Getting (initilization + Plotting):
#--------------------------------------------------------------------------
def unit_tangent_vectors(self, initialized=False):
if self.get_weight('isogonal') == 0:
return None
if initialized:
X = self._X0
else:
X = self.X
N6 = self._N6
num = self.mesh.num_regular
ut1 = X[N6-6*num-1:N6-3*num-1].reshape(-1,3,order='F')
ut2 = X[N6-3*num-1:N6-1].reshape(-1,3,order='F')
cos = np.einsum('ij,ij->i',ut1,ut2)
cos0 = X[N6-1]
return cos,cos0
def unit_tangent_vectors_diagnet(self, initialized=False):
if self.get_weight('isogonal_diagnet') == 0:
return None
if initialized:
X = self._X0
else:
X = self.X
N6 = self._N6
num = len(self.mesh.ind_rr_star_v4f4)
ut1 = X[N6-6*num-1:N6-3*num-1].reshape(-1,3,order='F')
ut2 = X[N6-3*num-1:N6-1].reshape(-1,3,order='F')
cos = np.einsum('ij,ij->i',ut1,ut2)
cos0 = X[N6-1]
return cos,cos0
def edge_lengths_isometry(self, initialized=False): # Hui
"isometry: keeping all edge_lengths"
if self.get_weight('isometry') == 0:
return None
if initialized:
X = self._X0
else:
X = self.X
vi, vj = self.mesh.vertex_ring_vertices_iterators(order=True) # later should define it as global
Vi = X[columnnew(vi,0,self.mesh.V)].reshape(-1,3,order='F')
Vj = X[columnnew(vj,0,self.mesh.V)].reshape(-1,3,order='F')
el = np.linalg.norm(Vi-Vj,axis=1)
return el
def get_agweb_initial(self,diagnet=False,another_poly_direction=False,
AAG=False,GGA=False,AAGG=False):
"initilization of AG-net project"
V = self.mesh.vertices
v,v1,v2,v3,v4 = self.mesh.rr_star[self.mesh.ind_rr_star_v4f4].T # regular
v,va,vb,vc,vd = self.mesh.rr_star_corner# in diagonal direction
V0,V1,V2,V3,V4,Va,Vb,Vc,Vd = V[v],V[v1],V[v2],V[v3],V[v4],V[va],V[vb],V[vc],V[vd]
vnn = self.mesh.vertex_normals()[v]
if diagnet:
"GGAA / GAA"
Vg1,Vg2,Vg3,Vg4 = V1,V2,V3,V4
else:
"AAGG / AAG"
Vg1,Vg2,Vg3,Vg4 = Va,Vb,Vc,Vd
"X +=[ln, vN] + [oNi]; oNi not need to be unit; all geodesics matter"
if AAGG:
"oN1,oN2 from Gnet-osculating_normals,s.t. anetN*oN1(oN2)=0"
oN1,oN2 = np.cross(Vg3-V0,Vg1-V0),np.cross(Vg4-V0,Vg2-V0)
X = np.r_[oN1.flatten('F'),oN2.flatten('F')]
elif AAG:
"oN from geodesic-osculating-normal (not unit)"
if another_poly_direction:
Vl,Vr = Vg2, Vg4
else:
Vl,Vr = Vg1, Vg3
oN = np.cross(Vr-V0,Vl-V0)
X = np.r_[oN.flatten('F')]
elif GGA:
"X +=[vN, oN1, oN2]; oN1,oN2 from Gnet-osculating_normals"
if diagnet:
"AGG"
Vg1,Vg2,Vg3,Vg4 = Va,Vb,Vc,Vd # different from above
else:
"GGA"
Vg1,Vg2,Vg3,Vg4 = V1,V2,V3,V4 # different from above
oN1,oN2 = np.cross(Vg3-V0,Vg1-V0),np.cross(Vg4-V0,Vg2-V0)
vn = np.cross(oN1,oN2)
vN = vn / np.linalg.norm(vn,axis=1)[:,None]
ind = np.where(np.einsum('ij,ij->i',vnn,vN)<0)[0]
vN[ind]=-vN[ind]
X = np.r_[vN.flatten('F'),oN1.flatten('F'),oN2.flatten('F')]
return X
def get_agweb_an_n_on(self,is_n=False,is_on=False,is_all_n=False):
V = self.mesh.vertices
v = self.mesh.rr_star[:,0]#self.mesh.rr_star_corner[0]
an = V[v]
n = self.mesh.vertex_normals()[v]
on1=on2=n
num = len(self.mesh.ind_rr_star_v4f4)
if self.is_initial:
if self.get_weight('AAGnet') or self.get_weight('GAAnet'):
"vertex normal from A-net"
X = self.get_agweb_initial(AAG=True)
#on = X[:3*num].reshape(-1,3,order='F')
elif self.get_weight('GGAnet') or self.get_weight('AGGnet'):
"X=+[N,oN1,oN2]"
X = self.get_agweb_initial(GGA=True)
n = X[:3*num].reshape(-1,3,order='F')
on1 = X[3*num:6*num].reshape(-1,3,order='F')
on2 = X[6*num:9*num].reshape(-1,3,order='F')
elif self.get_weight('AAGGnet') or self.get_weight('GGAAnet'):
"vertex-normal from Anet, X+=[on1,on2]"
X = self.get_agweb_initial(AAGG=True)
on1 = X[:3*num].reshape(-1,3,order='F')
on2 = X[3*num:6*num].reshape(-1,3,order='F')
elif self.get_weight('Anet'):
pass
# v = v[self.mesh.ind_rr_star_v4f4]
# n = n[v]
elif self.get_weight('AGnet'):
if False:
_,_,lt1,lt2 = self.mesh.get_net_osculating_tangents()
n = np.cross(lt1[1],lt2[1])
n = n / np.linalg.norm(n,axis=1)[:,None]
else:
_,_,ut1,ut2,_,_ = self.mesh.get_v4_unit_tangents(False,True)
n = np.cross(ut1,ut2)
n = n / np.linalg.norm(n,axis=1)[:,None]
else:
X = self.X
if self.get_weight('AAGnet') or self.get_weight('GAAnet'):
"X=+[oNg]"
##print(v,self.mesh.ind_rr_star_v4f4,len(v),len(self.mesh.ind_rr_star_v4f4))
v = v[self.mesh.ind_rr_star_v4f4]
n = X[self._Nanet-3*num:self._Nanet].reshape(-1,3,order='F')
d = self._Ndgeo-3*num
#on = X[d:d+3*num].reshape(-1,3,order='F')
elif self.get_weight('GGAnet') or self.get_weight('AGGnet'):
d = self._Ndgeo-9*num
n = X[d:d+3*num].reshape(-1,3,order='F')
on1 = X[d+3*num:d+6*num].reshape(-1,3,order='F')
on2 = X[d+6*num:d+9*num].reshape(-1,3,order='F')
elif self.get_weight('AAGGnet') or self.get_weight('GGAAnet'):
v = v[self.mesh.ind_rr_star_v4f4]
n = X[self._Nanet-3*num:self._Nanet].reshape(-1,3,order='F')
d = self._Ndgeo-6*num
on1 = X[d:d+3*num].reshape(-1,3,order='F')
on2 = X[d+3*num:d+6*num].reshape(-1,3,order='F')
elif self.get_weight('Anet'):
v = v[self.mesh.ind_rr_star_v4f4]
n = X[self._Nanet-3*num:self._Nanet].reshape(-1,3,order='F')
elif self.get_weight('AGnet'):
if False:
Nag = self._Nag
arr3 = np.arange(3*num)
if self.opt_AG_const_rii or self.opt_AG_const_r0:
if self.opt_AG_const_rii:
#k = len(igeo)
#c_ri = Nag-k+np.arange(k)
pass
#c_srfN = Nag-6*num+arr3-k
#c_ogN = Nag-4*num+arr3-k
elif self.opt_AG_const_r0:
#c_r = Nag-1
c_srfN = Nag-6*num+arr3-1
#c_ogN = Nag-4*num+arr3-1
else:
c_srfN = Nag-6*num+arr3
#c_ogN = Nag-3*num+arr3
n = X[c_srfN].reshape(-1,3,order='F')
#on = X[c_ogN].reshape(-1,3,order='F')
elif False:
ie1 = self._N5-12*num+np.arange(3*num)
ue1 = X[ie1].reshape(-1,3,order='F')
ue2 = X[ie1+3*num].reshape(-1,3,order='F')
ue3 = X[ie1+6*num].reshape(-1,3,order='F')
ue4 = X[ie1+9*num].reshape(-1,3,order='F')
#try:
if self.is_AG_or_GA:
n = ue2+ue4
else:
n = ue1+ue3
n = n / np.linalg.norm(n,axis=1)[:,None]
# except:
# t1,t2 = ue1-ue3,ue2-ue4
# n = np.cross(t1,t2)
# n = n / np.linalg.norm(n,axis=1)[:,None]
v = v[self.mesh.ind_rr_star_v4f4]
else:
c_srfN = self._Nag-3*num+np.arange(3*num)
n = X[c_srfN].reshape(-1,3,order='F')
if is_n:
n = n / np.linalg.norm(n,axis=1)[:,None]
alln = self.mesh.vertex_normals()
n0 = alln[v]
j = np.where(np.einsum('ij,ij->i',n0,n)<0)[0]
n[j] = -n[j]
return V[v],n
elif is_on:
on1 = on1 / np.linalg.norm(on1,axis=1)[:,None]
on2 = on2 / np.linalg.norm(on2,axis=1)[:,None]
return an,on1,on2
elif is_all_n:
alln = self.mesh.vertex_normals()
n0 = alln[v]
j = np.where(np.einsum('ij,ij->i',n0,n)<0)[0]
n[j] = -n[j]
alln[v] = n
return alln
def get_agnet_normal(self,is_biN=False):
V = self.mesh.vertices
v,v1,v2,v3,v4 = self.mesh.rr_star[self.mesh.ind_rr_star_v4f4].T
an = V[v]
if is_biN:
"AGnet: Asy(v1-v-v3), Geo(v2-v-v4), binormal of geodesic crv"
if self.is_AG_or_GA:
eb = (V[v2]-V[v])#/np.linalg.norm(V[v2]-V[v],axis=1)[:,None]
ed = (V[v4]-V[v])#/np.linalg.norm(V[v4]-V[v],axis=1)[:,None]
else:
eb = (V[v1]-V[v])#/np.linalg.norm(V[v1]-V[v],axis=1)[:,None]
ed = (V[v3]-V[v])#/np.linalg.norm(V[v3]-V[v],axis=1)[:,None]
n = np.cross(eb,ed)
i = np.where(np.linalg.norm(n,axis=1)==0)[0]
if len(i)!=0:
n[i]=np.zeros(3)
else:
n = n / np.linalg.norm(n,axis=1)[:,None]
return an, n
if False:
_,_,lt1,lt2 = self.mesh.get_net_osculating_tangents()
n = np.cross(lt1[1],lt2[1])
n = n / np.linalg.norm(n,axis=1)[:,None]
else:
_,_,ut1,ut2,_,_ = self.mesh.get_v4_unit_tangents(False,True)
n = np.cross(ut1,ut2)
n = n / np.linalg.norm(n,axis=1)[:,None]
return an, n
def index_of_strip_along_polyline(self):
"ver_poly_strip1: 2-dim list with different length, at least 2"
w3 = self.get_weight('AAGnet')
w4 = self.get_weight('AAGGnet')
diag = True if w3 or w4 else False
d = self.set_another_polyline
if diag:
iall,iind,_,_ = self.mesh.get_diagonal_vertex_list(5,d) # interval is random
else:
iall,iind,_,_ = self.mesh.get_isoline_vertex_list(5,d) # updated, need to check
self._ver_poly_strip1 = [iall,iind]
def index_of_mesh_polylines(self):
"index_of_strip_along_polyline without two bdry vts, this include full"
if self.is_singular:
self._ver_poly_strip1,_,_ = quadmesh_with_1singularity(self.mesh)
else:
"ver_poly_strip1,ver_poly_strip2"
iall = self.mesh.get_both_isopolyline(diagpoly=self.switch_diagmeth,
is_one_or_another=self.set_another_polyline)
self._ver_poly_strip1 = iall
iall = self.mesh.get_both_isopolyline(diagpoly=self.switch_diagmeth,
is_one_or_another=not self.set_another_polyline)
self._ver_poly_strip2 = iall
def get_initial_singular_diagply_normal(self,is_init=False,AGnet=False,CCnet=False):
V = self.mesh.vertices
vl,vc,vr = self.singular_polylist
Vl,Vc,Vr = V[vl], V[vc], V[vr]
if is_init:
on = np.cross(Vl-Vc, Vr-Vc)
return on / np.linalg.norm(on,axis=1)[:,None]
else:
if self.is_initial:
v = self.mesh.rr_star[self.mesh.ind_rr_star_v4f4][:,0]
vN = self.mesh.vertex_normals()[v] ##approximate.
else:
if AGnet:
#num = self.mesh.num_regular
num = len(self.mesh.ind_rr_star_v4f4)
arr = self._Nanet-3*num+np.arange(3*num)
vN = self.X[arr].reshape(-1,3,order='F')
elif CCnet:
num1 = len(self.mesh.ind_rr_star_v4f4)
num2 = len(self.ind_rr_vertex)
arr = self._Ncd-3*num1-8*num2+np.arange(3*num1)
vN = self.X[arr].reshape(-1,3,order='F')
Nc = vN[self.ind_rr_vertex]
return Nc,Vl,Vc,Vr
def get_poly_strip_normal(self,pl1=False,pl2=False):
"for planar strip: each strip 1 normal as variable, get mean n here"
V = self.mesh.vertices
if pl1:
iall = self.ver_poly_strip1
elif pl2:
iall = self.ver_poly_strip2
else:
iall = self.ver_poly_strip1[0]
n = np.array([0,0,0])
for iv in iall:
if len(iv)==2:
ni = np.array([(V[iv[1]]-V[iv[0]])[1],-(V[iv[1]]-V[iv[0]])[0],0]) # random orthogonal normal
elif len(iv)==3:
vl,v0,vr = iv[0],iv[1],iv[2]
ni = np.cross(V[vl]-V[v0],V[vr]-V[v0])
else:
vl,v0,vr = iv[:-2],iv[1:-1],iv[2:]
ni = np.cross(V[vl]-V[v0],V[vr]-V[v0])
ni = ni / np.linalg.norm(ni,axis=1)[:,None]
ni = np.mean(ni,axis=0)
ni = ni / np.linalg.norm(ni)
n = np.vstack((n,ni))
return n[1:,:]
def get_poly_strip_ruling_tangent(self):
"ruling"
V = self.mesh.vertices
iall = self.mesh.get_both_isopolyline(diagpoly=self.switch_diagmeth,is_one_or_another=True)
t = np.array([0,0,0])
for iv in iall:
ti = V[iv[1:]]-V[iv[:-1]]
ti = np.mean(ti,axis=0)
t = np.vstack((t,ti))
return t[1:,:]
def get_mesh_planar_normal_or_plane(self,pl1=False,pl2=False,pln=False,scale=None):
V = self.mesh.vertices
if pl1:
iall = self.ver_poly_strip1
elif pl2:
iall = self.ver_poly_strip2
else:
iall = self.ver_poly_strip1[0]
num = len(iall)
if not pln:
an=vn = np.array([0,0,0])
i= 0
for iv in iall:
vl,v0,vr = iv[:-2],iv[1:-1],iv[2:]
an = np.vstack((an,V[iv]))
if self.get_weight('planar_geodesic'):
nx = self.X[self._Ndgeopc-3*num+i]
ny = self.X[self._Ndgeopc-2*num+i]
nz = self.X[self._Ndgeopc-1*num+i]
ni = np.tile(np.array([nx,ny,nz]),len(iv)).reshape(-1,3)
elif self.get_weight('planar_plys'):
nx = self.X[self._Npp-3*num+i]
ny = self.X[self._Npp-2*num+i]
nz = self.X[self._Npp-1*num+i]
ni = np.tile(np.array([nx,ny,nz]),len(iv)).reshape(-1,3)
else:
"len(an)=len(ni)=len(iv)-2"
an = np.vstack((an,V[v0]))
ni = np.cross(V[vl]-V[v0],V[vr]-V[v0])
ni = ni / np.linalg.norm(ni,axis=1)[:,None]
vn = np.vstack((vn,ni))
i+= 1
return an[1:,:],vn[1:,:]
else:
"planar strip passing through ply-vertices with above uninormal"
P1=P2=P3=P4 = np.array([0,0,0])
i= 0
for iv in iall:
vl,vr = iv[:-1],iv[1:]
vec = V[vr]-V[vl]
vec = np.vstack((vec,vec[-1])) #len=len(iv)
if scale is None:
scale = np.mean(np.linalg.norm(vec,axis=1)) * 0.1
if self.get_weight('planar_geodesic'):
nx = self.X[self._Ndgeopc-3*num+i]
ny = self.X[self._Ndgeopc-2*num+i]
nz = self.X[self._Ndgeopc-1*num+i]
oni = np.array([nx,ny,nz])
Ni = np.cross(vec,oni)
elif self.get_weight('planar_plys'):
nx = self.X[self._Npp-3*num+i]
ny = self.X[self._Npp-2*num+i]
nz = self.X[self._Npp-1*num+i]
oni = np.array([nx,ny,nz])
Ni = np.cross(vec,oni)
else:
il,i0,ir = iv[:-2],iv[1:-1],iv[2:]
oni = np.cross(V[il]-V[i0],V[ir]-V[i0])
oni = np.vstack((oni[0],oni,oni[-1])) #len=len(iv)
oni = oni / np.linalg.norm(oni,axis=1)[:,None]
Ni = np.cross(vec,oni)
uNi = Ni / np.linalg.norm(Ni,axis=1)[:,None] * scale
i+= 1
P1,P2 = np.vstack((P1,V[vl]-uNi[:-1])),np.vstack((P2,V[vr]-uNi[1:]))
P4,P3 = np.vstack((P4,V[vl]+uNi[:-1])),np.vstack((P3,V[vr]+uNi[1:]))
pm = self.mesh.make_quad_mesh_pieces(P1[1:],P2[1:],P3[1:],P4[1:])
return pm
def get_singularmesh_diagpoly(self,is_poly=False,is_rr_vertex=True):
plylist,vlr,vlcr = quadmesh_with_1singularity(self.mesh)
self._singular_polylist = vlcr ##==[vl,vc,vr]
##### AAG-SINGULAR / CG / CA project:
if is_rr_vertex:
rrv = self.mesh.rr_star[self.mesh.ind_rr_star_v4f4][:,0]
else: ##no use now
rrv = self.mesh.ver_regular
ind = []
for i in range(len(vlcr[1])):
ck = np.where(rrv==vlcr[1][i])[0]
ind.append(ck[0])
self._ind_rr_vertex = np.array(ind,dtype=int)
if is_poly:
Vl,Vr = self.mesh.vertices[vlr[0]], self.mesh.vertices[vlr[1]]
return self.mesh.make_polyline_from_endpoints(Vl,Vr)
def get_nonsymmetric_edge_ratio(self,diagnet=False):
"""each quadface, oriented edge1,edge2
l1 > l2 or l1<l2<==> (l1-l2)^2 = s^2 + eps"""
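        # With a free slack variable s, (l1-l2)^2 = s^2 + eps forces
        # (l1-l2)^2 >= eps, i.e. |l1-l2| >= sqrt(eps), keeping the two
        # oriented edge lengths of every quad distinctly different.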
if diagnet:
pass
else:
"suppose edge1: v1v2; edge2: v1v4"
V = self.mesh.vertices
if self.is_singular:
v1,v2,_,v4 = quadmesh_with_1singularity(self.mesh,False,True)
else:
v1,v2,_,v4 = self.mesh.rr_quadface.T # in odrder
mean1 = np.mean(np.linalg.norm(V[v4]-V[v1],axis=1))
mean2 = np.mean(np.linalg.norm(V[v2]-V[v1],axis=1))
print(mean1,mean2)
print('%.2g'%(mean1-mean2)**2)
self.ind_nonsym_v124 = [v1,v4,v2]
il1 = self.mesh.edge_from_connected_vertices(v1,v4)
il2 = self.mesh.edge_from_connected_vertices(v1,v2)
self.ind_nonsym_l12 = [il1,il2]
allv1, allv2 = self.mesh.edge_vertices()
eL = np.linalg.norm(V[allv2]-V[allv1],axis=1)
"(l1-l2)^2 = s^2 + eps"
s = np.zeros(len(il1)) ## len(il1)=len(il2)=len(s)
ind = np.where((eL[il1]-eL[il2])**2-self.nonsym_eps>0)[0]
s[ind] = np.sqrt((eL[il1][ind]-eL[il2][ind])**2-self.nonsym_eps)
print('%.2g'% np.mean((eL[il1]-eL[il2])**2))
return eL,s
def get_conjugate_diagonal_net(self,normal=True):
"CCD-net vertex normal"
V = self.mesh.vertices
v,v1,v2,v3,v4 = self.mesh.rr_star[self.mesh.ind_rr_star_v4f4].T
num = len(v)
if self.get_weight('diag_1_asymptotic'):
arr = self._Ncd-3*num+np.arange(3*num)
n = self.X[arr].reshape(-1,3,order='F')
elif self.get_weight('diag_1_geodesic'):
if self.is_singular:
arr = self._Ncd-3*num-8*len(self.ind_rr_vertex)+np.arange(3*num)
else:
arr = self._Ncd-11*num+np.arange(3*num)
n = self.X[arr].reshape(-1,3,order='F')
else:
n = np.cross(V[v3]-V[v1], V[v4]-V[v2])
if normal:
return V[v], n
# -------------------------------------------------------------------------
# Build
# -------------------------------------------------------------------------
def build_iterative_constraints(self):
self.build_added_weight() # Hui change
H, r = self.mesh.iterative_constraints(**self.weights) ##NOTE: in gridshell.py
self.add_iterative_constraint(H, r, 'mesh_iterative')
if self.get_weight('planarity'):
H,r = con_planarity_constraints(**self.weights)
self.add_iterative_constraint(H, r, 'planarity')
if self.get_weight('AAGnet') or self.get_weight('GAAnet') \
or self.get_weight('GGAnet')or self.get_weight('AGGnet') \
or self.get_weight('AAGGnet')or self.get_weight('GGAAnet') :
"agnet_liouville, planar_geodesic"
if self.get_weight('planar_geodesic'):
strip = self.ver_poly_strip1
else:
strip = None
if self.weight_checker<1:
idck = self.mesh.ind_ck_tian_rr_vertex #self.mesh.ind_ck_rr_vertex
else:
idck = None
H,r = con_anet_geodesic(strip,self.set_another_polyline,
checker_weight=self.weight_checker,
id_checker=idck, #self.mesh.ind_ck_tian_rr_vertex,#self.mesh.ind_ck_rr_vertex,
**self.weights)
self.add_iterative_constraint(H, r, 'AG-web')
if self.get_weight('ruling'):
H,r = con_polyline_ruling(switch_diagmeth=False,
**self.weights)
self.add_iterative_constraint(H, r, 'ruling')
if self.get_weight('oscu_tangent'):
if self.get_weight('GAAnet') or self.get_weight('AGGnet') or self.get_weight('GGAAnet'):
diag=True
is_orthonet = True
else:
diag=False
is_orthonet = True
H,r = con_osculating_tangents(diag,is_ortho=is_orthonet,**self.weights)
self.add_iterative_constraint(H, r, 'oscu_tangent')
if self.get_weight('GOnet'):
d = True if self.is_AG_or_GA else False
H,r = con_gonet(rregular=True,is_direction24=d,**self.weights)
self.add_iterative_constraint(H, r, 'GOnet')
if self.get_weight('AGnet'):
H,r = con_AGnet(self.is_AG_or_GA,self.opt_AG_ortho,
self.opt_AG_const_rii,self.opt_AG_const_r0,
**self.weights)
self.add_iterative_constraint(H, r, 'AGnet')
###-------partially shared-used codes:---------------------------------
if self.get_weight('unit_edge'):
H,r = con_unit_edge(rregular=True,**self.weights)
self.add_iterative_constraint(H, r, 'unit_edge')
elif self.get_weight('unit_diag_edge'):
H,r = con_unit_edge(rregular=True,**self.weights)
self.add_iterative_constraint(H, r, 'unit_diag_edge')
if self.get_weight('fix_point'):
ind,Vf = self.ind_fixed_point, self.fixed_value
H,r = con_fix_vertices(ind, Vf,**self.weights)
self.add_iterative_constraint(H,r, 'fix_point')
if self.get_weight('boundary_glide'):
"the whole boundary"
refPoly = self.glide_reference_polyline
glideInd = self.mesh.boundary_curves(corner_split=False)[0]
w = self.get_weight('boundary_glide')
H,r = con_alignment(w, refPoly, glideInd,**self.weights)
self.add_iterative_constraint(H, r, 'boundary_glide')
elif self.get_weight('i_boundary_glide'):
"the i-th boundary"
refPoly = self.i_glide_bdry_crv
glideInd = self.i_glide_bdry_ver
if len(glideInd)!=0:
w = self.get_weight('i_boundary_glide')
H,r = con_alignments(w, refPoly, glideInd,**self.weights)
self.add_iterative_constraint(H, r, 'iboundary_glide')
if self.get_weight('orthogonal'):
#H,r = con_orthogonal(**self.weights)
H,r = con_orthogonal_midline(**self.weights)
self.add_iterative_constraint(H, r, 'orthogonal')
if self.get_weight('isogonal'): # todo: mayhas problem of unit-edge(rregular)
H,r = con_isogonal(np.cos(self.angle/180.0*np.pi),
assign=self.if_angle,**self.weights)
self.add_iterative_constraint(H, r, 'isogonal')
elif self.get_weight('isogonal_diagnet'):
H,r = con_isogonal_diagnet(np.cos(self.angle/180.0*np.pi),
                           assign=self.if_angle,**self.weights) # assumed to mirror the con_isogonal call above
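self.add_iterative_constraint(H, r, 'isogonal_diagnet')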
# Author: <NAME>
# email: <EMAIL>
import os, sys, numpy as np, pytest
from PIL import Image
import init_paths
from type_check import isimsize, isimage_dimension, iscolorimage_dimension, isgrayimage_dimension, isuintimage, isfloatimage, isnpimage, ispilimage, isimage
def test_isimsize():
input_test = np.zeros((100, 100), dtype='uint8')
input_test = input_test.shape
assert isimsize(input_test)
input_test = [100, 200]
assert isimsize(input_test)
input_test = (100, 200)
assert isimsize(input_test)
input_test = np.array([100, 200])
assert isimsize(input_test)
input_test = np.zeros((100, 100, 3), dtype='float32')
input_test = input_test.shape
assert isimsize(input_test) is False
input_test = [100, 200, 3]
assert isimsize(input_test) is False
input_test = (100, 200, 3)
assert isimsize(input_test) is False
def test_ispilimage():
input_test = Image.fromarray(np.zeros((100, 100, 3), dtype='uint8'))
assert ispilimage(input_test)
input_test = Image.fromarray(np.zeros((100, 100), dtype='uint8'))
assert ispilimage(input_test)
input_test = np.zeros((100, 100), dtype='uint8')
assert ispilimage(input_test) is False
input_test = np.zeros((100, 100), dtype='float32')
assert ispilimage(input_test) is False
def test_iscolorimage_dimension():
input_test = np.zeros((100, 100, 4), dtype='uint8')
assert iscolorimage_dimension(input_test)
input_test = np.zeros((100, 100, 3), dtype='uint8')
assert iscolorimage_dimension(input_test)
input_test = np.zeros((100, 100, 4), dtype='float32')
assert iscolorimage_dimension(input_test)
input_test = np.zeros((100, 100, 3), dtype='float64')
assert iscolorimage_dimension(input_test)
input_test = Image.fromarray(np.zeros((100, 100, 3), dtype='uint8'))
assert iscolorimage_dimension(input_test)
    input_test = Image.fromarray(np.zeros((100, 100), dtype='uint8'))
    assert iscolorimage_dimension(input_test) is False
"""
Script goal, to produce trends in netcdf files
This script can also be used in P03 if required
"""
#==============================================================================
__title__ = "Global Vegetation Trends"
__author__ = "<NAME>"
__version__ = "v1.0(28.03.2019)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
import argparse
import datetime as dt
from collections import OrderedDict
import warnings as warn
from scipy import stats
import xarray as xr
from numba import jit
import bottleneck as bn
import scipy as sp
import glob
# from netCDF4 import Dataset, num2date, date2num
# from scipy import stats
# import statsmodels.stats.multitest as smsM
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
import seaborn as sns
import cartopy.crs as ccrs
import cartopy.feature as cpf
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# Import debugging packages
import ipdb
print("numpy version : ", np.__version__)
print("pandas version : ", pd.__version__)
print("xarray version : ", xr.__version__)
#==============================================================================
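# A sketch of the model implemented in main() below (inferred from the code,
# not stated by the author): every element of `array` stores the age of one
# forest cell in years. Each year the helper `firetime` (defined elsewhere in
# the original script) presumably advances the cell ages, burns a fraction of
# cells (`burnfrac` in fire years, `nburnfrac` otherwise) and lets a fraction
# `RFfrac` of burnt cells fail to recruit. The yearly statistics then track
# the mean stand age and the fractions of mature (age >= mat), germinated
# (age > germ) and sapling (germ < age < mat) cells.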
def main():
# ========== Set up the params ==========
arraysize = 10000 # size of the area to test
mat = 40.0 # how long before a forest reaches maturity
germ = 10.0 # how long before a burnt site can germinate
burnfrac = 0.10 # how much burns
# burnfrac = BurntAreaFraction(year=2016)/2
# nburnfrac = 0.0 # how much burns in other years
nburnfrac = 0.02 # how much burns in other years
# nburnfrac = BurntAreaFraction(year=2018)/2.0 # how much burns in other years
# nburnfrac = np.mean([BurntAreaFraction(year=int(yr)) for yr in [2015, 2017, 2018]]) # how much burns in other years
firefreqL = [25, 20, 15, 11, 5, 4, 3, 2, 1] # how often the fires happen
years = 200 # number of years to loop over
RFfrac = 0.04 # The fraction that will fail to recuit after a fire
iterations = 100
# ========== Create empty lists to hold the variables ==========
obsMA = OrderedDict()
obsMF = OrderedDict()
obsGF = OrderedDict()
obsSF = OrderedDict()
obsMAstd = OrderedDict()
obsMFstd = OrderedDict()
obsGFstd = OrderedDict()
obsSFstd = OrderedDict()
# ========== Loop over the fire frequency list ==========
for firefreq in firefreqL:
print("Testing with a %d year fire frequency" % firefreq)
# ************VECTORISE THIS LOOP *********
iymean = []
ifmat = []
ifgerm = []
ifsap = []
for it in np.arange(iterations):
print("Iteration %d of %d" % (it, iterations))
# ========== Make an array ==========
array = np.zeros( arraysize)
rucfail = np.ones( arraysize)
index = np.arange( arraysize)
# ========== Make the entire array mature forest ==========
array[:] = mat
# ========== Create the empty arrays ==========
ymean = []
fmat = []
fgerm = []
fsap = []
rfhold = 0 #the left over fraction of RF
# ========== start the loop ==========
# ************VECTORISE THIS LOOP *********
for year in range(0, years):
# Loop over every year in case i want to add major fire events
# print(year)
if year % firefreq == 0:
# FIre year
array, rucfail, rfhold = firetime(array, index, mat, germ, burnfrac, rucfail, RFfrac, rfhold)
else:
# non fire year
array, rucfail, rfhold = firetime(array, index, mat, germ, nburnfrac, rucfail, RFfrac, rfhold)
                # Mean stand age (years)
ymean.append(np.mean(array))
                # Fraction of mature forest
fmat.append(np.sum(array>=mat)/float(arraysize))
                # Fraction of saplings (germinated but not yet mature), among germinated cells
fsap.append(np.sum(np.logical_and((array>germ), (array<mat)))/float(np.sum((array>germ))))
                # Fraction of germinated forest
fgerm.append(np.sum(array>germ)/float(arraysize))
# if year>60 and firefreq == 1:
            iymean.append(np.array(ymean))
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
import itertools
import csv
import gensim
import re
import nltk.data
import tensorflow
from nltk.tokenize import WordPunctTokenizer
from collections import Counter
from keras.models import Sequential, Graph
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers import LSTM, Merge
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from IPython.display import SVG, display
from keras.utils.visualize_util import plot, to_graph
# from keras import backend as K
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
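# A worked example (hypothetical input): clean_str("Hello, World! It's GREAT")
# returns "hello , world ! it 's great" -- punctuation is padded with spaces,
# clitics such as 's are split off, and the result is lower-cased.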
def message_to_wordlist(message, lemmas_bool, remove_stopwords=False):
# Function to convert a document to a sequence of words,
# optionally removing stop words. Returns a list of words.
#
# 1. Remove HTML
#review_text = BeautifulSoup(review).get_text()
#
    # 2. Remove quoted message numbers (e.g. ">>123")
message_text = re.sub(">>\d+","", message)
message_text = message_text.lower()
    message_text = re.sub(u"ё", 'e', message_text, flags=re.UNICODE)
message_text = clean_str(message_text)
tokenizer = WordPunctTokenizer()
# 3. Convert words to lower case and split them
words = tokenizer.tokenize(message_text)
lemmas = []
# 4. Optionally remove stop words (false by default)
if remove_stopwords:
stops = set(stopwords.words("english"))
words = [w for w in words if not w in stops]
if lemmas_bool == 'l':
for word in words:
word_parsed = morph.parse(word)
if len(word_parsed) > 0:
lemmas.append(word_parsed[0].normal_form)
elif lemmas_bool == 's':
for word in words:
word = stemmer.stem(word)
if len(word) > 0:
lemmas.append(word)
else:
lemmas = words
# 5. Return a list of words
return(lemmas)
#return(words)
# Define a function to split a message into parsed sentences
def message_to_sentences( message, tokenizer, lemmas_bool, remove_stopwords=False):
sentences = []
    # Function to split a message into parsed sentences, then tokenize each
    # one. Returns a flat list of the words from all sentences in the message
#
# 1. Use the NLTK tokenizer to split the paragraph into sentences
if type(message) == str:
message = message.decode('utf-8')
raw_sentences = tokenizer.tokenize(message.strip())
#
# 2. Loop over each sentence
for raw_sentence in raw_sentences:
# If a sentence is empty, skip it
if len(raw_sentence) > 0:
# Otherwise, call message_to_wordlist to get a list of words
sentences += message_to_wordlist( raw_sentence,lemmas_bool, remove_stopwords)
return sentences
def pad_sentences(sentences, padding_word="<PAD/>"):
"""
Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences
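# Example: pad_sentences([['a', 'b', 'c'], ['d']]) returns
# [['a', 'b', 'c'], ['d', '<PAD/>', '<PAD/>']]; every sentence is padded to
# the length of the longest one so they can be stacked into a single array.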
def build_vocab(sentences):
"""
Builds a vocabulary mapping from word to index based on the sentences.
Returns vocabulary mapping and inverse vocabulary mapping.
"""
# Build vocabulary
word_counts = Counter(itertools.chain(*sentences))
# Mapping from index to word
vocabulary_inv = [x[0] for x in word_counts.most_common()]
# Mapping from word to index
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
return [vocabulary, vocabulary_inv]
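# Example: build_vocab([['a', 'b', 'a']]) returns
# [{'a': 0, 'b': 1}, ['a', 'b']]; indices are assigned in order of
# decreasing word frequency.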
def build_input_data(sentences, labels, vocabulary):
"""
Maps sentencs and labels to vectors based on a vocabulary.
"""
x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
new_labels = []
for label in labels:
if label == 1:
new_labels.append([1,0])
else:
new_labels.append([0,1])
labels = new_labels
y = np.array(labels)
return [x, y]
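# Example: with vocabulary = {'a': 0, 'b': 1},
# build_input_data([['a', 'b']], [1], vocabulary) gives x = [[0, 1]] and
# y = [[1, 0]]; label 1 is one-hot encoded as [1, 0], any other label as [0, 1].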
def load_data():
messages = pd.read_csv( 'aggression.csv', header=0,
delimiter="\t", quoting = csv.QUOTE_MINIMAL )
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
labels = messages[:]['Aggression']
messages = messages[:]['Text']
messages = [message_to_sentences(message, tokenizer, '') for message in messages]
pos_data = [nltk.pos_tag(message) for message in messages]
tags = []
for sent in pos_data:
sent_tags = []
for word in sent:
sent_tags.append(word[1])
tags.append(sent_tags)
messages = pad_sentences(messages) # turn to the same length
tags = pad_sentences(tags)
vocabulary, vocabulary_inv = build_vocab(messages)
vocabulary_pos, vocabulary_inv_pos = build_vocab(tags)
x_pos = np.array([[vocabulary_pos[word] for word in sentence] for sentence in tags])
x, y = build_input_data(messages, labels, vocabulary)
return [x, y, vocabulary, vocabulary_inv, vocabulary_pos, vocabulary_inv_pos, x_pos]
np.random.seed(2)
model_variation = 'CNN-non-static' # CNN-rand | CNN-non-static | CNN-static
print('Model variation is %s' % model_variation)
# Model Hyperparameters
sequence_length = 287
embedding_dim = 600
filter_sizes = (3, 4)
num_filters = 150
dropout_prob = (0.25, 0.5)
hidden_dims = 150
# Training parameters
batch_size = 32
num_epochs = 100
val_split = 0.1
# Word2Vec parameters, see train_word2vec
min_word_count = 4 # Minimum word count
context = 10 # Context window size
print("Loading data...")
x, y, vocabulary, vocabulary_inv, voc_pos, voc_inv_pos, x_pos = load_data()
if model_variation == 'CNN-non-static' or model_variation == 'CNN-static':
embedding_model = gensim.models.Word2Vec.load('model')
model_words = embedding_model.index2word
embedding_weights = [np.array([embedding_model[w] if w in vocabulary and w in model_words\
                                   else np.random.uniform(-0.25,0.25,600) for w in vocabulary_inv])]
import pytest
import math
import numpy as np
import autograd.numpy as adnp
from autograd import grad
import cs107_salad.Forward.salad as ad
from cs107_salad.Forward.utils import check_list, compare_dicts, compare_dicts_multi
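# The helpers check_list and compare_dicts/compare_dicts_multi imported above
# are assumed to compare values element-wise within a floating-point
# tolerance; the assertions below rely on that behaviour.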
def test_add_radd():
x = ad.Variable(3)
y = x + 3
assert y.val == 6
assert list(y.der.values()) == np.array([1])
x = ad.Variable(3)
y = 3 + x
assert y.val == 6
assert list(y.der.values()) == np.array([1])
x = ad.Variable(3, {"x": 1})
y = ad.Variable(3, {"y": 1})
z = x + y
assert z.val == 6
assert z.der == {"x": 1, "y": 1}
x = ad.Variable(np.ones((5, 5)), label="x")
y = ad.Variable(np.ones((5, 5)), label="y")
z = x + y
assert np.array_equal(z.val, 2 * np.ones((5, 5)))
np.testing.assert_equal(z.der, {"x": np.ones((5, 5)), "y": np.ones((5, 5))})
z = x + x + y + y + 2
assert np.array_equal(z.val, 4 * np.ones((5, 5)) + 2)
np.testing.assert_equal(z.der, {"x": 2 * np.ones((5, 5)), "y": 2 * np.ones((5, 5))})
def test_sub_rsub():
x = ad.Variable(3)
y = x - 3
assert y.val == 0
assert list(y.der.values()) == np.array([1])
x = ad.Variable(3)
y = 3 - x
assert y.val == 0
assert list(y.der.values()) == np.array([-1])
x = ad.Variable(3, {"x": 1})
y = ad.Variable(3, {"y": 1})
z = x - y
assert z.val == 0
assert z.der == {"x": 1, "y": -1}
x = ad.Variable(np.ones((5, 5)), label="x")
y = ad.Variable(np.ones((5, 5)), label="y")
z = x - y
assert np.array_equal(z.val, np.zeros((5, 5)))
np.testing.assert_equal(z.der, {"x": np.ones((5, 5)), "y": -1 * np.ones((5, 5))})
z = x + x - y - y + 2
assert np.array_equal(z.val, 2 * np.ones((5, 5)))
np.testing.assert_equal(
z.der, {"x": 2 * np.ones((5, 5)), "y": -2 * np.ones((5, 5))}
)
def test_mul_rmul():
x = ad.Variable(3, label="x")
y = x * 2
assert y.val == 6
assert y.der == {"x": 2}
# y = 5x + x^2
y = x * 2 + 3 * x + x * x
assert y.val == 24
assert y.der == {"x": 11}
x = ad.Variable(3, label="x")
y = ad.Variable(2, label="y")
z = x * y
assert z.val == 6
assert z.der == {"x": 2, "y": 3}
z = 3 * z * 3
assert z.val == 54
assert z.der == {"x": 18, "y": 27}
x = ad.Variable(3, label="x")
y = ad.Variable(2, label="y")
z = x * y
z = y * z # y^2*x
assert z.val == 12
assert z.der == {"x": y.val ** 2, "y": 2 * y.val * x.val}
x = ad.Variable(2 * np.ones((5, 5)), label="x")
y = ad.Variable(3 * np.ones((5, 5)), label="y")
z = x * y
assert np.array_equal(z.val, 2 * 3 * np.ones((5, 5)))
np.testing.assert_equal(z.der, {"x": 3 * np.ones((5, 5)), "y": 2 * np.ones((5, 5))})
z = -1 * z * x # f = -(x^2) * y, dx = -2xy, dy = -x^2
assert np.array_equal(z.val, -12 * np.ones((5, 5)))
np.testing.assert_equal(
z.der, {"x": -2 * 2 * 3 * np.ones((5, 5)), "y": -1 * 2 * 2 * np.ones((5, 5))}
)
def test_truediv_rtruediv():
x = ad.Variable(3, label="x")
y = x / 2
assert y.val == 1.5
assert y.der == {"x": 1 / 2}
y = x / 2 + 3 / x + x / x
assert y.val == 3.5
assert y.der == {"x": 0.5 - 3 / 9}
x = ad.Variable(3, label="x")
y = ad.Variable(2, label="y")
z = x / y
assert z.val == 3 / 2
assert z.der == {"x": 1 / 2, "y": -3 / 4} # dx = 1/y, dy = -x/y^2
z = 2.4 / z / x / 8 # 2.4/(x/y)/x/8
assert z.val == 2.4 / (3 / 2) / 3 / 8
## Using this function because of rounding errors
assert compare_dicts(
z.der, {"x": (-0.6 * y.val) / (x.val ** 3), "y": (0.3 / (x.val ** 2))}
) # dx = -.6y/x^3 , dy = .3/x^2
x = ad.Variable(2 * np.ones((5, 5)), label="x")
y = ad.Variable(3 * np.ones((5, 5)), label="y")
z = x / y
assert np.array_equal(z.val, 2 / 3 * np.ones((5, 5)))
np.testing.assert_equal(z.der, {"x": 1 / y.val, "y": -1 * x.val / (y.val ** 2)})
z = -1 / z / x
assert np.array_equal(z.val, -1 / (2 / 3) / 2 * np.ones((5, 5)))
np.testing.assert_equal(
z.der, {"x": 2 * y.val / (x.val ** 3), "y": -1 / (x.val ** 2)}
)
def test_exp():
x = 3
ans = ad.exp(x)
sol = np.exp(x)
assert sol == ans
x = ad.Variable(3, label="x")
y = ad.exp(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = np.exp(3), np.exp(3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x")
ans_val, ans_der = ad.exp(x).val, ad.exp(x).der["x"]
sol_val, sol_der = np.exp(3), np.exp(3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + 3
y = ad.exp(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = np.exp(6), np.exp(6)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + ad.Variable(4, label="y")
y = ad.exp(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = np.exp(7), [np.exp(7), np.exp(7)]
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.exp(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[np.exp(3), np.exp(4), np.exp(5)],
[np.exp(3), np.exp(4), np.exp(5),],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.exp(x) + ad.exp(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[2 * np.exp(3), 2 * np.exp(4), 2 * np.exp(5)],
[2 * np.exp(3), 2 * np.exp(4), 2 * np.exp(5),],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
z = x + x
y = ad.exp(z)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[np.exp(2 * 3), np.exp(2 * 4), np.exp(2 * 5)],
[2 * np.exp(2 * 3), 2 * np.exp(2 * 4), 2 * np.exp(2 * 5),],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.Variable([6, 6, 6], label="y")
y = ad.exp(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[np.exp(9), np.exp(10), np.exp(11)],
[
grad(lambda x, y: adnp.exp(x + y), 0)(3.0, 6.0),
grad(lambda x, y: adnp.exp(x + y), 0)(4.0, 6.0),
grad(lambda x, y: adnp.exp(x + y), 0)(5.0, 6.0),
],
[
grad(lambda x, y: adnp.exp(x + y), 1)(3.0, 6.0),
grad(lambda x, y: adnp.exp(x + y), 1)(4.0, 6.0),
grad(lambda x, y: adnp.exp(x + y), 1)(5.0, 6.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_ln():
x = 3
ans = ad.ln(x)
sol = adnp.log(x)
assert sol == ans
x = ad.Variable(3, label="x")
y = ad.ln(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.log(3), grad(adnp.log)(3.0)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + 3
y = ad.ln(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.log(6), grad(lambda x: adnp.log(x + 3.0))(3.0)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + ad.Variable(4, label="y")
y = ad.ln(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.log(7),
[
grad(lambda x, y: adnp.log(x + y), 0)(3.0, 4.0),
grad(lambda x, y: adnp.log(x + y), 1)(3.0, 4.0),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.ln(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[np.log(3), np.log(4), np.log(5)],
[
grad(lambda x: adnp.log(x))(3.0),
grad(lambda x: adnp.log(x))(4.0),
grad(lambda x: adnp.log(x))(5.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.ln(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.log(3 * 2), adnp.log(4 * 2), adnp.log(5 * 2)],
[
grad(lambda x: adnp.log(x + x))(3.0),
grad(lambda x: adnp.log(x + x))(4.0),
grad(lambda x: adnp.log(x + x))(5.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.Variable([6, 6, 6], label="y")
y = ad.ln(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[np.log(9), np.log(10), np.log(11)],
[
grad(lambda x, y: adnp.log(x + y), 0)(3.0, 6.0),
grad(lambda x, y: adnp.log(x + y), 0)(4.0, 6.0),
grad(lambda x, y: adnp.log(x + y), 0)(5.0, 6.0),
],
[
grad(lambda x, y: adnp.log(x + y), 1)(3.0, 6.0),
grad(lambda x, y: adnp.log(x + y), 1)(4.0, 6.0),
grad(lambda x, y: adnp.log(x + y), 1)(5.0, 6.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_logistic():
def logistic(x):
return 1 / (1 + adnp.exp(-x))
x = 3
ans = ad.logistic(x)
sol = logistic(x)
assert sol == ans
x = ad.Variable(3, label="x")
y = ad.logistic(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = logistic(3), grad(logistic)(3.0)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + 3
y = ad.logistic(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = logistic(6), grad(lambda x: logistic(x + 3.0))(3.0)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + ad.Variable(4, label="y")
y = ad.logistic(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
logistic(7),
[
grad(lambda x, y: logistic(x + y), 0)(3.0, 4.0),
grad(lambda x, y: logistic(x + y), 1)(3.0, 4.0),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.logistic(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[logistic(3), logistic(4), logistic(5)],
[
grad(lambda x: logistic(x))(3.0),
grad(lambda x: logistic(x))(4.0),
grad(lambda x: logistic(x))(5.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.logistic(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[logistic(3 * 2), logistic(4 * 2), logistic(5 * 2)],
[
grad(lambda x: logistic(x + x))(3.0),
grad(lambda x: logistic(x + x))(4.0),
grad(lambda x: logistic(x + x))(5.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.Variable([6, 6, 6], label="y")
y = ad.logistic(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[logistic(9), logistic(10), logistic(11)],
[
grad(lambda x, y: logistic(x + y), 0)(3.0, 6.0),
grad(lambda x, y: logistic(x + y), 0)(4.0, 6.0),
grad(lambda x, y: logistic(x + y), 0)(5.0, 6.0),
],
[
grad(lambda x, y: logistic(x + y), 1)(3.0, 6.0),
grad(lambda x, y: logistic(x + y), 1)(4.0, 6.0),
grad(lambda x, y: logistic(x + y), 1)(5.0, 6.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_log10():
def log10(x):
return adnp.log(x) / adnp.log(10)
x = 3
ans = ad.log10(x)
sol = log10(x)
assert sol == ans
x = ad.Variable(3, label="x")
y = ad.log10(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = log10(3), grad(log10)(3.0)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + 3
y = ad.log10(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = log10(6), grad(lambda x: log10(x + 3.0))(3.0)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(3, label="x") + ad.Variable(4, label="y")
y = ad.log10(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
log10(7),
[
grad(lambda x, y: log10(x + y), 0)(3.0, 4.0),
grad(lambda x, y: log10(x + y), 1)(3.0, 4.0),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.log10(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[log10(3), log10(4), log10(5)],
[
grad(lambda x: log10(x))(3.0),
grad(lambda x: log10(x))(4.0),
grad(lambda x: log10(x))(5.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.log10(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[log10(3 * 2), log10(4 * 2), log10(5 * 2)],
[
grad(lambda x: log10(x + x))(3.0),
grad(lambda x: log10(x + x))(4.0),
grad(lambda x: log10(x + x))(5.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([3, 4, 5], label="x")
y = ad.Variable([6, 6, 6], label="y")
y = ad.log10(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[log10(9), log10(10), log10(11)],
[
grad(lambda x, y: log10(x + y), 0)(3.0, 6.0),
grad(lambda x, y: log10(x + y), 0)(4.0, 6.0),
grad(lambda x, y: log10(x + y), 0)(5.0, 6.0),
],
[
grad(lambda x, y: log10(x + y), 1)(3.0, 6.0),
grad(lambda x, y: log10(x + y), 1)(4.0, 6.0),
grad(lambda x, y: log10(x + y), 1)(5.0, 6.0),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_sin():
x = 0.3
ans = ad.sin(x)
sol = adnp.sin(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.sin(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.sin(0.3), grad(adnp.sin)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.sin(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.sin(0.6), grad(lambda x: adnp.sin(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.sin(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.sin(0.7),
[
grad(lambda x, y: adnp.sin(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.sin(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.sin(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.sin(0.3), adnp.sin(0.4), adnp.sin(0.5)],
[
grad(lambda x: adnp.sin(x))(0.3),
grad(lambda x: adnp.sin(x))(0.4),
grad(lambda x: adnp.sin(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.sin(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.sin(0.3 * 2), adnp.sin(0.4 * 2), adnp.sin(0.5 * 2)],
[
grad(lambda x: adnp.sin(x + x))(0.3),
grad(lambda x: adnp.sin(x + x))(0.4),
grad(lambda x: adnp.sin(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.sin(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.sin(0.09), adnp.sin(0.10), adnp.sin(0.11)],
[
grad(lambda x, y: adnp.sin(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.sin(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.sin(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.sin(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.sin(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.sin(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_arcsin():
x = 0.3
ans = ad.arcsin(x)
sol = adnp.arcsin(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.arcsin(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.arcsin(0.3), grad(adnp.arcsin)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.arcsin(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.arcsin(0.6), grad(lambda x: adnp.arcsin(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.arcsin(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.arcsin(0.7),
[
grad(lambda x, y: adnp.arcsin(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.arcsin(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.arcsin(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.arcsin(0.3), adnp.arcsin(0.4), adnp.arcsin(0.5)],
[
grad(lambda x: adnp.arcsin(x))(0.3),
grad(lambda x: adnp.arcsin(x))(0.4),
grad(lambda x: adnp.arcsin(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.arcsin(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.arcsin(0.3 * 2), adnp.arcsin(0.4 * 2), adnp.arcsin(0.5 * 2)],
[
grad(lambda x: adnp.arcsin(x + x))(0.3),
grad(lambda x: adnp.arcsin(x + x))(0.4),
grad(lambda x: adnp.arcsin(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.arcsin(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.arcsin(0.09), adnp.arcsin(0.10), adnp.arcsin(0.11)],
[
grad(lambda x, y: adnp.arcsin(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.arcsin(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.arcsin(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.arcsin(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.arcsin(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.arcsin(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
x = ad.Variable(2, label="x")
with pytest.raises(Exception):
y = ad.arcsin(x)
def test_sinh():
x = 0.3
ans = ad.sinh(x)
sol = adnp.sinh(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.sinh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.sinh(0.3), grad(adnp.sinh)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.sinh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.sinh(0.6), grad(lambda x: adnp.sinh(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.sinh(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.sinh(0.7),
[
grad(lambda x, y: adnp.sinh(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.sinh(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.sinh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.sinh(0.3), adnp.sinh(0.4), adnp.sinh(0.5)],
[
grad(lambda x: adnp.sinh(x))(0.3),
grad(lambda x: adnp.sinh(x))(0.4),
grad(lambda x: adnp.sinh(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.sinh(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.sinh(0.3 * 2), adnp.sinh(0.4 * 2), adnp.sinh(0.5 * 2)],
[
grad(lambda x: adnp.sinh(x + x))(0.3),
grad(lambda x: adnp.sinh(x + x))(0.4),
grad(lambda x: adnp.sinh(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.sinh(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.sinh(0.09), adnp.sinh(0.10), adnp.sinh(0.11)],
[
grad(lambda x, y: adnp.sinh(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.sinh(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.sinh(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.sinh(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.sinh(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.sinh(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_cos():
x = 0.3
ans = ad.cos(x)
sol = adnp.cos(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.cos(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.cos(0.3), grad(adnp.cos)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.cos(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.cos(0.6), grad(lambda x: adnp.cos(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.cos(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.cos(0.7),
[
grad(lambda x, y: adnp.cos(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.cos(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.cos(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.cos(0.3), adnp.cos(0.4), adnp.cos(0.5)],
[
grad(lambda x: adnp.cos(x))(0.3),
grad(lambda x: adnp.cos(x))(0.4),
grad(lambda x: adnp.cos(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.cos(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.cos(0.3 * 2), adnp.cos(0.4 * 2), adnp.cos(0.5 * 2)],
[
grad(lambda x: adnp.cos(x + x))(0.3),
grad(lambda x: adnp.cos(x + x))(0.4),
grad(lambda x: adnp.cos(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.cos(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.cos(0.09), adnp.cos(0.10), adnp.cos(0.11)],
[
grad(lambda x, y: adnp.cos(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.cos(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.cos(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.cos(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.cos(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.cos(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_arccos():
x = 0.3
ans = ad.arccos(x)
sol = adnp.arccos(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.arccos(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.arccos(0.3), grad(adnp.arccos)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.arccos(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.arccos(0.6), grad(lambda x: adnp.arccos(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.arccos(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.arccos(0.7),
[
grad(lambda x, y: adnp.arccos(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.arccos(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.arccos(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.arccos(0.3), adnp.arccos(0.4), adnp.arccos(0.5)],
[
grad(lambda x: adnp.arccos(x))(0.3),
grad(lambda x: adnp.arccos(x))(0.4),
grad(lambda x: adnp.arccos(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.arccos(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.arccos(0.3 * 2), adnp.arccos(0.4 * 2), adnp.arccos(0.5 * 2)],
[
grad(lambda x: adnp.arccos(x + x))(0.3),
grad(lambda x: adnp.arccos(x + x))(0.4),
grad(lambda x: adnp.arccos(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.arccos(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.arccos(0.09), adnp.arccos(0.10), adnp.arccos(0.11)],
[
grad(lambda x, y: adnp.arccos(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.arccos(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.arccos(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.arccos(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.arccos(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.arccos(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
x = ad.Variable(2, label="x")
with pytest.raises(Exception):
y = ad.arccos(x)
def test_cosh():
x = 0.3
ans = ad.cosh(x)
sol = adnp.cosh(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.cosh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.cosh(0.3), grad(adnp.cosh)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.cosh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.cosh(0.6), grad(lambda x: adnp.cosh(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.cosh(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.cosh(0.7),
[
grad(lambda x, y: adnp.cosh(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.cosh(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.cosh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.cosh(0.3), adnp.cosh(0.4), adnp.cosh(0.5)],
[
grad(lambda x: adnp.cosh(x))(0.3),
grad(lambda x: adnp.cosh(x))(0.4),
grad(lambda x: adnp.cosh(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.cosh(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.cosh(0.3 * 2), adnp.cosh(0.4 * 2), adnp.cosh(0.5 * 2)],
[
grad(lambda x: adnp.cosh(x + x))(0.3),
grad(lambda x: adnp.cosh(x + x))(0.4),
grad(lambda x: adnp.cosh(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.cosh(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.cosh(0.09), adnp.cosh(0.10), adnp.cosh(0.11)],
[
grad(lambda x, y: adnp.cosh(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.cosh(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.cosh(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.cosh(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.cosh(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.cosh(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_tan():
x = 0.3
ans = ad.tan(x)
sol = adnp.tan(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.tan(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.tan(0.3), grad(adnp.tan)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.tan(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.tan(0.6), grad(lambda x: adnp.tan(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.tan(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.tan(0.7),
[
grad(lambda x, y: adnp.tan(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.tan(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.tan(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.tan(0.3), adnp.tan(0.4), adnp.tan(0.5)],
[
grad(lambda x: adnp.tan(x))(0.3),
grad(lambda x: adnp.tan(x))(0.4),
grad(lambda x: adnp.tan(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.tan(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.tan(0.3 * 2), adnp.tan(0.4 * 2), adnp.tan(0.5 * 2)],
[
grad(lambda x: adnp.tan(x + x))(0.3),
grad(lambda x: adnp.tan(x + x))(0.4),
grad(lambda x: adnp.tan(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.tan(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.tan(0.09), adnp.tan(0.10), adnp.tan(0.11)],
[
grad(lambda x, y: adnp.tan(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.tan(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.tan(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.tan(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.tan(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.tan(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_arctan():
x = 0.3
ans = ad.arctan(x)
sol = adnp.arctan(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.arctan(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.arctan(0.3), grad(adnp.arctan)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.arctan(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.arctan(0.6), grad(lambda x: adnp.arctan(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.arctan(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.arctan(0.7),
[
grad(lambda x, y: adnp.arctan(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.arctan(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.arctan(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.arctan(0.3), adnp.arctan(0.4), adnp.arctan(0.5)],
[
grad(lambda x: adnp.arctan(x))(0.3),
grad(lambda x: adnp.arctan(x))(0.4),
grad(lambda x: adnp.arctan(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.arctan(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.arctan(0.3 * 2), adnp.arctan(0.4 * 2), adnp.arctan(0.5 * 2)],
[
grad(lambda x: adnp.arctan(x + x))(0.3),
grad(lambda x: adnp.arctan(x + x))(0.4),
grad(lambda x: adnp.arctan(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.arctan(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.arctan(0.09), adnp.arctan(0.10), adnp.arctan(0.11)],
[
grad(lambda x, y: adnp.arctan(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.arctan(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.arctan(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.arctan(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.arctan(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.arctan(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_tanh():
x = 0.3
ans = ad.tanh(x)
sol = adnp.tanh(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.tanh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.tanh(0.3), grad(adnp.tanh)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.tanh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.tanh(0.6), grad(lambda x: adnp.tanh(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.tanh(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.tanh(0.7),
[
grad(lambda x, y: adnp.tanh(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.tanh(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.tanh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.tanh(0.3), adnp.tanh(0.4), adnp.tanh(0.5)],
[
grad(lambda x: adnp.tanh(x))(0.3),
grad(lambda x: adnp.tanh(x))(0.4),
grad(lambda x: adnp.tanh(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.tanh(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.tanh(0.3 * 2), adnp.tanh(0.4 * 2), adnp.tanh(0.5 * 2)],
[
grad(lambda x: adnp.tanh(x + x))(0.3),
grad(lambda x: adnp.tanh(x + x))(0.4),
grad(lambda x: adnp.tanh(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.tanh(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.tanh(0.09), adnp.tanh(0.10), adnp.tanh(0.11)],
[
grad(lambda x, y: adnp.tanh(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.tanh(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.tanh(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.tanh(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.tanh(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.tanh(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_neg():
x = ad.Variable(3, label="x")
y = -x
assert y.val == -3
assert y.der == {"x": -1}
x = ad.Variable(3, label="x", der={"x": 2})
y = -x
assert y.val == -3
assert y.der == {"x": -2}
x = ad.Variable(np.arange(3), label="x")
y = -x
assert np.all(y.val == [0, -1, -2])
assert y.der == {"x": [-1, -1, -1]}
x = ad.Variable(0, label="x")
y = ad.Variable(3, label="y")
z = x + 2 * y
z2 = -z
assert z2.val == -6
assert z2.der == {"x": -1, "y": -2}
x = ad.Variable(np.arange(3), label="x")
y = ad.Variable(3 + np.arange(3), label="y")
z = x + 2 * y
z2 = -z
assert np.all(z2.val == [-6, -9, -12])
assert z2.der == {"x": [-1, -1, -1], "y": [-2, -2, -2]}
def test_pow():
x = ad.Variable(3, label="x")
z = x ** 2
assert z.val == 9
assert z.der == {"x": 6}
x = ad.Variable(0, label="x")
z = x ** 2
assert z.val == 0
assert z.der == {"x": 0}
x = ad.Variable([3, 2], label="x")
z = x ** 2
assert np.all(z.val == [9, 4])
assert np.all(z.der == {"x": [6, 4]})
x = ad.Variable(3, label="x")
y = ad.Variable(2, label="y")
z = x ** y
assert z.val == 9
assert z.der == {"x": 6, "y": 9 * np.log(3)}
x = ad.Variable([3, 2], label="x")
y = ad.Variable([2, 3], label="y")
z = x ** y
assert np.all(z.val == [9, 8])
assert (
compare_dicts_multi(z.der, {"x": [6, 12], "y": [9 * np.log(3), 8 * np.log(2)]})
== True
)
x = ad.Variable([np.e - 1, np.e - 1], label="x")
y = ad.Variable([1, 1], label="y")
z = x + y
z2 = z ** y
assert np.all(z2.val == [np.e, np.e])
assert compare_dicts_multi(z2.der, {"x": [1, 1], "y": [np.e + 1, np.e + 1]}) == True
x = ad.Variable([0, 0], label="x")
y = ad.Variable([1, 2], label="y")
z = x ** y
assert np.all(z.val == [0, 0])
assert compare_dicts_multi(z.der, {"x": [1, 0], "y": [0, 0]}) == True
def test_rpow():
x = ad.Variable(1, label="x")
z = np.e ** x
assert z.val == np.e
assert z.der == {"x": np.e}
x = ad.Variable(1, label="x")
z = 0 ** x
assert z.val == 0
assert z.der == {"x": 0}
x = ad.Variable([1, 2], label="x")
z = np.e ** x
assert np.all(z.val == [np.e, np.e ** 2])
assert np.all(z.der == {"x": [np.e, np.e ** 2]})
x = ad.Variable(2, label="x")
y = ad.Variable(-1, label="y")
z = np.e ** (x + 2 * y)
assert z.val == 1
assert z.der == {"x": 1, "y": 2}
x = ad.Variable([2, -2], label="x")
y = ad.Variable([-1, 1], label="y")
z = np.e ** (x + 2 * y)
assert np.all(z.val == [1, 1])
assert np.all(z.der == {"x": [1, 1], "y": [2, 2]})
def test_ne():
x = ad.Variable(1, label="x")
y = ad.Variable(1, label="y")
assert (x != x) == False
assert (x != y) == True
z1 = ad.Variable([1, 2], der={"x": [1, 2], "y": [1, 2]}, label="z1")
z2 = ad.Variable([1, 2], der={"x": [1, 2], "y": [1, 2]}, label="z2")
assert (z1 != z2) == False
z1 = ad.Variable(1, der={"x": 2, "y": 3}, label="z1")
z2 = ad.Variable(1, der={"x": 2, "y": 3}, label="z2")
assert (z1 != z2) == False
z1 = ad.Variable([1, 2], der={"x": [1, 2], "y": [1, 2]}, label="z1")
z2 = ad.Variable([1, 2], der={"x": [1, 2], "y": [1, 3]}, label="z2")
assert (z1 != z2) == True
x = ad.Variable(1, label="x")
y = ad.Variable(1, label="y")
z1 = ad.exp(x) + np.e * y
z2 = ad.exp(y) + np.e * x
assert (z1 != z2) == False
x = ad.Variable([1, 2, 3], label="x")
y = ad.Variable([2, 3], label="y")
assert (x != y) == True
z = 1
assert (x != z) == True
def test_lt():
x = ad.Variable(1, label="x")
y = ad.Variable(2, label="y")
assert (x < y) == True
x = ad.Variable([1, 2], label="x")
y = ad.Variable([2, 2], label="y")
assert np.all((x < y) == [True, False])
x = ad.Variable([1, 1, 1], label="x")
y = ad.Variable([2, 2], label="y")
with pytest.raises(Exception):
print(x < y)
def test_le():
x = ad.Variable(1, label="x")
y = ad.Variable(2, label="y")
assert (x <= y) == True
x = ad.Variable([1, 2], label="x")
y = ad.Variable([2, 2], label="y")
assert np.all((x <= y) == [True, True])
x = ad.Variable([1, 1, 1], label="x")
y = ad.Variable([2, 2], label="y")
with pytest.raises(Exception):
print(x <= y)
def test_gt():
x = ad.Variable(3, label="x")
y = ad.Variable(2, label="y")
assert (x > y) == True
x = ad.Variable([3, 2], label="x")
y = ad.Variable([2, 2], label="y")
assert np.all((x > y) == [True, False])
x = ad.Variable([1, 1, 1], label="x")
y = ad.Variable([2, 2], label="y")
with pytest.raises(Exception):
print(x > y)
def test_ge():
x = ad.Variable(3, label="x")
y = ad.Variable(2, label="y")
assert (x >= y) == True
x = ad.Variable([3, 2], label="x")
y = ad.Variable([2, 2], label="y")
assert np.all((x >= y) == [True, True])
x = ad.Variable([1, 1, 1], label="x")
y = ad.Variable([2, 2], label="y")
with pytest.raises(Exception):
print(x >= y)
def test_complicated_functions():
## Function 1
## sin(x) + cos(x) * 3*y - x^4 + ln(x*y)
x = np.random.rand(5, 4)
x_var = ad.Variable(x, label="x")
y = np.random.rand(5, 4)
y_var = ad.Variable(y, label="y")
f_ad = ad.sin(x_var) + ad.cos(x_var) * 3 * y_var - x_var ** 4 + ad.ln(x_var * y_var)
f_ad_val = f_ad.val
f_ad_grad = f_ad.der
f_np_val = np.sin(x) + np.cos(x) * 3 * y - x ** 4 + np.log(x * y)
dx = -4 * x ** 3 - 3 * y * np.sin(x) + 1 / x + np.cos(x)
dy = 3 * np.cos(x) + 1 / y
assert np.array_equal(f_ad_val, f_np_val)
assert np.array_equal(np.around(f_ad_grad["x"], 4), np.around(dx, 4))
assert np.array_equal(np.around(f_ad_grad["y"], 4), np.around(dy, 4))
## Function 2
## cos(x*y^2) + exp(x*y*3x)
x = np.random.rand(3, 8)
x_var = ad.Variable(x, label="x")
y = np.random.rand(3, 8)
y_var = ad.Variable(y, label="y")
f_ad = ad.cos(x_var * y_var ** 2) + ad.exp(x_var * y_var * 3 * x_var)
f_ad_val = f_ad.val
f_ad_grad = f_ad.der
f_np_val = np.cos(x * y ** 2) + np.exp(x * y * 3 * x)
dx = y * (6 * x * np.exp(3 * x ** 2 * y) - y * np.sin(x * y ** 2))
dy = x * (3 * x * np.exp(3 * x ** 2 * y) - 2 * y * np.sin(x * y ** 2))
    assert np.array_equal(f_ad_val, f_np_val)
    assert np.array_equal(np.around(f_ad_grad["x"], 4), np.around(dx, 4))
    assert np.array_equal(np.around(f_ad_grad["y"], 4), np.around(dy, 4))
__copyright__ = "Copyright 2016-2020, Netflix, Inc."
__license__ = "BSD+Patent"
import numpy as np
class ListStats(object):
"""
>>> test_list = [1, 2, 3, 4, 5, 11, 12, 13, 14, 15]
>>> "%0.4f" % ListStats.total_variation(test_list)
'1.5556'
>>> np.mean(test_list)
8.0
>>> np.median(test_list)
8.0
>>> ListStats.lp_norm(test_list, 1.0)
8.0
>>> "%0.4f" % ListStats.lp_norm(test_list, 3.0)
'10.5072'
>>> "%0.2f" % ListStats.perc1(test_list)
'1.09'
>>> "%0.2f" % ListStats.perc5(test_list)
'1.45'
>>> "%0.2f" % ListStats.perc10(test_list)
'1.90'
>>> "%0.2f" % ListStats.perc20(test_list)
'2.80'
>>> ListStats.nonemean([None, None, 1, 2])
1.5
>>> ListStats.nonemean([3, 4, 1, 2])
2.5
>>> ListStats.nonemean([None, None, None])
nan
>>> "%0.4f" % ListStats.harmonic_mean(test_list)
'4.5223'
>>> "%0.4f" % ListStats.lp_norm(test_list, 2.0)
'9.5394'
"""
@staticmethod
def total_variation(my_list):
abs_diff_scores = np.absolute(np.diff(my_list))
return np.mean(abs_diff_scores)
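    # total_variation is the mean absolute first difference of the list; for
    # the doctest list above it is (8*1 + 6)/9 = 14/9 ~ 1.5556.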
@staticmethod
def moving_average(my_list, n, type='exponential', decay=-1):
"""
compute an n period moving average.
:param my_list:
:param n:
:param type: 'simple' | 'exponential'
:param decay:
:return:
"""
x = np.asarray(my_list)
if type == 'simple':
weights = np.ones(n)
elif type == 'exponential':
weights = np.exp(np.linspace(decay, 0., n))
else:
assert False, "Unknown type: {}.".format(type)
weights /= weights.sum()
a = np.convolve(x, weights, mode='full')[:len(x)]
a[:n] = a[n]
return a
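    # Example: ListStats.moving_average(np.arange(10.), n=3) returns a
    # length-10 array; the first n entries are back-filled with a[n] to mask
    # the convolution warm-up, and the weights are normalized to sum to one.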
@staticmethod
def harmonic_mean(my_list):
return 1.0 / np.mean(1.0 / (np.array(my_list) + 1.0)) - 1.0
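    # harmonic_mean above is a shifted harmonic mean, 1/mean(1/(x+1)) - 1;
    # the +1 shift keeps the reciprocals finite when the list contains zeros.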
@staticmethod
def lp_norm(my_list, p):
return np.power(np.mean(np.power(np.array(my_list), p)), 1.0 / p)
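    # lp_norm above is the generalized power mean, (mean(x**p))**(1/p);
    # p=1 reduces to the arithmetic mean (see the doctest), and larger p
    # weights the large entries more heavily.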
@staticmethod
def perc1(my_list):
return np.percentile(my_list, 1)
@staticmethod
def perc5(my_list):
return np.percentile(my_list, 5)
@staticmethod
def perc10(my_list):
return np.percentile(my_list, 10)
@staticmethod
def perc20(my_list):
return np.percentile(my_list, 20)
@staticmethod
def print_stats(my_list):
print("Min: {min}, Max: {max}, Median: {median}, Mean: {mean}," \
" Variance: {var}, Total_variation: {total_var}".format(
              min=np.min(my_list), max=np.max(my_list),
              median=np.median(my_list), mean=np.mean(my_list),
              var=np.var(my_list),
              total_var=ListStats.total_variation(my_list)))
"""
generate_plots_PRD_2020.py is a Python routine that can be used to generate
the plots of <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>, "Numerical simulations of gravitational waves
from early-universe turbulence," Phys. Rev. D 102, 083512 (2020),
https://arxiv.org/abs/1903.08585.
It reads the pickle run variables that can be generated by the routine
initialize_PRD_2020.py.
The function run() executes the code.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
# get working directory, where the runs and routines should be stored
dir0 = os.getcwd() + '/'
HOME = dir0 + '/..'
os.chdir(HOME)
from dirs import read_dirs as rd
import plot_sets
import run as r
import interferometry as inte
import cosmoGW
import spectra as sp
def run():
# import dictionary with the names identifying
# the runs and pointing to the corresponding directory
dirs = rd('PRD_2020_ini')
dirs = rd('PRD_2020_hel', dirs)
dirs = rd('PRD_2020_noh', dirs)
dirs = rd('PRD_2020_ac', dirs)
R = [s for s in dirs]
# read the runs stored in the pickle variables
runs = r.load_runs(R, dir0, dirs, quiet=False)
os.chdir(dir0)
return runs
def plot_EGW_EM_vs_k(runs, rr='ini2', save=True, show=True):
"""
Function that generates the plot of the magnetic spectrum
EM (k) = Omega_M(k)/k at the initial time of turbulence generation
and the GW spectrum EGW (k) = Omega_GW(k)/k, averaged over oscillations
in time.
It corresponds to figure 1 of <NAME>, <NAME>, <NAME>,
<NAME>, and <NAME>, "Numerical simulations of gravitational
waves from early-universe turbulence," Phys. Rev. D 102, 083512 (2020),
https://arxiv.org/abs/1903.08585.
Arguments:
runs -- dictionary that includes the run variables
rr -- string that selects which run to plot (default 'ini2')
save -- option to save the resulting figure as
plots/EGW_EM_vs_k_'name_run'.pdf' (default True)
show -- option to show the resulting figure (default True)
"""
plt.figure(figsize=(10,6))
plt.xscale('log')
plt.yscale('log')
plt.xlim(120, 6e4)
plt.ylim(1e-19, 1e-4)
plt.xlabel('$k$')
plt.ylabel(r'$\Omega_{\rm GW}(k)/k$ and $\Omega_{\rm M}(k)/k$',
fontsize=20)
run = runs.get(rr)
# plot the averaged over times GW spectrum
GWs_stat_sp = run.spectra.get('EGW_stat_sp')
k = run.spectra.get('k')[1:]
plt.plot(k, GWs_stat_sp, color='black')
# plot magnetic spectrum at the initial time
mag = run.spectra.get('mag')[0, 1:]
plt.plot(k, mag, color='black')
# plot k^4 line
k0 = np.logspace(np.log10(150), np.log10(500), 5)
plt.plot(k0, 1e-9*(k0/100)**4, color='black', ls='-.', lw=.7)
plt.text(300, 8e-9, r'$\sim\!k^4$', fontsize=20)
# plot k^(-5/3) line
k0 = np.logspace(np.log10(2000), np.log10(8000), 5)
plt.plot(k0, 1e-5*(k0/1000)**(-5/3), color='black', ls='-.', lw=.7)
plt.text(5e3, 1.6e-6, r'$\sim\!k^{-5/3}$', fontsize=20)
# plot k^(-11/3) line
k0 = np.logspace(np.log10(3000), np.log10(30000), 5)
plt.plot(k0, 1e-12*(k0/1000)**(-11/3), color='black', ls='-.', lw=.7)
plt.text(1e4, 5e-16, r'$\sim\!k^{-11/3}$', fontsize=20)
plt.text(1500, 1e-16, r'$\Omega_{\rm GW} (k)/k$', fontsize=20)
plt.text(800, 5e-8, r'$\Omega_{\rm M} (k)/k$', fontsize=20)
ax = plt.gca()
ax.set_xticks([100, 1000, 10000])
ytics = 10**np.array(np.linspace(-19, -5, 7))
ytics2 = 10**np.array(np.linspace(-19, -5, 15))
yticss = ['$10^{-19}$', '', '$10^{-17}$', '', '$10^{-15}$', '',
'$10^{-13}$', '', '$10^{-11}$', '', '$10^{-9}$', '',
'$10^{-7}$', '', '$10^{-5}$']
ax.set_yticks(ytics2)
ax.set_yticklabels(yticss)
plot_sets.axes_lines()
ax.tick_params(pad=10)
if save: plt.savefig('plots/EGW_EM_vs_k_' + run.name_run + '.pdf',
bbox_inches='tight')
if not show: plt.close()
def plot_EGW_vs_kt(runs, rr='ini2', save=True, show=True):
"""
Function that generates the plot of the compensated GW spectrum as a
function of k(t - tini) for the smallest wave numbers of the run.
It corresponds to figure 3 of <NAME>, <NAME>, <NAME>,
<NAME>, and <NAME>, "Numerical simulations of gravitational
waves from early-universe turbulence," Phys. Rev. D 102, 083512 (2020),
https://arxiv.org/abs/1903.08585.
Arguments:
runs -- dictionary that includes the run variables
rr -- string that selects which run to plot (default 'ini2')
save -- option to save the resulting figure as
plots/EGW_vs_kt.pdf (default True)
show -- option to show the resulting figure (default True)
"""
run = runs.get(rr)
k = run.spectra.get('k')[1:]
EGW = np.array(run.spectra.get('EGW')[:,1:], dtype='float')
t = run.spectra.get('t_EGW')
plt.figure(figsize=(10,6))
plt.xscale('log')
plt.yscale('log')
plt.xlim(1e-2, 40)
plt.ylim(8e-8, 1e-5)
plt.xlabel('$k (t - 1)$')
plt.ylabel(r'$\left[k_* \Omega_{\rm GW} (k, t)/k\right]^{1/2}$')
plot_sets.axes_lines()
# plot for initial wave numbers
plt.plot(k[0]*(t - 1), np.sqrt(EGW[:, 0]*run.kf),
color='black', lw=.8, #label='$k = %.0f$'%k[0])
label='$k = 100$')
plt.plot(k[1]*(t - 1), np.sqrt(EGW[:, 1]*run.kf),
color='red', ls='-.', lw=.8, #label='$k = %.0f$'%k[1])
label='$k = 200$')
plt.plot(k[2]*(t - 1), np.sqrt(EGW[:, 2]*run.kf),
color='blue', ls='dashed', lw=.8, #label='$k = %.0f$'%k[2])
label='$k = 300$')
plt.plot(k[3]*(t - 1), np.sqrt(EGW[:, 3]*run.kf),
color='green', ls='dotted', lw=.8, #label='$k = %.0f$'%k[3])
label='$k = 400$')
plt.legend(fontsize=22, loc='lower right', frameon=False)
if save: plt.savefig('plots/EGW_vs_kt.pdf', bbox_inches='tight')
if not show: plt.close()
def plot_OmMK_OmGW_vs_t(runs, save=True, show=True):
"""
Function that generates the plots of the total magnetic/kinetic energy
density as a function of time ('OmM_vs_t.pdf') and the GW energy density
as a function of time ('OmGW_vs_t.pdf').
It corresponds to figure 5 of A. Roper Pol, S. Mandal, A. Brandenburg,
T. Kahniashvili, and A. Kosowsky, "Numerical simulations of gravitational
waves from early-universe turbulence," Phys. Rev. D 102, 083512 (2020),
https://arxiv.org/abs/1903.08585.
Arguments:
runs -- dictionary that includes the run variables
save -- option to save the resulting figure as
plots/OmGW_vs_t.pdf (default True)
show -- option to show the resulting figure (default True)
"""
# choose the runs to be shown
rrs = ['ini1', 'ini2', 'ini3', 'hel1', 'hel2', 'ac1']
# choose the colors of each run
col = ['black', 'red', 'blue', 'red', 'blue', 'black']
# choose the line style for the plots
ls = ['solid']*6
ls[3] = 'dashed'
ls[4] = 'dashed'
ls[5] = 'dashed'
plt.figure(1, figsize=(10,6))
plt.figure(2, figsize=(10,6))
j = 0
for i in rrs:
run = runs.get(i)
k = run.spectra.get('k')[1:]
GWs_stat_sp = run.spectra.get('GWs_stat_sp')
t = run.ts.get('t')[1:]
indst = np.argsort(t)
t = t[indst]
EEGW = run.ts.get('EEGW')[1:][indst]
if run.turb == 'm': EEM = run.ts.get('EEM')[1:][indst]
if run.turb == 'k': EEM = run.ts.get('EEK')[1:][indst]
plt.figure(1)
plt.plot(t, EEGW, color=col[j], lw=.8, ls=ls[j])
# text with run name
if i=='ini1': plt.text(1.02, 5e-8, i, color=col[j])
if i=='ini2': plt.text(1.07, 5e-11, i, color=col[j])
if i=='ini3': plt.text(1.2, 6e-9, i, color=col[j])
if i=='hel1': plt.text(1.15, 2e-9, i, color=col[j])
if i=='hel2': plt.text(1.12, 7e-10, i, color=col[j])
if i=='ac1': plt.text(1.2, 1e-7, i, color=col[j])
plt.figure(2)
plt.plot(t, EEM, color=col[j], lw=.8, ls=ls[j])
# text with run name
if i=='ini1': plt.text(1.01, 8e-2, i, color=col[j])
if i=='ini2': plt.text(1.12, 3e-3, i, color=col[j])
if i=='ini3': plt.text(1.01, 9e-3, i, color=col[j])
if i=='hel1': plt.text(1.15, 1.3e-2, i, color=col[j])
if i=='hel2': plt.text(1.02, 1e-3, i, color=col[j])
if i=='ac1': plt.text(1.17, 1.5e-3, i, color=col[j])
j += 1
plt.figure(1)
plt.yscale('log')
plt.xlabel('$t$')
plt.xlim(1, 1.25)
plt.ylim(2e-11, 2e-7)
plt.ylabel(r'$\Omega_{\rm GW}$')
plot_sets.axes_lines()
if save: plt.savefig('plots/OmGW_vs_t.pdf', bbox_inches='tight')
if not show: plt.close()
plt.figure(2)
plt.yscale('log')
plt.xlim(1, 1.25)
plt.ylim(5e-4, 2e-1)
plt.xlabel('$t$')
plt.ylabel(r'$\Omega_{\rm M, K}$')
plot_sets.axes_lines()
if save: plt.savefig('plots/OmM_vs_t.pdf', bbox_inches='tight')
if not show: plt.close()
def plot_OmGW_hc_vs_f_ini(runs, T=1e5*u.MeV, g=100, SNR=10, Td=4,
save=True, show=True):
"""
Function that generates the plot of the GW energy density spectrum
of initial runs (ini1, ini2, and ini3), compared to the LISA sensitivity
and power law sensitivity (PLS).
It corresponds to figure 4 of A. Roper Pol, S. Mandal, A. Brandenburg,
T. Kahniashvili, and A. Kosowsky, "Numerical simulations of gravitational
waves from early-universe turbulence," Phys. Rev. D 102, 083512 (2020),
https://arxiv.org/abs/1903.08585.
Arguments:
runs -- dictionary that includes the run variables
T -- temperature scale (in natural units) at the time of turbulence
generation (default 100 GeV, i.e., electroweak scale)
g -- number of relativistic degrees of freedom at the time of
turbulence generation (default 100, i.e., electroweak scale)
SNR -- signal-to-noise ratio (SNR) of the resulting PLS (default 10)
Td -- duration of the mission (in years) of the resulting PLS
(default 4)
save -- option to save the resulting figure as
plots/OmGW_vs_f_ini.pdf (default True)
show -- option to show the resulting figure (default True)
"""
# read LISA and Taiji sensitivities
CWD = os.getcwd()
os.chdir('..')
#f_LISA, f_LISA_Taiji, LISA_sensitivity, LISA_OmPLS, LISA_XiPLS, \
# Taiji_OmPLS, Taiji_XiPLS, LISA_Taiji_XiPLS = inte.read_sens()
fs, LISA_Om, LISA_OmPLS = inte.read_sens(SNR=SNR, T=Td)
fs = fs*u.Hz
os.chdir(CWD)
# choose the runs to be shown
rrs = ['ini1', 'ini2', 'ini3']
# choose the colors of each run
col = ['black', 'red', 'blue']
plt.figure(1, figsize=(12,5))
plt.figure(2, figsize=(12,5))
j = 0
for i in rrs:
run = runs.get(i)
k = run.spectra.get('k')[1:]
EGW_stat = run.spectra.get('EGW_stat_sp')
f, OmGW_stat = cosmoGW.shift_OmGW_today(k, EGW_stat*k, T, g)
OmGW_stat = np.array(OmGW_stat, dtype='float')
hc_stat = cosmoGW.hc_OmGW(f, OmGW_stat)
plt.figure(1)
plt.plot(f, OmGW_stat, color=col[j], lw=.8)
if i == 'ini1': plt.text(5e-2, 1.5e-15, i, color=col[j])
if i == 'ini2': plt.text(3e-2, 2e-17, i, color=col[j])
if i == 'ini3': plt.text(3e-3, 4e-17, i, color=col[j])
plt.figure(2)
plt.plot(f, hc_stat, color=col[j], lw=.8)
if i == 'ini1': plt.text(5e-2, 1.5e-24, i, color=col[j])
if i == 'ini2': plt.text(1e-2, 3e-24, i, color=col[j])
if i == 'ini3': plt.text(4e-3, 1e-24, i, color=col[j])
j += 1
plt.figure(1)
plt.xscale('log')
plt.yscale('log')
plt.xlim(1e-4, 1e-1)
plt.ylim(1e-19, 1e-9)
plt.xlabel('$f$ [Hz]')
plt.ylabel(r'$h_0^2 \Omega_{\rm GW} (f)$')
#plt.plot(f_LISA, LISA_OmPLS, color='lime', ls='dashdot')
#plt.plot(f_LISA, LISA_sensitivity, color='lime')
plt.plot(fs, LISA_OmPLS, color='lime', ls='dashdot')
plt.plot(fs, LISA_Om, color='lime')
# plot f^(-8/3) line
fs0 = np.logspace(-2.1, -1.5, 5)
plt.plot(fs0, 3e-15*(fs0/2e-2)**(-8/3), color='black',
ls='dashdot', lw=.7)
plt.text(1e-2, 5e-16, r'$\sim\!f^{-8/3}$')
# plot f line
fs0 = np.logspace(-3.45, -2.8, 5)
plt.plot(fs0, 2e-13*(fs0/1e-3)**(1), color='black',
ls='dashdot', lw=.7)
plt.text(4e-4, 3e-13, r'$\sim\!f$')
ax = plt.gca()
ytics2 = 10**np.array(np.linspace(-19, -9, 11))
yticss = ['', '$10^{-18}$', '', '$10^{-16}$', '', '$10^{-14}$', '',
'$10^{-12}$', '', '$10^{-10}$', '']
ax.set_yticks(ytics2)
ax.set_yticklabels(yticss)
plot_sets.axes_lines()
if save: plt.savefig('plots/OmGW_vs_f_ini.pdf', bbox_inches='tight')
if not show: plt.close()
plt.figure(2)
plt.xscale('log')
plt.yscale('log')
plt.xlim(1e-4, 1e-1)
plt.ylim(1e-25, 1e-20)
plt.xlabel('$f$ [Hz]')
plt.ylabel(r'$h_{\rm c}(f)$')
LISA_hc_PLS = cosmoGW.hc_OmGW(fs, LISA_OmPLS)
LISA_hc = cosmoGW.hc_OmGW(fs, LISA_Om)
plt.plot(fs, LISA_hc_PLS, color='lime', ls='dashdot')
plt.plot(fs, LISA_hc, color='lime')
# plot f^(-1/2) line
fs0 = np.logspace(-3.4, -2.6, 5)
plt.plot(fs0, 8e-22*(fs0/1e-3)**(-1/2), color='black',
ls='dashdot', lw=.7)
plt.text(8e-4, 1.5e-21, r'$\sim\!f^{-1/2}$')
# plot f^(-7/3) line
fs0 = np.logspace(-2, -1.4, 5)
plt.plot(fs0, 1e-23*(fs0/2e-2)**(-7/3), color='black',
ls='dashdot', lw=.7)
plt.text(2e-2, 2e-23, r'$\sim\!f^{-7/3}$')
ax = plt.gca()
ytics2 = 10**np.array(np.linspace(-25, -20, 6))
ax.set_yticks(ytics2)
plot_sets.axes_lines()
if save: plt.savefig('plots/hc_vs_f_ini.pdf', bbox_inches='tight')
if not show: plt.close()
def plot_OmGW_hc_vs_f_driven(runs, T=1e5*u.MeV, g=100, SNR=10, Td=4,
save=True, show=True):
"""
Function that generates the plot of the GW energy density spectrum
of some of the initially driven runs (ac1, hel1, hel2, hel3, and noh1),
compared to the LISA sensitivity and power law sensitivity (PLS).
It corresponds to figure 6 of A. Roper Pol, S. Mandal, A. Brandenburg,
T. Kahniashvili, and A. Kosowsky, "Numerical simulations of gravitational
waves from early-universe turbulence," Phys. Rev. D 102, 083512 (2020),
https://arxiv.org/abs/1903.08585.
Arguments:
runs -- dictionary that includes the run variables
T -- temperature scale (in natural units) at the time of turbulence
generation (default 100 GeV, i.e., electroweak scale)
g -- number of relativistic degrees of freedom at the time of
turbulence generation (default 100, i.e., electroweak scale)
SNR -- signal-to-noise ratio (SNR) of the resulting PLS (default 10)
Td -- duration of the mission (in years) of the resulting PLS
(default 4)
save -- option to save the resulting figure as
plots/OmGW_vs_f_driven.pdf (default True)
show -- option to show the resulting figure (default True)
"""
# read LISA and Taiji sensitivities
CWD = os.getcwd()
os.chdir('..')
#f_LISA, f_LISA_Taiji, LISA_sensitivity, LISA_OmPLS, LISA_XiPLS, \
# Taiji_OmPLS, Taiji_XiPLS, LISA_Taiji_XiPLS = inte.read_sens()
fs, LISA_Om, LISA_OmPLS = inte.read_sens(SNR=SNR, T=Td)
fs = fs*u.Hz
os.chdir(CWD)
# choose the runs to be shown
rrs = ['ac1', 'hel1', 'hel2', 'hel3', 'noh1']
# choose the colors of each run
col = ['black', 'red', 'blue', 'blue', 'blue']
# choose the line style for the plots
ls = ['solid', 'solid', 'solid', 'dotted', 'dashed']
plt.figure(1, figsize=(12,5))
plt.figure(2, figsize=(12,5))
j = 0
for i in rrs:
run = runs.get(i)
k = run.spectra.get('k')[1:]
EGW_stat = run.spectra.get('EGW_stat_sp')
f, OmGW_stat = cosmoGW.shift_OmGW_today(k, EGW_stat*k, T, g)
OmGW_stat = np.array(OmGW_stat, dtype='float')
hc_stat = cosmoGW.hc_OmGW(f, OmGW_stat)
plt.figure(1)
# omit largest frequencies where there is not enough numerical accuracy
if i == 'hel3':
OmGW_stat = OmGW_stat[np.where(f.value<3e-2)]
hc_stat = hc_stat[np.where(f.value<3e-2)]
f = f[np.where(f.value<3e-2)]
if i in ('hel1', 'hel2', 'hel3', 'noh1'):
plt.plot(f, OmGW_stat, color=col[j],
ls=ls[j], lw=.8, label=i)
else:
plt.plot(f, OmGW_stat, color=col[j], ls=ls[j], lw=.8)
plt.text(5e-2, 2e-19, i, fontsize=20, color='black')
plt.figure(2)
if i in ('hel1', 'hel2', 'hel3', 'noh1'):
plt.plot(f, hc_stat, color=col[j], ls=ls[j], lw=.8, label=i)
else:
plt.plot(f, hc_stat, color=col[j], ls=ls[j], lw=.8)
plt.text(3e-3, 5e-22, i, color=col[j])
j += 1
plt.figure(1)
plt.xscale('log')
plt.yscale('log')
plt.xlim(1e-4, 1e-1)
plt.ylim(1e-26, 1e-9)
plt.xlabel('$f$ [Hz]')
plt.ylabel(r'$h_0^2 \Omega_{\rm GW} (f)$')
plt.legend(loc='lower left', frameon=False, fontsize=20)
plt.plot(fs, LISA_OmPLS, color='lime', ls='dashdot')
plt.plot(fs, LISA_Om, color='lime')
# plot f^(-5) line
fk0 = np.logspace(-2.2, -1.6, 5)
plt.plot(fk0, 1e-14*(fk0/1e-2)**(-5), color='black',
ls='dashdot', lw=.7)
plt.text(1.3e-2, 1e-14, r'$\sim\!f^{-5}$')
# plot f line
fk0 = np.logspace(-3.3, -2.8, 5)
plt.plot(fk0, 2e-16*(fk0/1e-3)**(1), color='black',
ls='dashdot', lw=.7)
plt.text(6e-4, 1e-17, r'$\sim\!f$')
ax = plt.gca()
ytics2 = 10**np.array(np.linspace(-25, -9, 16))
yticss = ['$10^{-25}$', '', '$10^{-23}$', '', '$10^{-21}$', '',
'$10^{-19}$','', '$10^{-17}$', '','$10^{-15}$','',
'$10^{-13}$', '','$10^{-11}$','']
ax.set_yticks(ytics2)
ax.set_yticklabels(yticss)
plot_sets.axes_lines()
if save: plt.savefig('plots/OmGW_vs_f_driven.pdf', bbox_inches='tight')
if not show: plt.close()
plt.figure(2)
plt.xscale('log')
plt.yscale('log')
plt.xlim(1e-4, 1e-1)
plt.ylim(1e-28, 1e-20)
plt.xlabel('$f$ [Hz]')
plt.ylabel(r'$h_{\rm c}(f)$')
plt.legend(loc='lower left', frameon=False, fontsize=20)
LISA_hc_PLS = cosmoGW.hc_OmGW(fs, LISA_OmPLS)
LISA_hc = cosmoGW.hc_OmGW(fs, LISA_Om)
plt.plot(fs, LISA_hc_PLS, color='lime', ls='dashdot')
plt.plot(fs, LISA_hc, color='lime')
# plot f^(-7/2) line
fk0 = np.logspace(-2.2, -1.6, 5)
plt.plot(fk0, 1e-23*(fk0/1e-2)**(-7/2), color='black',
ls='dashdot', lw=.7)
plt.text(1.3e-2, 1e-23, r'$\sim\!f^{-7/2}$')
# plot f^(-1/2) line
fk0 = np.logspace(-3.3, -2.8, 5)
import glob
import os
import operator
import sys
import numpy as np
import matplotlib.pyplot as plt
import yt
yt.funcs.mylog.setLevel(50)
class Profile:
"""read a plotfile using yt and store the 1d profile for T and enuc"""
def __init__(self, plotfile):
ds = yt.load(plotfile)
time = float(ds.current_time)
ad = ds.all_data()
# Sort the ray values by 'x' so there are no discontinuities
# in the line plot
srt = np.argsort(ad['x'])
x_coord = np.array(ad['x'][srt])
temp = np.array(ad['Temp'][srt])
enuc = np.array(ad['enuc'][srt])
self.time = time
self.x = x_coord
self.T = temp
self.enuc = enuc
def find_x_for_T(self, T_0=1.e9):
""" given a profile x(T), find the x_0 that corresponds to T_0 """
# our strategy here assumes that the hot ash is in the early
# part of the profile. We then find the index of the first
# point where T drops below T_0
idx = np.where(self.T < T_0)[0][0]
T1 = self.T[idx-1]
x1 = self.x[idx-1]
T2 = self.T[idx]
x2 = self.x[idx]
slope = (x2 - x1)/(T2 - T1)
return x1 + slope*(T_0 - T1)
def get_velocity(p1, p2):
"""look at the last 2 plotfiles and estimate the velocity by
finite-differencing"""
# we'll do this by looking at 3 different temperature
# thresholds and averaging
T_ref = [2.e9, 3.e9, 4.e9]
vs = []
for T0 in T_ref:
x1 = p1.find_x_for_T(T0)
x2 = p2.find_x_for_T(T0)
vs.append((x1 - x2)/(p1.time - p2.time))
vs = np.array(vs)
v = np.mean(vs)
return v
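
# A minimal usage sketch (not from the original script): estimate the flame
# front velocity by finite-differencing the two most recent plotfiles. The
# "plt?????" glob pattern is an assumption about the plotfile naming scheme.
if __name__ == "__main__":
    plotfiles = sorted(glob.glob("plt?????"))
    if len(plotfiles) < 2:
        sys.exit("need at least two plotfiles to estimate a velocity")
    p1 = Profile(plotfiles[-1])
    p2 = Profile(plotfiles[-2])
    print("estimated front velocity: {}".format(get_velocity(p1, p2)))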
# -*- coding: utf-8 -*-
"""Minimum_External_Constraints.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1soDz7aUjticLOAa_JMQxQ3GNZ22AWNjI
# Definition of the network properties.
"""
#Import modules.
import numpy as np
import sympy as sp
import sys
from sympy import symbols, diff
from numpy.linalg import multi_dot
import math
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
n = 16 #number of measurements.
v = 5 #number of network nodes.
d = 3 #datum defect : position and orientation = 2+1 = 3
m = 2*v-d #number of unknown parameters.
r = n - m #degrees of freedom.
#Set numpy's print options.
np.set_printoptions(suppress=True,threshold=np.inf,linewidth=300,precision=15)
print("The design matrix A has dimensions of {}x{} elements".format(n,m))
print("The weight matrix P has dimensions of {}x{} elements".format(n,n))
#We assume that:
# 0 = Α
# 1 = B = constant and a_12 = a_BC = constant
# 2 = C
# 3 = D
# 4 = E
#Matrices of measurements and standard errors.
l = np.array([64.1902,50.8882,32.4675,60.7616,26.2679,43.1958,92.6467,29.5762,106.2276,116.6508,112.9705,64.1595,490.249,220.725,791.552,659.535]) #array which includes all measurements. (angles+lengths)
noa = 12 #number of angles
sigma_1 = np.array([25]*noa) #the standard error is in cc!
sigma_2 = np.array([0.012]*(l.shape[0]-noa)) #the standard error is in meters!
sigma = np.concatenate((sigma_1,sigma_2))
#Temporary coordinates of network nodes.
x = np.array([1586.537,2075.094,2222.679,1449.130,1688.320])
y = np.array([937.235,896.541,354.801,522.664,741.395])
#Temporary distance S12.
S12 = np.sqrt((x[2]-x[1])**2+(y[2]-y[1])**2)
#Matrix of unknown parameters X : xA,yA,xD,yD,xE,yE,SBC
X=np.array([1586.537, 937.235, 1449.13 , 522.664, 1688.320, 741.395, S12])
X
#Create the necessary variables.
b_jik, a_ik, a_ij = symbols("b_jik, a_ik, a_ij ")
y_i,y_j,y_k = symbols("y_i,y_j,y_k")
x_i,x_j,x_k = symbols("x_i,x_j,x_k")
S_ij,S_ik = symbols("S_ij,S_ik")
dx_i,dx_j,dx_k = symbols("dx_i,dx_j,dx_k")
dy_i,dy_j,dy_k = symbols("dy_i,dy_j,dy_k")
#Auxiliary indices.
jj = np.array([2,5,1,5,4,5,3,5,1,4,3,2,2,5,3,3])-1
ii = np.array([1,1,4,4,3,3,2,2,5,5,5,5,1,1,4,5])-1
kk = np.array([5,4,5,3,5,2,5,1,2,1,4,3])-1
#Linearized angle equations.
angle_eq = (((y_j-y_i)/S_ij**2 - (y_k-y_i)/S_ik**2)*dx_i + ((x_k-x_i)/S_ik**2 - (x_j-x_i)/S_ij**2)*dy_i
- dx_j*(y_j-y_i)/S_ij**2 + dy_j*(x_j-x_i)/S_ij**2 + dx_k*(y_k-y_i)/S_ik**2 - dy_k*(x_k-x_i)/S_ik**2
)
angle_eq
#Linearized distance equations.
dist_eq = -(x_j-x_i)/S_ij*dx_i-dy_i*(y_j-y_i)/S_ij+(x_j-x_i)/S_ij*dx_j+dy_j*(y_j-y_i)/S_ij
dist_eq
def finda(dx,dy,a):
'''This function calculates the true value of an azimuth angle'''
if dx>0:
if dy>0:return 200*a/math.pi
if dy==0:return 100
if dy<0:return 200+200*a/math.pi
if dx<0:
if dy>0:return 400+200*a/math.pi
if dy==0:return 300
if dy<0:return 200+200*a/math.pi
if dx==0:
if dy>0:return 0
if dy==0:return print("Division by 0!")
if dy<0:return 200
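#Illustrative sanity check (not part of the original notebook): a direction
#with dx=1, dy=-1 lies in the second azimuth quadrant, so the azimuth must
#come out as 150 grad.
assert abs(finda(1, -1, math.atan(1/-1)) - 150.0) < 1e-9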
"""# Creation of design matrix Α."""
#Create auxiliary vector.
dx_0,dy_0,dx_1,dy_1,dx_2,dy_2,dx_3,dy_3,dx_4,dy_4,dS_12 = symbols('dx_0,dy_0,dx_1,dy_1,dx_2,dy_2,dx_3,dy_3,dx_4,dy_4,dS_12')
DX = [dx_0,dy_0,
dx_3,dy_3,
dx_4,dy_4,
dS_12]
DX
#Construction of the 16 observation equations.
eqs = np.empty(shape=(n,), dtype=object)
for i in range(eqs.shape[0]):
Sij = np.sqrt((x[jj[i]]-x[ii[i]])**2+(y[jj[i]]-y[ii[i]])**2)
if i<=noa-1:
Sik = np.sqrt((x[kk[i]]-x[ii[i]])**2+(y[kk[i]]-y[ii[i]])**2)
eqs[i]=angle_eq.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(x_k,x[kk[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(y_k,y[kk[i]]),(S_ij,Sij),(S_ik,Sik),
(dx_i,symbols('dx_{}'.format(ii[i]))),
(dx_j,symbols('dx_{}'.format(jj[i]))),
(dx_k,symbols('dx_{}'.format(kk[i]))),
(dy_i,symbols('dy_{}'.format(ii[i]))),
(dy_j,symbols('dy_{}'.format(jj[i]))),
(dy_k,symbols('dy_{}'.format(kk[i])))
])*636620
else:
eqs[i] = dist_eq.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(S_ij,Sij),
(dx_i,symbols('dx_{}'.format(ii[i]))),
(dx_j,symbols('dx_{}'.format(jj[i]))),
(dy_i,symbols('dy_{}'.format(ii[i]))),
(dy_j,symbols('dy_{}'.format(jj[i]))),
])
#Calculate the true value of the azimuth angle a_BC = a_12.
a = math.atan((x[2]-x[1])/(y[2]-y[1]))
a_12 = finda(x[2]-x[1],y[2]-y[1],a)
#Replace the variables with their true values.
for i,eq in enumerate(eqs):
eqs[i]=eq.subs([(dx_1,0),(dy_1,0)]) #node B is held fixed, so its corrections vanish.
eqs[i]=eqs[i].subs([(dx_2,dS_12*math.sin(a_12*math.pi/200)),(dy_2,dS_12*math.cos(a_12*math.pi/200))])
#Differentiation of each equation with each one of the unknown parameters and creation of design matrix Α.
A = np.zeros(shape=(n,m))
for i in range(0,n):
for j in range(0,m):
A[i,j] = diff(eqs[i],DX[j])
print('Array A is:\n')
print(A)
"""# Creation of matrix of the calculated values. """
#Creation of matrix of the calculated values.
delta_l = np.zeros(shape=(n,))
for i in range(n):
if i<=noa-1: #If it's an angle equation.
div1 = (x_k-x_i)/(y_k-y_i)
div2 = (x_j-x_i)/(y_j-y_i)
d1=float(div1.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(x_k,x[kk[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(y_k,y[kk[i]])]))
d2=float(div2.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(y_k,y[kk[i]])]))
aik_ = np.arctan(d1)
aij_ = np.arctan(d2)
#Azimuth angle calculation.
deltaX = x_k-x_i
deltaY = y_k-y_i
deltaX = float(deltaX.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(x_k,x[kk[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(y_k,y[kk[i]])]))
deltaY = float(deltaY.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(x_k,x[kk[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(y_k,y[kk[i]])]))
aik=finda(deltaX,deltaY,aik_)
deltaX = x_j-x_i
deltaY = y_j-y_i
deltaX = float(deltaX.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(x_k,x[kk[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(y_k,y[kk[i]])]))
deltaY = float(deltaY.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(x_k,x[kk[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(y_k,y[kk[i]])]))
aij=finda(deltaX,deltaY,aij_)
delta_l[i]=(l[i]-aik+aij) #in cc
while delta_l[i]>399: delta_l[i]-=400
delta_l[i]=delta_l[i]*10000 #conversion from grad to cc.
else: #If it's a distance equation.
Sij = np.sqrt((x[jj[i]]-x[ii[i]])**2+(y[jj[i]]-y[ii[i]])**2) #distance calculation.
delta_l[i]=l[i]-Sij #in meters
delta_l
"""
# Creation of the weight matrix P."""
#Define the a-priori standard error.
sigma_0 = 1
I = np.identity(n)
P = I*(sigma_0/sigma)**2
print('Array P is:\n')
print(P)
"""#System solution."""
#Calculate the new array with the adjusted coordinate values of network nodes A,D,E and distance SBC.
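#Least-squares solution of the normal equations: dx = (A^T P A)^(-1) A^T P dl.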
delta_x=np.dot(np.linalg.inv(multi_dot([A.T,P,A])),multi_dot([A.T,P,delta_l]))
X_hat = X+delta_x
X_hat
#Points B=1 and C=2.
x_1 = x[1]
y_1 = y[1]
#Calculate the final coordinates of the node C=2.
#With:
# x_2 = X_hat[-1]*math.sin(a_12*math.pi/200) + x[1]
# y_2 = X_hat[-1]*math.cos(a_12*math.pi/200) + y[1]
#Or:
x_2 = delta_x[-1]*math.sin(a_12*math.pi/200) + x[2]
y_2 = delta_x[-1]*math.cos(a_12*math.pi/200) + y[2]
#Create a new array with the adjusted coordinate values of all network nodes.
X_hat_extended = np.array([X_hat[0], X_hat[1], x_1,y_1,x_2,y_2, X_hat[2], X_hat[3], X_hat[4], X_hat[5]])
X_hat_extended
"""#Calculation of the a-priori variance-covariance matrix.
"""
#Define the a-priori standard error.
sigma_0 = 1
#Calculate the a-priori variance-covariance matrix.
V_x_hat = (sigma_0**2)*np.linalg.inv(multi_dot([A.T,P,A]))
V_x_hat
"""#Computation of the a posteriori standard error."""
#Calculation of the a-posteriori standard error.
u = np.dot(A,delta_x)-delta_l
sigma_0_hat = np.sqrt(multi_dot([u.T,P,u])/(n-m))
sigma_0_hat
#Create the necessary auxiliary variables and vectors.
x_0,y_0,x_1,y_1,x_2,y_2,x_3,y_3,x_4,y_4,S_12 = symbols('x_0,y_0,x_1,y_1,x_2,y_2,x_3,y_3,x_4,y_4,S_12')
DX_1 = np.array([x_0,y_0,x_3,y_3,x_4,y_4,S_12*math.sin(a_12*math.pi/200),S_12*math.cos(a_12*math.pi/200),x_1,y_1])
DX_2 = np.array([x_0,y_0,x_3,y_3,x_4,y_4,S_12])
#Calculation of the necessary Jacobian matrix for the propagation of uncertainty between the two vectors DX_1, DX_2.
J = np.zeros(shape=(2*v,V_x_hat.shape[0]))
for i in range(0,2*v):
for j in range(V_x_hat.shape[0]):
J[i,j] = diff(DX_1[i],DX_2[j])
print('Array J is:\n')
print(J)
#Calculation of the a-priori variance-covariance matrix of the coordinates of all network nodes.
V_x_hat_extended = multi_dot([J,V_x_hat,J.T])
V_x_hat_extended_df = pd.DataFrame(V_x_hat_extended,index=["0","00","3","33","4","44","2","22","1","11"], columns=["0","00","3","33","4","44","2","22","1","11"])
V_x_hat_extended_df=V_x_hat_extended_df.sort_index()
V_x_hat_extended_df=V_x_hat_extended_df.reindex(sorted(V_x_hat_extended_df.columns), axis=1)
V_x_hat_extended = np.array(V_x_hat_extended_df)
V_x_hat_extended
"""#Graphical representation of the error ellipses."""
#Final coordinates of network nodes.
x_hat = np.zeros(shape=(v,))
y_hat = np.zeros(shape=(v,))
j=0
for i in range(0,v):
x_hat[i] = X_hat_extended[j]
y_hat[i] = X_hat_extended[j+1]
if j%2==0:j+=2
#Network edges.
lines_x = np.concatenate((x_hat,np.array([x_hat[0],x_hat[4],x_hat[1],x_hat[4],x_hat[2],x_hat[4],x_hat[3],x_hat[4],x_hat[0],x_hat[3]])))
lines_y = np.concatenate((y_hat,np.array([y_hat[0],y_hat[4],y_hat[1],y_hat[4],y_hat[2],y_hat[4],y_hat[3],y_hat[4],y_hat[0],y_hat[3]])))
X_hat_extended
def auxfunc(V_xy):
'''This function takes as argument the variance-covariance submatrix of the corresponding
network node or edge, and as output it returns the absolute or relative ellipse properties.'''
width, height, angle =0,0,0
if (V_xy[0,1]+V_xy[0,0]-V_xy[1,1])!=0:
#Define equations for the calculation of the semi-major axis, the semi-minor axis and the orientation of the error ellipse.
sigma_x_sq,sigma_y_sq,sigma_xy = symbols('sigma_x_sq,sigma_y_sq,sigma_xy')
sigma_max_squared = ((sigma_x_sq+sigma_y_sq)+((sigma_x_sq-sigma_y_sq)**2+4*sigma_xy**2)**0.5)/2
sigma_min_squared = ((sigma_x_sq+sigma_y_sq)-((sigma_x_sq-sigma_y_sq)**2+4*sigma_xy**2)**0.5)/2
tan_2theta = 2*sigma_xy/(sigma_x_sq-sigma_y_sq)
#Calculate the length of the semi-major and the semi-minor ellipse axes.
sigma_max_squared = sigma_max_squared.subs([(sigma_x_sq,V_xy[0,0]),(sigma_y_sq,V_xy[1,1]),(sigma_xy,V_xy[0,1])])
sigma_min_squared = sigma_min_squared.subs([(sigma_x_sq,V_xy[0,0]),(sigma_y_sq,V_xy[1,1]),(sigma_xy,V_xy[0,1])])
sigma_u=sigma_max_squared**0.5
sigma_v=sigma_min_squared**0.5
width = 2*sigma_u
height = 2*sigma_v
#Calculate the orientation of the error ellipse.
tan_2theta = tan_2theta.subs([(sigma_x_sq,V_xy[0,0]),(sigma_y_sq,V_xy[1,1]),(sigma_xy,V_xy[0,1])])
theta = math.atan(tan_2theta)*180/(2*math.pi)
#Extract the variances and the covariances from the input matrix.
sigma_x=V_xy[0,0]**0.5
sigma_y=V_xy[1,1]**0.5
sigma_xy=V_xy[0,1]
#Angle investigation.
if sigma_x>sigma_y:
if sigma_xy>0:angle=theta
if sigma_xy<0:angle=theta+180
if sigma_x<sigma_y:
if sigma_xy>0:angle=theta+90
if sigma_xy<0:angle=theta+90
if sigma_x==sigma_y:
if sigma_xy>0:angle=45
if sigma_xy<0:angle=135
return width, height, angle
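#Illustrative check (not in the original notebook): an uncorrelated 2x2
#covariance matrix with sigma_x=2, sigma_y=1 gives an axis-aligned ellipse,
#i.e. width 4 (=2*sigma_max), height 2 (=2*sigma_min) and angle 0:
#auxfunc(np.array([[4.0, 0.0], [0.0, 1.0]])) # -> (4.0, 2.0, 0)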
def ellipse_args(netnodecode1, netnodecode2=9999, V_x_hat=V_x_hat_extended):
'''This function takes as arguments the specific network node for the calculation of the absolute error ellipse properties, or
the two network nodes of the corresponding network edge for which we want to calculate the relative error ellipse arguments.
Additionally, this function takes as argument the variance-covariance matrix of the coordinates of all network nodes. As output,
it returns the absolute or relative ellipse properties.'''
netnodecode=netnodecode1
if netnodecode < 5:
if netnodecode2 == 9999: #If we want to calculate the absolute error ellipse.
#Extract the variance-covariance submatrix of the given network node.
V_xy = V_x_hat[2*netnodecode:2*netnodecode+2,2*netnodecode:2*netnodecode+2]
width, height, angle = auxfunc(V_xy)
return width, height, angle
elif netnodecode2 < 5: #If we want to calculate the relative error ellipse.
Jrij = np.array([[-1,0,1,0],[0,-1,0,1]])
Vrig = np.ones(shape=(4,4))
#Extract the variance-covariance submatrix of the given network edge.
V_xy1 = V_x_hat[2*netnodecode1:2*netnodecode1+2,2*netnodecode1:2*netnodecode1+2]
V_xy2 = V_x_hat[2*netnodecode2:2*netnodecode2+2,2*netnodecode2:2*netnodecode2+2]
V1 = V_x_hat[2*netnodecode1:2*netnodecode1+2,2*netnodecode2:2*netnodecode2+2]
V2 = V_x_hat[2*netnodecode2:2*netnodecode2+2,2*netnodecode1:2*netnodecode1+2]
Vrig=np.asarray(np.bmat([[V_xy1, V1], [V2, V_xy2]]))
VDrij = multi_dot([Jrij,Vrig,Jrij.T])
width, height, angle = auxfunc(VDrij)
return width, height, angle
else: return print("There is no network node with the given code name!")
#Graphical representation of the error ellipses.
fig=plt.figure(figsize=(15,10))
ax = fig.add_subplot(111, aspect='equal')
#Define plot options.
scalefactor = 1000
ld=1.5
ellcolor = 'red'
ellzorder = 4
pointzorder = 5
#Plot network edges.
plt.plot(lines_x, lines_y, linewidth=0.6, markersize=8, alpha=1)
#Plot network nodes.
plt.scatter(x_hat, y_hat, marker="+", zorder=pointzorder, s=70, c='black')
#Plot nodenames.
nodenames=["A","B","C","D","E"]
for i, txt in enumerate(nodenames):
ax.annotate(txt, (x_hat[i], y_hat[i]), xytext=(x_hat[i]+5,y_hat[i]+5), zorder=pointzorder)
#Absolute error ellipses of all network nodes.
width, height, angle = ellipse_args(0)
ax.add_artist(Ellipse(xy=X_hat_extended[:2], width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
width, height, angle = ellipse_args(1)
ax.add_artist(Ellipse(xy=X_hat_extended[2:4], width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
width, height, angle = ellipse_args(2)
ax.add_artist(Ellipse(xy=X_hat_extended[4:6], width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
width, height, angle = ellipse_args(3)
ax.add_artist(Ellipse(xy=X_hat_extended[6:8], width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
width, height, angle = ellipse_args(4)
ax.add_artist(Ellipse(xy=X_hat_extended[8:], width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
#Relative error ellipses between each station pair.
#Edge AC.
width, height, angle = ellipse_args(0,2)
xy=(X_hat_extended[:2]+X_hat_extended[4:6])/2
xx = np.array([x_hat[0],x_hat[2]])
yy = np.array([y_hat[0],y_hat[2]])
plt.plot(xx, yy, "k--", linewidth=2, markersize=4)
plt.plot((x_hat[0]+x_hat[2])/2,(y_hat[0]+y_hat[2])/2, marker=".", color='red', mec="black", markersize=12, zorder=pointzorder)
ax.add_artist(Ellipse(xy=xy, width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
#Edge AB.
width, height, angle = ellipse_args(0,1)
xy=(X_hat_extended[:2]+X_hat_extended[2:4])/2
xx = np.array([x_hat[0],x_hat[1]])
yy = np.array([y_hat[0],y_hat[1]])
plt.plot(xx, yy, "k--", linewidth=2, markersize=4)
plt.plot((x_hat[1]+x_hat[0])/2,(y_hat[1]+y_hat[0])/2, marker=".", color='red', mec="black", markersize=12, zorder=pointzorder)
ax.add_artist(Ellipse(xy=xy, width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
#Edge DB.
width, height, angle = ellipse_args(3,1)
xy=(X_hat_extended[6:8]+X_hat_extended[2:4])/2
xx = np.array([x_hat[3],x_hat[1]])
yy = np.array([y_hat[3],y_hat[1]])
plt.plot(xx, yy, "k--", linewidth=2, markersize=4)
plt.plot((x_hat[3]+x_hat[1])/2,(y_hat[3]+y_hat[1])/2, marker=".", color='red', mec="black", markersize=12, zorder=pointzorder)
ax.add_artist(Ellipse(xy=xy, width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
import math
import cv2
import numpy as np
from numpy import exp as exp
from numpy import cos as cos
from numpy import sin as sin
from numpy import tan as tan
from numpy import sqrt as sqrt
from numpy import arctan2 as arctan2
from matplotlib import pyplot as plt
import os
import datetime
import zernike_functions
from zernike_functions import zernike
class PreProcess:
def mkdir_out_dir(dir_fn, fn, N, p, z):
dt_now = datetime.datetime.now()
time_str = dt_now.strftime('%Y%m%d-%H%M%S')
holo_size = int(float(N) * float(p) * float(pow(10.0,3)))
info_name = 'z'+str(int(z*pow(10,3)))+'mm'+'_size'+str(holo_size)+'mm'
out_path = './output_imgs/'
if not os.path.exists(out_path):
os.mkdir(out_path)
out_path = out_path + dir_fn + '/'
if not os.path.exists(out_path):
os.mkdir(out_path)
out_path = out_path + 'prop_out_' + time_str + '_' + fn + '_' + info_name + '/'
if not os.path.exists(out_path):
os.mkdir(out_path)
return out_path
class ImageProcess:
def add_zero_padding(img):
M = img.shape[0]
pad_img = np.pad(img, ((M//2,M//2), (M//2,M//2)), 'constant')
return pad_img
def remove_zero_padding(img):
N = img.shape[0]
M = N // 2
start_num = M//2
end_num = N - M//2
return img[start_num:end_num, start_num:end_num]
def show_imgs(imgs):
for i in range(len(imgs)):
img = imgs[i]
plt.figure(figsize=(6,6))
plt.imshow(img)
plt.gray()
plt.show()
def save_imgs(out_dir, imgs):
for i in range(len(imgs)):
img = imgs[i]
cv2.imwrite(out_dir + str(i) + '.png', img)
def normalize(img):
img = img / img.max() * 255
return img
def normalize_amp_one(img):
img_max_val = CGH.amplitude(img).max()
norm_img = img / img_max_val
return norm_img
class CGH:
def response(N, l_ambda, z, p):
phase = np.zeros((N,N), dtype=float)
h = np.zeros((N,N), dtype=complex)
if N%2==0:
array_x = np.array([i % N - (N//2-0.5) for i in range(N * N)]).reshape(N, N)
else:
array_x = np.array([i % N - (N//2) for i in range(N * N)]).reshape(N, N)
array_y = array_x.T
# not use np.sqrt
# phase = np.sqrt(np.power(array_x*p, 2) + np.power(array_y*p, 2)) * np.pi / (l_ambda * z)
phase = (np.power(array_x*p, 2) + np.power(array_y*p, 2)) * np.pi / (l_ambda * z)
h = np.exp(1j*phase)
return h
def fresnel_fft(u1, N, l_ambda, z, p):
u1_shift = np.fft.fftshift(u1)
# f_u1 = np.fft.fft2(u1_shift, (N,N))
f_u1 = np.fft.fftshift(np.fft.fft2(u1_shift, (N,N)))
# h: inpulse response
h = np.zeros((N, N), dtype=complex)
h = CGH.response(N, l_ambda, z, p)
h_shift = np.fft.fftshift(h)
# f_h = np.fft.fft2(h_shift, (N,N))
f_h = np.fft.fftshift(np.fft.fft2(h_shift, (N,N)))
mul_fft = f_u1 * f_h
# ifft_mul = np.fft.ifft2(mul_fft)
# fresnel_img = np.fft.fftshift(ifft_mul)
fresnel_img = np.fft.ifftshift( np.fft.ifft2( np.fft.ifftshift(mul_fft) ) )
return fresnel_img
def propagation(u1, N, l_ambda, z, p):
fresnel_img = CGH.fresnel_fft(u1, N, l_ambda, z, p)
prop_img = np.exp(1j * (2 * np.pi) * z / l_ambda ) / (1j * l_ambda * z) * fresnel_img
return prop_img
def fraunhofer_diffraction(org_img):
f = np.fft.fft2(org_img)
fshift = np.fft.fftshift(f)
return fshift
def shift_scale_propagation(u1, N, l_ambda, z, p, shift_x=0.0, shift_y=0.0, scale=1.0):
k = 2*np.pi/l_ambda
if N%2==0:
array_x = np.array([i % N - (N//2-0.5) for i in range(N * N)]).reshape(N, N)
else:
array_x = np.array([i % N - (N//2) for i in range(N * N)]).reshape(N, N)
array_x = array_x*p
array_y = array_x.T
Cz = np.zeros((N, N), dtype=complex)
Cz = np.exp(1j*k*z) / (1j*l_ambda*z) \
* np.exp( 1j*np.pi / (l_ambda*z) * ((1-scale)*(np.power(array_x,2)) + 2*shift_x*array_x + shift_x**2) ) \
* np.exp( 1j*np.pi / (l_ambda*z) * ((1-scale)*(np.power(array_y,2)) + 2*shift_y*array_y + shift_y**2) )
exp_phi_u = np.zeros((N, N), dtype=complex)
exp_phi_u = np.exp( 1j*np.pi * ((scale**2-scale)*(np.power(array_x,2)) - 2*scale*shift_x*array_x) / (l_ambda*z) ) \
* np.exp( 1j*np.pi * ((scale**2-scale)*(np.power(array_y,2)) - 2*scale*shift_y*array_y) / (l_ambda*z) )
exp_phi_h = np.zeros((N, N), dtype=complex)
exp_phi_h = np.exp( 1j*np.pi * (scale*(np.power(array_x,2)) + scale*(np.power(array_y,2))) / (l_ambda*z) )
### 2020.05.17 change order of {fft, fftshift}
u1 = u1 * exp_phi_u
u1_shift = np.fft.fftshift(u1)
f_u1 = np.fft.fft2(u1_shift, (N,N))
h_shift = np.fft.fftshift(exp_phi_h)
f_h = np.fft.fft2(h_shift, (N,N))
mul_fft = f_u1 * f_h
ifft_mul = np.fft.ifft2(mul_fft)
ifft_mul_shift = np.fft.fftshift(ifft_mul)
u2 = Cz * ifft_mul_shift
return u2
# https://github.com/thu12zwh/band-extended-angular-spectrum-method
def angular_spectrum(u1, N, l_ambda, z, p):
u1_shift = np.fft.fftshift(u1)
f_u1 = np.fft.fftshift(np.fft.fft2(u1_shift, (N,N)))
# f_u1 = np.fft.fft2(u1_shift, (N,N))
# Angular Spectrum
k = 2.0*np.pi/l_ambda
if N%2==0:
array_fx = np.array([i % N - (N//2-0.5) for i in range(N * N)]).reshape(N, N)
else:
array_fx = np.array([i % N - (N//2) for i in range(N * N)]).reshape(N, N)
array_fx = array_fx / array_fx.max() / 2.0 / p
array_fy = array_fx.T
fx2_fy2 = np.power(array_fx, 2) + np.power(array_fy, 2)
h = np.exp( 1j*k*z*sqrt(1-fx2_fy2*(l_ambda**2)) )
mul_fft = f_u1 * h
u2 = np.fft.ifftshift( np.fft.ifft2( np.fft.ifftshift(mul_fft) ) )
# u2 = np.fft.ifftshift( np.fft.ifft2( mul_fft ) )
return u2
def band_limited_angular_spectrum(u1, N, l_ambda, z, p):
u1_shift = np.fft.fftshift(u1)
f_u1 = np.fft.fftshift(np.fft.fft2(u1_shift, (N,N)))
# f_u1 = np.fft.fft2(u1_shift, (N,N))
# Angular Spectrum
k = 2.0*np.pi/l_ambda
if N%2==0:
array_fx = np.array([i % N - (N//2-0.5) for i in range(N * N)]).reshape(N, N)
else:
array_fx = np.array([i % N - (N//2) for i in range(N * N)]).reshape(N, N)
array_fx = array_fx / array_fx.max() / 2.0 / p
array_fy = array_fx.T
fx2_fy2 = np.power(array_fx, 2) + np.power(array_fy, 2)
h = np.exp( 1j*k*z*sqrt(1-fx2_fy2*(l_ambda**2)) )
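# The cutoff fc = N*p/(2*lambda*z) below suppresses frequency components whose
# propagation direction would carry them outside the computational window over
# the distance z, where they would wrap around and alias (cf. the band-limited
# angular spectrum method of Matsushima & Shimobaba).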
fc = N*p/l_ambda/z/2
h = h * (~(np.abs(np.sqrt(fx2_fy2)) > fc)).astype(int)
mul_fft = f_u1 * h
u2 = np.fft.ifftshift( np.fft.ifft2( np.fft.ifftshift(mul_fft) ) )
# u2 = np.fft.ifftshift( np.fft.ifft2( mul_fft ) )
return u2
def amplitude(img):
amp_img = sqrt( img.real * img.real + img.imag * img.imag )
return amp_img
def phase(img):
height = img.shape[0]
width = img.shape[1]
phase_cgh = np.zeros((height,width), dtype=float)
re = img.real
im = img.imag
phase_cgh = arctan2(im, re)
return phase_cgh
def anti_phase(phase_img):
anti_phase_img = phase_img * -1.0
return anti_phase_img
def intensity(img):
intensity = img.real * img.real + img.imag * img.imag
return intensity
def exp_cgh(N, p, z, k, x_j, y_j):
r_map = np.zeros((N,N))
exp_cgh = np.empty((N,N), dtype='complex128')
if N%2==0:
array_x = np.array([i % N - (N//2-0.5) for i in range(N * N)]).reshape(N, N)
x_j = x_j % N - (N//2-0.5)
y_j = y_j % N - (N//2-0.5)
else:
array_x = np.array([i % N - (N//2) for i in range(N * N)]).reshape(N, N)
x_j = x_j % N - (N//2)
y_j = y_j % N - (N//2)
array_y = array_x.T
r_map = sqrt( np.power((array_x-x_j)*p, 2) + np.power((array_y-y_j)*p, 2) + z**2 )
exp_cgh = 1/r_map * exp(1j*k*r_map)
return exp_cgh
def normalize_exp_cgh(exp_cgh):
max_amp_abs = np.abs(CGH.amplitude(exp_cgh)).max()
norm_exp_cgh = exp_cgh / max_amp_abs
return norm_exp_cgh
def exp_cgh_zone_limit(N, p, z, k, x_j, y_j, limit_len, exp_cgh_img):
for y in range(N):
for x in range(N):
if sqrt( pow((x-x_j)*p,2) + pow((y-y_j)*p,2) ) > limit_len:
exp_cgh_img[y][x] = 0
return exp_cgh_img
def amp_cgh(N, p, z, k, x_j, y_j):
r_map = np.zeros((N,N))
amp_cgh = np.zeros((N,N))
if N%2==0:
array_x = np.array([i % N - (N//2-0.5) for i in range(N * N)]).reshape(N, N)
x_j = x_j % N - (N//2-0.5)
y_j = y_j % N - (N//2-0.5)
else:
array_x = np.array([i % N - (N//2) for i in range(N * N)]).reshape(N, N)
x_j = x_j % N - (N//2)
y_j = y_j % N - (N//2)
array_y = array_x.T
r_map = sqrt( np.power((array_x-x_j)*p, 2) + np.power((array_y-y_j)*p, 2) + z**2 )
amp_cgh = 1/r_map * cos(k*r_map)
return amp_cgh
def amp_cgh_zone_limit(N, p, z, k, x_j, y_j, limit_len, amp_cgh_img):
for y in range(N):
for x in range(N):
if sqrt( pow((x-x_j)*p,2) + pow((y-y_j)*p,2) ) > limit_len:
amp_cgh_img[y][x] = 255
return amp_cgh_img
def phase_cgh(N, p, z, k, x_j, y_j):
r_map = np.zeros((N,N))
phase_cgh = np.zeros((N,N))
if N%2==0:
array_x = np.array([i % N - (N//2-0.5) for i in range(N * N)]).reshape(N, N)
x_j = x_j % N - (N//2-0.5)
y_j = y_j % N - (N//2-0.5)
else:
array_x = np.array([i % N - (N//2) for i in range(N * N)]).reshape(N, N)
x_j = x_j % N - (N//2)
y_j = y_j % N - (N//2)
array_y = array_x.T
r_map = sqrt( np.power((array_x-x_j)*p, 2) + np.power((array_y-y_j)*p, 2) + z**2 )
re = 1/r_map * cos(k*r_map)
im = 1/r_map * sin(k*r_map)
phase_cgh = arctan2(im, re)
phase_cgh = ( phase_cgh / np.pi / 2.0 + 0.5 ) * 255
return phase_cgh
def phase_cgh_pi(N, p, z, k, x_j, y_j):
r_map = np.zeros((N,N))
phase_cgh = np.zeros((N,N))
if N%2==0:
array_x = np.array([i % N - (N//2-0.5) for i in range(N * N)]).reshape(N, N)
x_j = x_j % N - (N//2-0.5)
y_j = y_j % N - (N//2-0.5)
else:
array_x = np.array([i % N - (N//2) for i in range(N * N)]).reshape(N, N)
x_j = x_j % N - (N//2)
y_j = y_j % N - (N//2)
array_y = array_x.T
r_map = sqrt( np.power((array_x-x_j)*p, 2) + np.power((array_y-y_j)*p, 2) + z**2 )
re = 1/r_map * cos(k*r_map)
im = 1/r_map * sin(k*r_map)
phase_cgh = arctan2(im, re)
return phase_cgh
def phase_norm(phase_pi):
height = phase_pi.shape[0]
width = phase_pi.shape[1]
phase_norm = np.zeros((height,width))
phase_norm = (phase_pi + np.pi) / (2.0*np.pi) * 255
return phase_norm
def phase_from_img(phase_img):
height = phase_img.shape[0]
width = phase_img.shape[1]
phase_pi = np.zeros((height,width))
phase_pi = (phase_img / 255.0 * 2.0 - 1.0) * np.pi
return phase_pi
def amp_abs(amp_cgh_img):
new_amp_cgh = amp_cgh_img + abs(amp_cgh_img.min())
new_amp_cgh = new_amp_cgh / new_amp_cgh.max() * 255
return new_amp_cgh
def pupil_func(N, p, pupil_r_m):
p_xy = np.zeros((N, N))
screen_size = N * p
half_size = screen_size / 2.0
x = np.linspace(-1 * half_size, half_size, N)
y = np.linspace(-1 * half_size, half_size, N)
[X,Y] = np.meshgrid(x,y)
r_map = sqrt(X**2+Y**2)
p_xy = np.where(r_map>pupil_r_m, 0.0, 1.0)
return p_xy
def big_pupil_func(N):
p_xy = np.full((N,N), 1.0)
return p_xy
def norm_wave_aberration(N, nm):
W_xy = np.zeros((N, N))
x = np.linspace(-1, 1, N)
y = np.linspace(-1, 1, N)
[X,Y] = np.meshgrid(x,y)
r_map = sqrt(X**2+Y**2)
theta_map = np.arctan2(Y,X)
W_xy = zernike.z_func(nm[0], nm[1], r_map, theta_map, N)
r_map = np.where(r_map>1.0, 0.0, 1.0)
W_xy = W_xy * r_map
return W_xy
def resize_and_add_pad(W_xy, N, p, pupil_r_m):
pupil_pix = int(pupil_r_m / p * 2)
resized_W_xy = cv2.resize(W_xy, dsize=(pupil_pix, pupil_pix))
add_len01 = int((N - pupil_pix) / 2)
add_len02 = N - pupil_pix - add_len01
pad_img = np.pad(resized_W_xy, ((add_len01,add_len02), (add_len01,add_len02)), 'constant')
return pad_img
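
# A minimal usage sketch (illustrative; all parameter values below are
# assumptions, not taken from the original script): Fresnel-propagate a
# single on-axis point source and inspect the resulting amplitude pattern.
# N, p, l_ambda, z = 256, 8e-6, 532e-9, 0.2
# u1 = np.zeros((N, N), dtype=complex)
# u1[N // 2, N // 2] = 1.0
# u2 = CGH.propagation(u1, N, l_ambda, z, p)
# ImageProcess.show_imgs([CGH.amplitude(u2)])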
#Converts GPAs and GRE scores to two lists of accepted and rejected school
#rankings, respectively, using Keras linear regression built with the
#functional API and no hidden layer.
#Data downloaded from https://github.com/evansrjames/gradcafe-admissions-data.
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"]="2"
import json
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from tensorflow.python.keras.models import Model, Sequential
from tensorflow.python.keras.layers import Input, Dense
from tensorflow.python.keras.callbacks import Callback, EarlyStopping
with open("ranking.json", "r") as rank:
rank_dic=json.load(rank)
def key(no):
if no in range(len(rank_dic.keys())):
return list(rank_dic.keys())[no]
def rank(no):
return int(key(no).split("-")[0])
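#Note (an assumption about ranking.json's contents): the keys are strings of
#the form "1-20", so rank(no) returns the lower bound of the no-th bucket.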
def train(data):
with open("thegradcafe_accepted.json", "r") as nf:
list1=json.load(nf)
x1 = np.array(list1[0])
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 31 15:48:57 2020
@author: eugen
This file contains possible static and dynamic testing policies for sampling
from end nodes. Static policies are called once at the beginning of the
simulation replication, while dynamic policies are called either every day
or on an interval basis. Each function takes the following inputs:
1) resultsList: A list with rows corresponding to each end node, with each
row having the following format:[Node ID, Num Samples,
Num Positive, Positive Rate, [IntNodeSourceCounts]]
2) totalSimDays=1000: Total number of days in the simulation
3) numDaysRemain=1000: Total number of days left in the simulation (same as
totalSimDays if a static policy)
4) totalBudget=1000: Total sampling budget for the simulation run
5) numBudgetRemain=1000: Total budget left, in number of samples (same as
totalBudget if a static policy)
6) policyParamList=[0]: List of different policy parameters that might be
called by different policy functions
And outputs a single list, sampleSchedule, with the following elements in each entry:
1) Day: Simulation day of the scheduled test
2) Node: Which node to test on the respective day
"""
import numpy as np
import random
from scipy.stats import beta
import scipy.special as sps
import utilities as simHelpers
import methods as simEst
def testPolicyHandler(polType,resultsList,totalSimDays=1000,numDaysRemain=1000,\
totalBudget=1000,numBudgetRemain=1000,policyParamList=[0],startDay=0):
'''
Takes in a testing policy choice, calls the respective function, and
returns the generated testing schedule
'''
polStr = ['Static_Deterministic','Static_Random','Dyn_EpsGreedy',\
'Dyn_EpsExpDecay','Dyn_EpsFirst','Dyn_ThompSamp','Dyn_EveryOther',\
'Dyn_EpsSine','Dyn_TSwithNUTS','Dyn_ExploreWithNUTS',\
'Dyn_ExploreWithNUTS_2','Dyn_ThresholdWithNUTS']
if polType not in polStr:
raise ValueError("Invalid policy type. Expected one of: %s" % polStr)
if polType == 'Static_Deterministic':
sampleSchedule = Pol_Stat_Deterministic(resultsList,totalSimDays,numDaysRemain,\
totalBudget,numBudgetRemain,policyParamList,startDay)
elif polType == 'Static_Random':
sampleSchedule = Pol_Stat_Random(resultsList,totalSimDays,numDaysRemain,\
totalBudget,numBudgetRemain,policyParamList,startDay)
elif polType == 'Dyn_EpsGreedy':
sampleSchedule = Pol_Dyn_EpsGreedy(resultsList,totalSimDays,numDaysRemain,\
totalBudget,numBudgetRemain,policyParamList,startDay)
elif polType == 'Dyn_EpsExpDecay':
sampleSchedule = Pol_Dyn_EpsExpDecay(resultsList,totalSimDays,numDaysRemain,\
totalBudget,numBudgetRemain,policyParamList,startDay)
elif polType == 'Dyn_EpsFirst':
sampleSchedule = Pol_Dyn_EpsFirst(resultsList,totalSimDays,numDaysRemain,\
totalBudget,numBudgetRemain,policyParamList,startDay)
elif polType == 'Dyn_ThompSamp':
sampleSchedule = Pol_Dyn_ThompSamp(resultsList,totalSimDays,numDaysRemain,\
totalBudget,numBudgetRemain,policyParamList,startDay)
elif polType == 'Dyn_EveryOther':
sampleSchedule = Pol_Dyn_EveryOther(resultsList,totalSimDays,numDaysRemain,\
totalBudget,numBudgetRemain,policyParamList,startDay)
elif polType == 'Dyn_EpsSine':
sampleSchedule = Pol_Dyn_EpsSine(resultsList,totalSimDays,numDaysRemain,\
totalBudget,numBudgetRemain,policyParamList,startDay)
elif polType == 'Dyn_TSwithNUTS':
sampleSchedule = Pol_Dyn_TSwithNUTS(resultsList,totalSimDays,numDaysRemain,\
totalBudget,numBudgetRemain,policyParamList,startDay)
elif polType == 'Dyn_ExploreWithNUTS':
sampleSchedule = Pol_Dyn_ExploreWithNUTS(resultsList,totalSimDays,numDaysRemain,\
totalBudget,numBudgetRemain,policyParamList,startDay)
elif polType == 'Dyn_ExploreWithNUTS_2':
sampleSchedule = Pol_Dyn_ExploreWithNUTS2(resultsList,totalSimDays,numDaysRemain,\
totalBudget,numBudgetRemain,policyParamList,startDay)
elif polType == 'Dyn_ThresholdWithNUTS':
sampleSchedule = Pol_Dyn_ThresholdWithNUTS(resultsList,totalSimDays,numDaysRemain,\
totalBudget,numBudgetRemain,policyParamList,startDay)
return sampleSchedule
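
# Illustrative call (the toy rows below are assumptions that follow the
# resultsList format documented above, i.e. [Node ID, Num Samples,
# Num Positive, Positive Rate, [IntNodeSourceCounts]]):
# toyResults = [[0, 10, 2, 0.2, [5, 5]], [1, 10, 5, 0.5, [6, 4]]]
# sched = testPolicyHandler('Static_Deterministic', toyResults,
#                           totalSimDays=10, numDaysRemain=10,
#                           totalBudget=20, numBudgetRemain=20)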
def SampPol_Uniform(sysDict,testingDataList=[],numSamples=1,dataType='Tracked',
sens=1.0,spec=1.0,randSeed=-1):
'''
Conducts 'numSamples' random samples on the entered system dictionary and returns
a table of results according to the entered 'dataType' ('Tracked' or 'Untracked')
If testingDataList is non-empty, new results are appended to it
sysDict requires the following keys:
outletNames/importerNames: list of strings
sourcingMat: Numpy matrix
Matrix of sourcing probabilities between importers and outlets
trueRates: list
List of true SFP manifestation rates, in [importers, outlets] form
'''
impNames, outNames = sysDict['importerNames'], sysDict['outletNames']
numImp, numOut = len(impNames), len(outNames)
trueRates, sourcingMat = sysDict['trueRates'], sysDict['sourcingMat']
if dataType == 'Tracked':
if randSeed >= 0:
random.seed(randSeed + 2)
for currSamp in range(numSamples):
currOutlet = random.sample(outNames, 1)[0]
currImporter = random.choices(impNames, weights=sourcingMat[outNames.index(currOutlet)], k=1)[0]
currOutRate = trueRates[numImp + outNames.index(currOutlet)]
currImpRate = trueRates[impNames.index(currImporter)]
realRate = currOutRate + currImpRate - currOutRate * currImpRate
realResult = np.random.binomial(1, p=realRate)
if realResult == 1:
result = np.random.binomial(1, p=sens)
if realResult == 0:
result = np.random.binomial(1, p = 1-spec)
testingDataList.append([currOutlet, currImporter, result])
elif dataType == 'Untracked':
if randSeed >= 0:
random.seed(randSeed + 3)
for currSamp in range(numSamples):
currOutlet = random.sample(outNames, 1)[0]
currImporter = random.choices(impNames, weights=sourcingMat[outNames.index(currOutlet)], k=1)[0]
currOutRate = trueRates[numImp + outNames.index(currOutlet)]
currImpRate = trueRates[impNames.index(currImporter)]
realRate = currOutRate + currImpRate - currOutRate * currImpRate
realResult = np.random.binomial(1, p=realRate)
if realResult == 1:
result = np.random.binomial(1, p = sens)
if realResult == 0:
result = np.random.binomial(1, p = 1-spec)
testingDataList.append([currOutlet, result])
return testingDataList.copy()
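
# A minimal usage sketch (illustrative; the names, sourcing matrix, and SFP
# rates below are assumptions, not data from any real system):
# toyDict = {'importerNames': ['I1', 'I2'], 'outletNames': ['O1', 'O2'],
#            'sourcingMat': np.array([[0.7, 0.3], [0.4, 0.6]]),
#            'trueRates': [0.2, 0.05, 0.1, 0.3]}
# print(SampPol_Uniform(toyDict, testingDataList=[], numSamples=5, randSeed=7))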
def Pol_Stat_Deterministic(resultsList,totalSimDays=1000,numDaysRemain=1000,\
totalBudget=1000,numBudgetRemain=1000,policyParamList=[0],startDay=0):
"""
Deterministic policy that rotates through each end node in numerical order
until the sampling budget is exhausted, such that Day 1 features End Node 1,
Day 2 features End Node 2, etc.
"""
#Initialize our output, a list with the above mentioned outputs
sampleSchedule = []
endNodes = []
for nodeInd in range(len(resultsList)):
endNodes.append(resultsList[nodeInd][0])
# Generate a sampling schedule iterating through each end node
nodeCount = 0
currNode = endNodes[nodeCount]
lastEndNode = endNodes[-1]
for samp in range(totalBudget):
day = np.mod(samp,totalSimDays-startDay)
sampleSchedule.append([day+startDay,currNode])
if currNode == lastEndNode:
nodeCount = 0
currNode = endNodes[nodeCount]
else:
nodeCount += 1
currNode = endNodes[nodeCount]
sampleSchedule.sort(key=lambda x: x[0]) # Sort our schedule by day before output
return sampleSchedule
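
# Illustrative call (toy rows are assumptions in the resultsList format; this
# policy only reads the node IDs in column 0):
# Pol_Stat_Deterministic([[0, 0, 0, 0, []], [1, 0, 0, 0, []]],
#                        totalSimDays=2, totalBudget=4)
# returns [[0, 0], [0, 0], [1, 1], [1, 1]], i.e. each node is always
# sampled on its own fixed simulation day.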
def Pol_Stat_Random(resultsList,totalSimDays=1000,numDaysRemain=1000,\
totalBudget=1000,numBudgetRemain=1000,policyParamList=[0],startDay=0):
"""
Random policy that selects random nodes on each day until the sampling
budget is exhausted
"""
#Initialize our output, a list with the above mentioned outputs
sampleSchedule = []
endNodes = []
for nodeInd in range(len(resultsList)):
endNodes.append(resultsList[nodeInd][0])
numEndNodes = len(endNodes)
# Generate a sampling schedule randomly sampling the list of end nodes
for samp in range(totalBudget):
day = np.mod(samp,totalSimDays-startDay)
currEndInd = int(np.floor(np.random.uniform(low=0,high=numEndNodes,size=1)))
currNode = endNodes[currEndInd]
sampleSchedule.append([day+startDay,currNode])
sampleSchedule.sort(key=lambda x: x[0]) # Sort our schedule by day before output
return sampleSchedule
def Pol_Dyn_EpsGreedy(resultsList,totalSimDays=1000,numDaysRemain=1000,\
totalBudget=1000,numBudgetRemain=1000,policyParamList=[0],startDay=0):
"""
Epsilon-greedy policy, where the first element of policyParamList is the
desired exploration ratio, epsilon
"""
#Initialize our output, a list with the above mentioned outputs
sampleSchedule = []
nextTestDay = totalSimDays - numDaysRemain # The day we are generating a schedule for
eps = policyParamList[0] # Our explore parameter
numToTest = int(np.floor(numBudgetRemain / numDaysRemain)) +\
min(numBudgetRemain % numDaysRemain,1) # How many samples to conduct in the next day
# Generate a sampling schedule using the current list of results
# First grab the pool of highest SF rate nodes
maxSFRate = 0
maxIndsList = []
for rw in resultsList:
if rw[3] > maxSFRate:
maxSFRate = rw[3]
for currInd in range(len(resultsList)):
if resultsList[currInd][3] == maxSFRate:
maxIndsList.append(currInd)
for testNum in range(numToTest):
# Explore or exploit?
if np.random.uniform() < 1-eps: # Exploit
exploitBool = True
else:
exploitBool = False
# Based on the previous dice roll, generate a sampling point
if exploitBool:
testInd = np.random.choice(maxIndsList)
NodeToTest = resultsList[testInd][0]
else:
testInd = np.random.choice(len(resultsList))
NodeToTest = resultsList[testInd][0]
sampleSchedule.append([nextTestDay,NodeToTest])
# Need to sort this list before passing it through
sampleSchedule.sort(key=lambda x: x[0])
return sampleSchedule
def Pol_Dyn_EpsExpDecay(resultsList,totalSimDays=1000,numDaysRemain=1000,\
totalBudget=1000,numBudgetRemain=1000,policyParamList=[0],startDay=0):
"""
Similar to the epsilon-greedy strategy, except that the value of epsilon
decays exponentially over time, resulting in more exploring at the start and
more exploiting at the end; initial epsilon is drawn from the parameter list
"""
#Initialize our output, a list with the above mentioned outputs
sampleSchedule = []
nextTestDay = totalSimDays - numDaysRemain # The day we are generating a schedule for
eps = np.exp(-1*(nextTestDay/totalSimDays)/policyParamList[0])
numToTest = int(np.floor(numBudgetRemain / numDaysRemain)) +\
min(numBudgetRemain % numDaysRemain,1) # How many samples to conduct in the next day
# Generate a sampling schedule using the current list of results
# First grab the pool of highest SF rate nodes
maxSFRate = 0
maxIndsList = []
for rw in resultsList:
if rw[3] > maxSFRate:
maxSFRate = rw[3]
for currInd in range(len(resultsList)):
if resultsList[currInd][3] == maxSFRate:
maxIndsList.append(currInd)
for testNum in range(numToTest):
# Explore or exploit?
if np.random.uniform() < 1-eps: # Exploit
exploitBool = True
else:
exploitBool = False
# Based on the previous dice roll, generate a sampling point
if exploitBool:
testInd = np.random.choice(maxIndsList)
NodeToTest = resultsList[testInd][0]
else:
testInd = np.random.choice(len(resultsList))
NodeToTest = resultsList[testInd][0]
sampleSchedule.append([nextTestDay,NodeToTest])
# Need to sort this list before passing it through
sampleSchedule.sort(key=lambda x: x[0])
return sampleSchedule
def Pol_Dyn_EpsFirst(resultsList,totalSimDays=1000,numDaysRemain=1000,\
totalBudget=1000,numBudgetRemain=1000,policyParamList=[0],startDay=0):
"""
Epsilon is now the fraction of our budget we devote to exploration before
moving to pure exploitation
"""
#Initialize our output, a list with the above mentioned outputs
sampleSchedule = []
nextTestDay = totalSimDays - numDaysRemain # The day we are generating a schedule for
eps = policyParamList[0] # Our exploration fraction
numToTest = int(np.floor(numBudgetRemain / numDaysRemain)) +\
min(numBudgetRemain % numDaysRemain,1) # How many samples to conduct in the next day
# Generate a sampling schedule using the current list of results
# First grab the pool of highest SF rate nodes
maxSFRate = 0
maxIndsList = []
for rw in resultsList:
if rw[3] > maxSFRate:
maxSFRate = rw[3]
for currInd in range(len(resultsList)):
if resultsList[currInd][3] == maxSFRate:
maxIndsList.append(currInd)
for testNum in range(numToTest):
# Explore or exploit?
if numBudgetRemain > (1-eps)*totalBudget: # Explore
exploitBool = False
else:
exploitBool = True
# Based on the previous dice roll, generate a sampling point
if exploitBool:
testInd = np.random.choice(maxIndsList)
NodeToTest = resultsList[testInd][0]
else:
testInd = np.random.choice(len(resultsList))
NodeToTest = resultsList[testInd][0]
sampleSchedule.append([nextTestDay,NodeToTest])
# Need to sort this list before passing it through
sampleSchedule.sort(key=lambda x: x[0])
return sampleSchedule
def Pol_Dyn_ThompSamp(resultsList,totalSimDays=1000,numDaysRemain=1000,\
totalBudget=1000,numBudgetRemain=1000,policyParamList=[0],startDay=0):
"""
Thompson sampling, using the testing results achieved thus far
"""
#Initialize our output, a list with the above mentioned outputs
sampleSchedule = []
nextTestDay = totalSimDays - numDaysRemain # The day we are generating a schedule for
numToTest = int(np.floor(numBudgetRemain / numDaysRemain)) +\
min(numBudgetRemain % numDaysRemain,1) # How many samples to conduct in the next day
# Generate a sampling schedule using the current list of results
for testNum in range(numToTest):
# Iterate through each end node, generating an RV according to the beta distribution of samples + positives
betaSamples = []
for rw in resultsList:
alphaCurr = 1 + rw[2]
betaCurr = 1 + (rw[1]-rw[2])
sampleCurr = np.random.beta(alphaCurr,betaCurr)
betaSamples.append(sampleCurr)
# Select the highest variable
maxSampleInd = betaSamples.index(max(betaSamples))
NodeToTest = resultsList[maxSampleInd][0]
sampleSchedule.append([nextTestDay,NodeToTest])
# Need to sort this list before passing it through
sampleSchedule.sort(key=lambda x: x[0])
return sampleSchedule
def Pol_Dyn_EveryOther(resultsList,totalSimDays=1000,numDaysRemain=1000,\
totalBudget=1000,numBudgetRemain=1000,policyParamList=[0],startDay=0):
"""
Every-other sampling, where we exploit on odd days, explore on even days
"""
#Initialize our output, a list with the above mentioned outputs
sampleSchedule = []
nextTestDay = totalSimDays - numDaysRemain # The day we are generating a schedule for
numToTest = int(np.floor(numBudgetRemain / numDaysRemain)) +\
min(numBudgetRemain % numDaysRemain,1) # How many samples to conduct in the next day
# Generate a sampling schedule using the current list of results
# First grab the pool of highest SF rate nodes
maxSFRate = 0
maxIndsList = []
for rw in resultsList:
if rw[3] > maxSFRate:
maxSFRate = rw[3]
for currInd in range(len(resultsList)):
if resultsList[currInd][3] == maxSFRate:
maxIndsList.append(currInd)
for testNum in range(numToTest):
# Explore or exploit?
if nextTestDay%2 == 1: # Exploit if we are on an odd sampling schedule day
exploitBool = True
else:
exploitBool = False
# Based on the previous dice roll, generate a sampling point
if exploitBool:
testInd = np.random.choice(maxIndsList)
NodeToTest = resultsList[testInd][0]
else:
testInd = np.random.choice(len(resultsList))
NodeToTest = resultsList[testInd][0]
sampleSchedule.append([nextTestDay,NodeToTest])
# Need to sort this list before passing it through
sampleSchedule.sort(key=lambda x: x[0])
return sampleSchedule
def Pol_Dyn_EpsSine(resultsList,totalSimDays=1000,numDaysRemain=1000,\
totalBudget=1000,numBudgetRemain=1000,policyParamList=[0],startDay=0):
"""
Epsilon follows a sine function of the number of days that have elapsed
"""
#Initialize our output, a list with the above mentioned outputs
sampleSchedule = []
nextTestDay = totalSimDays - numDaysRemain # The day we are generating a schedule for
eps = (np.sin(12.4*nextTestDay)) # Exploit when positive, explore when negative
numToTest = int(np.floor(numBudgetRemain / numDaysRemain)) +\
min(numBudgetRemain % numDaysRemain,1) # How many samples to conduct in the next day
# Generate a sampling schedule using the current list of results
# First grab the pool of highest SF rate nodes
maxSFRate = 0
maxIndsList = []
for rw in resultsList:
if rw[3] > maxSFRate:
maxSFRate = rw[3]
for currInd in range(len(resultsList)):
if resultsList[currInd][3] == maxSFRate:
maxIndsList.append(currInd)
for testNum in range(numToTest):
# Explore or exploit?
if 0 < eps: # Exploit
exploitBool = True
else:
exploitBool = False
# Based on the previous dice roll, generate a sampling point
if exploitBool:
testInd = np.random.choice(maxIndsList)
NodeToTest = resultsList[testInd][0]
else:
testInd = np.random.choice(len(resultsList))
NodeToTest = resultsList[testInd][0]
sampleSchedule.append([nextTestDay,NodeToTest])
# Need to sort this list before passing it through
sampleSchedule.sort(key=lambda x: x[0])
return sampleSchedule
def Pol_Dyn_TSwithNUTS(resultsList,totalSimDays=1000,numDaysRemain=1000,\
totalBudget=1000,numBudgetRemain=1000,policyParamList=[0],startDay=0):
"""
Grab intermediate and end node distributions via NUTS, then project onto
end nodes for different samples from the resulting distribution; pick
the largest projected SF estimate
policyParamList = [number days to plan for, sensitivity, specificity, M,
Madapt, delta]
(Only enter the number of days to plan for in the main simulation code,
as the other parameters will be pulled from the respective input areas)
"""
    # Initialize our output, a list of [day, node] sampling assignments
sampleSchedule = []
# How many days to plan for?
numDaysToSched = min(policyParamList[0],numDaysRemain)
usedBudgetSoFar = 0
firstTestDay = totalSimDays - numDaysRemain
    if numDaysRemain == totalSimDays: # Our initial schedule should just be a distributed exploration
currNode = resultsList[0][0]
for currDay in range(numDaysToSched):
numToTest = int(np.floor((numBudgetRemain-usedBudgetSoFar) / (numDaysRemain-currDay))) +\
min((numBudgetRemain-usedBudgetSoFar) % (numDaysRemain-currDay),1) # How many samples to conduct in the next day
for testInd in range(numToTest): # Iterate through our end nodes
if currNode > resultsList[len(resultsList)-1][0]:
currNode = resultsList[0][0]
sampleSchedule.append([firstTestDay+currDay,currNode])
currNode += 1
else:
sampleSchedule.append([firstTestDay+currDay,currNode])
currNode += 1
usedBudgetSoFar += 1
else: # Generate NUTS sample using current results and use it to generate a new schedule
ydata = []
nSamp = []
for rw in resultsList:
ydata.append(rw[2])
nSamp.append(rw[1])
A = simEst.GenerateTransitionMatrix(resultsList)
sens, spec, M, Madapt, delta = policyParamList[1:]
NUTSsamples = simEst.GenerateNUTSsamples(ydata,nSamp,A,sens,spec,M,Madapt,delta)
# Now pick from these samples to generate projections
for currDay in range(numDaysToSched):
numToTest = int(np.floor((numBudgetRemain-usedBudgetSoFar) / (numDaysRemain-currDay))) +\
min((numBudgetRemain-usedBudgetSoFar) % (numDaysRemain-currDay),1) # How many samples to conduct in the next day
for testInd in range(numToTest):
currSample = sps.expit(NUTSsamples[random.randrange(len(NUTSsamples))])
probs = currSample[A.shape[1]:] + np.matmul(A,currSample[:A.shape[1]])
# Normalize? Or just pick largest value
highInd = [i for i,j in enumerate(probs) if j == max(probs)]
currNode = resultsList[highInd[0]][0]
sampleSchedule.append([firstTestDay+currDay,currNode])
usedBudgetSoFar += 1
# Need to sort this list before passing it through
sampleSchedule.sort(key=lambda x: x[0])
return sampleSchedule
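# A worked sketch of the projection step above with toy numbers (the 2x2
# layout is illustrative): a NUTS draw holds intermediate-node logits
# followed by end-node logits; expit maps them to rates, and each end
# node's SF estimate is its own term plus its row of A applied to the
# intermediate rates.
def _demo_ts_projection():
    A = np.array([[1.0, 0.0],   # end node 0 sources only from intermediate 0
                  [0.5, 0.5]])  # end node 1 sources from both intermediates
    z = np.array([-2.0, -1.0,   # intermediate-node logits
                  -3.0, -3.0])  # end-node logits
    currSample = sps.expit(z)
    probs = currSample[A.shape[1]:] + np.matmul(A, currSample[:A.shape[1]])
    return probs  # the policy tests the end node with the largest entry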
def Pol_Dyn_ExploreWithNUTS(resultsList,totalSimDays=1000,numDaysRemain=1000,\
totalBudget=1000,numBudgetRemain=1000,policyParamList=[0],startDay=0):
"""
    Grab intermediate and end node distributions via NUTS. Identify intermediate node
    sample variances. Pick an intermediate node, weighted towards picking those
with higher sample variances. Pick an outlet from this intermediate node's
column in the transition matrix A, again by a weighting (where 0% nodes
have a non-zero probability of being selected). [log((p/1-p) + eps)?]
policyParamList = [number days to plan for, sensitivity, specificity, M,
Madapt, delta]
(Only enter the number of days to plan for in the main simulation code,
as the other parameters will be pulled from the respective input areas)
"""
    # Initialize our output, a list of [day, node] sampling assignments
sampleSchedule = []
# How many days to plan for?
numDaysToSched = min(policyParamList[0],numDaysRemain)
usedBudgetSoFar = 0
firstTestDay = totalSimDays - numDaysRemain
    if numDaysRemain == totalSimDays: # Our initial schedule should just be a distributed exploration
currNode = resultsList[0][0]
for currDay in range(numDaysToSched):
numToTest = int(np.floor((numBudgetRemain-usedBudgetSoFar) / (numDaysRemain-currDay))) +\
min((numBudgetRemain-usedBudgetSoFar) % (numDaysRemain-currDay),1) # How many samples to conduct in the next day
for testInd in range(numToTest): # Iterate through our end nodes
if currNode > resultsList[len(resultsList)-1][0]:
currNode = resultsList[0][0]
sampleSchedule.append([firstTestDay+currDay,currNode])
currNode += 1
else:
sampleSchedule.append([firstTestDay+currDay,currNode])
currNode += 1
usedBudgetSoFar += 1
else: # Generate NUTS sample using current results and use it to generate a new schedule
ydata = []
nSamp = []
for rw in resultsList:
ydata.append(rw[2])
nSamp.append(rw[1])
A = simHelpers.GenerateTransitionMatrix(resultsList)
sens, spec, M, Madapt, delta = policyParamList[1:]
NUTSsamples = simEst.GenerateNUTSsamples(ydata,nSamp,A,sens,spec,M,Madapt,delta)
# Store sample variances for intermediate nodes
NUTSintVars = []
for intNode in range(A.shape[1]):
currVar = np.var(sps.expit(NUTSsamples[:,intNode]))
NUTSintVars.append(currVar)
# Normalize sum of all variances to 1
NUTSintVars = NUTSintVars/np.sum(NUTSintVars)
# Now pick from these samples to generate projections
for currDay in range(numDaysToSched):
numToTest = int(np.floor((numBudgetRemain-usedBudgetSoFar) / (numDaysRemain-currDay))) +\
min((numBudgetRemain-usedBudgetSoFar) % (numDaysRemain-currDay),1) # How many samples to conduct in the next day
for testInd in range(numToTest):
# Pick an intermediate node to "target", with more emphasis on higher sample variances
rUnif = random.uniform(0,1)
for intInd in range(A.shape[1]):
if rUnif < np.sum(NUTSintVars[0:(intInd+1)]):
targIntInd = intInd
break
# Go through the same process with the column of A
# pertaining to this target intermediate node
AtargCol = [row[targIntInd] for row in A]
# Add a small epsilon, for 0 values, and normalize
                AtargCol = np.add(AtargCol,1e-3)
                AtargCol = AtargCol/np.sum(AtargCol)
                # Pick an outlet from this column by the same
                # cumulative-weight draw used for the intermediate nodes
                rUnif = random.uniform(0,1)
                for outInd in range(len(AtargCol)):
                    if rUnif < np.sum(AtargCol[0:(outInd+1)]):
                        currNode = resultsList[outInd][0]
                        break
                sampleSchedule.append([firstTestDay+currDay,currNode])
                usedBudgetSoFar += 1
    # Need to sort this list before passing it through
    sampleSchedule.sort(key=lambda x: x[0])
    return sampleSchedule
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
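# Quick illustration (not part of the generated data): for P -1 the
# transposed rotations are +I and -I with zero translations, so a
# reflection and its Friedel mate come back with unit phase factors.
def _demo_p1bar_equivalents():
    hkls, phases = space_groups['P -1'].symmetryEquivalentMillerIndices(
        N.array([1, 2, 3]))
    # hkls -> [[1, 2, 3], [-1, -2, -3]]; phases -> [1.+0.j, 1.+0.j]
    return hkls, phases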
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
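# Sketch of applying a transformation tuple to a fractional coordinate
# (illustrative; the class itself only applies rotations to Miller
# indices): a point x maps to rot.x + trans_num/trans_den, so the 2_1
# screw of P 1 21 1 sends (x, y, z) to (-x, y + 1/2, -z).
def _demo_apply_screw():
    rot21, tn, td = space_groups['P 1 21 1'].transformations[1]
    point = N.array([0.1, 0.2, 0.3])
    image = N.dot(rot21, point) + tn.astype(float)/td
    return image  # -> [-0.1, 0.7, -0.3]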
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
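# Space group 46: I m a 2; four primitive operations with (1/2,0,0)
# translations plus I-centring copies.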
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
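# Space group 47: P m m m (orthorhombic, point group mmm); symmorphic --
# all eight point-group operations with zero translation.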
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
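# Space group 48: P n n n, origin choice 2 (the ':2' suffix); eight operations
# with n-glide half translations such as (0,1/2,1/2) and (1/2,0,1/2).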
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
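# Space group 49: P c c m; c-glide translations (0,0,1/2) on four of the
# eight operations.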
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
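# Space group 50: P b a n, origin choice 2; half translations along a and b,
# e.g. (0,1/2,0) and (1/2,0,0).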
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
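# Space group 51: P m m a; (1/2,0,0) translations on half of the operations.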
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
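# Space group 52: P n n a; translations (0,1/2,1/2), (1/2,1/2,1/2), (1/2,0,0).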
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
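# Space group 53: P m n a; (1/2,0,1/2) translations on four of the eight
# operations.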
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
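# Space group 54: P c c a; translations (1/2,0,1/2), (0,0,1/2) and (1/2,0,0).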
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
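# Space group 55: P b a m; (1/2,1/2,0) translations on half of the operations.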
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
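# Space group 56: P c c n; translations (1/2,0,1/2), (0,1/2,1/2), (1/2,1/2,0).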
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
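# Space group 57: P b c m; translations (0,1/2,0), (0,1/2,1/2) and (0,0,1/2).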
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
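# Space group 58: P n n m; (1/2,1/2,1/2) translations on four operations.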
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
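# Space group 59: P m m n, origin choice 2; translations (1/2,0,0), (0,1/2,0)
# and (1/2,1/2,0).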
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
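# Space group 60: P b c n; translations (1/2,1/2,0), (0,0,1/2), (1/2,1/2,1/2).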
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
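# Space group 61: P b c a; the three half translations (1/2,1/2,0),
# (0,1/2,1/2) and (1/2,0,1/2) on the screw/glide operations.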
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
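# Space group 62: P n m a; translations (1/2,1/2,1/2), (0,1/2,0), (1/2,0,1/2).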
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
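# Space group 63: C m c m; eight primitive operations followed by their
# C-centring (+1/2,+1/2,0) copies (sixteen in all).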
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
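# Space group 64: C m c a; eight primitive operations plus C-centring copies,
# with centred translations stored unreduced, e.g. (1,1/2,1/2).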
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
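# Space group 65: C m m m; symmorphic -- zero-translation operations plus
# their C-centring copies.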
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
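# Space group 66: C c c m; c-glide translations (0,0,1/2) plus C-centring
# copies.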
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
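# Space group 67: C m m a; (1/2,0,0) translations plus C-centring copies
# (some stored unreduced, e.g. (1,1/2,0)).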
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
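# Space group 68: C c c a, origin choice 2; eight primitive operations plus
# C-centring copies, translations stored unreduced.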
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
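# Space group 69: F m m m; symmorphic -- the eight zero-translation
# operations repeated over the four F-centring translations (0,0,0),
# (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0): 32 operations in all.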
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
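# Space group 70: F d d d, origin choice 2; d-glide quarter translations
# (e.g. (0,1/4,1/4)) repeated over the four F-centring translations.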
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
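# Space group 71: I m m m; symmorphic -- eight zero-translation operations
# plus their (1/2,1/2,1/2) I-centring copies.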
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
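# Space group 72: I b a m; translations (0,0,1/2), (1/2,0,0) and (0,1/2,0)
# on the primitive operations, plus I-centring copies.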
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
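# --- Illustrative check (hypothetical, not part of the generated table) ---
# P 41 (No. 76, above) and P 43 are an enantiomorphic pair: the same
# 4-fold rotation combined with a screw translation of c/4 versus 3c/4.
# Either screw operation closes after four applications: the rotation
# returns to the identity and the translations accumulate to a full
# lattice vector.  A sanity-check sketch, assuming `N` is numpy:
def _screw_closes(rot, t, order=4):
    r = N.identity(3)
    acc = N.zeros(3, 'd')
    for _ in range(order):
        acc = N.dot(rot, acc) + t   # translation part of the composition
        r = N.dot(rot, r)           # rotation part of the composition
    return N.allclose(r, N.identity(3)) and N.allclose(acc % 1.0, 0.0)
# e.g. _screw_closes(N.array([[0,-1,0],[1,0,0],[0,0,1]]), N.array([0., 0., 0.75]))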
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
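# Note: the second operation of 'P -4' above (x' = y, y' = -x, z' = -z) is
# the rotoinversion -4: a 90-degree rotation about z followed by inversion
# through the origin.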
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
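# Note: the ':2' suffix in symbols such as 'P 4/n :2' marks origin
# choice 2 of the International Tables, with the origin placed at a
# centre of inversion.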
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
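# --- Note (illustrative, not part of the generated table) -----------------
# Body-centred (I) groups such as I 4/m list every point operation twice:
# once with its base translation and once with the I-centering vector
# (1/2, 1/2, 1/2) added.  The sums are written unreduced (hence entries
# like 5/4 elsewhere in this table), which is equivalent modulo full
# lattice translations.  A hypothetical pairing check, assuming `N` is
# numpy:
def _centering_offsets(ops):
    half = len(ops) // 2
    return [(N.array(t2, 'd') / d2 - N.array(t1, 'd') / d1) % 1.0
            for (_, t1, d1), (_, t2, d2) in zip(ops[:half], ops[half:])]
# For an I-centred group every offset should come out as (0.5, 0.5, 0.5).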
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
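# --- Illustrative use (hypothetical, not part of the generated table) -----
# Applying every operation of a group to a general position and reducing
# modulo 1 gives the symmetry-equivalent positions in the unit cell; for
# 'P 4 m m' a general position has multiplicity 8, one per operation
# listed above.  A minimal sketch, assuming `N` is numpy and reusing the
# hypothetical _apply_symop helper sketched earlier in this file:
def _equivalent_positions(ops, xyz, tol=1e-6):
    orbit = []
    for op in ops:
        p = _apply_symop(op, N.array(xyz, 'd'))
        if not any(N.allclose(p, q, atol=tol) for q in orbit):
            orbit.append(p)   # keep only distinct positions
    return orbit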
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
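# No. 108, 'I 4 c m': body-centred, point group 4mm.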
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
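# No. 109, 'I 41 m d': body-centred, 41 screw axis, point group 4mm.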
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
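# No. 110, 'I 41 c d': body-centred, 41 screw axis, point group 4mm.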
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
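# Nos. 111-122 belong to the -42m family; the primitive (P) groups list 8
# operations each, the body-centred (I) groups 16.
# No. 111, 'P -4 2 m'.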
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
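# No. 112, 'P -4 2 c'.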
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
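# No. 113, 'P -4 21 m'.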
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
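# No. 114, 'P -4 21 c'.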
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
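# No. 115, 'P -4 m 2'.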
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
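# No. 116, 'P -4 c 2'.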
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
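# No. 117, 'P -4 b 2'.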
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
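# No. 118, 'P -4 n 2'.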
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
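# No. 119, 'I -4 m 2': body-centred.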
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
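# No. 120, 'I -4 c 2': body-centred.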
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
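# No. 121, 'I -4 2 m': body-centred.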
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
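# No. 122, 'I -4 2 d': body-centred.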
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
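# Nos. 123-138 are primitive centrosymmetric 4/mmm groups: each block lists
# 16 operations, the second eight being the first eight composed with the
# inversion.
# No. 123, 'P 4/m m m'.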
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
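# No. 124, 'P 4/m c c'.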
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
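# No. 125, 'P 4/n b m' (origin choice 2: origin at an inversion centre).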
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
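# No. 126, 'P 4/n n c' (origin choice 2).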
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
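# No. 127, 'P 4/m b m'.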
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
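# No. 128, 'P 4/m n c'.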
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
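# No. 129, 'P 4/n m m' (origin choice 2).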
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
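# No. 130, 'P 4/n c c' (origin choice 2).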
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
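# No. 131, 'P 42/m m c': 42 screw axis.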
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
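# No. 132, 'P 42/m c m': 42 screw axis.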
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
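# Space group 133: 'P 42/n b c :2' (origin choice 2)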
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
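# Space group 134: 'P 42/n n m :2' (origin choice 2)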
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
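# Space group 135: 'P 42/m b c'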
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
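# Space group 136: 'P 42/m n m'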
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
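# Space group 137: 'P 42/n m c :2' (origin choice 2)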
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
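# Space group 138: 'P 42/n c m :2' (origin choice 2)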
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
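# Space group 139: 'I 4/m m m' (body-centred: the operations are repeated with
# the I-centring translation (1/2,1/2,1/2) added)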
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
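# Space group 140: 'I 4/m c m' (I-centred)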
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
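# Space group 141: 'I 41/a m d :2' (I-centred, origin choice 2)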
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
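# Space group 142: 'I 41/a c d :2' (I-centred, origin choice 2)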
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
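# Space group 143: 'P 3' (first of the trigonal space groups)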
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
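# Space group 144: 'P 31' (3_1 screw axis: c translations 0, 1/3, 2/3)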
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
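# Space group 145: 'P 32' (3_2 screw axis: c translations 0, 2/3, 1/3)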
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
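# Space group 146: 'R 3 :H' (hexagonal setting; operations repeated with the
# R-centring translations (1/3,2/3,2/3) and (2/3,1/3,1/3))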
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
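# Space group 147: 'P -3'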
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
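# Space group 148: 'R -3 :H' (hexagonal setting, R-centred)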
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
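# Space group 149: 'P 3 1 2'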
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
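# Space group 150: 'P 3 2 1'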
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
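# Space group 151: 'P 31 1 2'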
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
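# Space group 152: 'P 31 2 1'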
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
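# Space group 153: 'P 32 1 2'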
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
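# Space group 154: 'P 32 2 1'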
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
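# Space group 155: 'R 3 2 :H' (hexagonal setting, R-centred)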
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
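transformations.append((rot, trans_num, trans_den))
# Completion of the truncated block, following the generator pattern above;
# the pair (164, 'P -3 m 1') is assumed from the International Tables
# sequence that this file enumerates.
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg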
"""
EF21 with heavy ball acceleration
experiment for least squares function
"""
import numpy as np
import time
import sys
import os
import argparse
from numpy.random import normal, uniform
from sklearn.datasets import make_spd_matrix, make_sparse_spd_matrix, load_svmlight_file, dump_svmlight_file
from numpy.linalg import norm
import itertools
from scipy.special import binom
import pandas as pd
from matplotlib import pyplot as plt
import math
from sklearn.datasets import load_svmlight_file
import datetime
from IPython import display
from least_squares_functions_fast import *
#np.random.seed(23)
def myrepr(x):
return repr(round(x, 4)).replace('.',',') if isinstance(x, float) else repr(x)
def stopping_criterion(func_diff, eps, it, Nsteps):
    #return (R_k > eps * R_0) and (it <= Nsteps)
    return (it <= Nsteps) and (func_diff >= eps)
def top_k_matrix(X, k):
    output = np.zeros(X.shape)
    for i in range(X.shape[0]):
        output[i] = top_k_compressor(X[i], k)
    return output
def top_k_compressor(x, k):
output = np.zeros(x.shape)
x_abs = np.abs(x)
idx = np.argpartition(x_abs, -k)[-k:] # Indices not sorted
inds = idx[np.argsort(x_abs[idx])][::-1]
output[inds] = x[inds]
return output
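# Illustrative example (not from the original script): top_k_compressor keeps
# the k largest-magnitude entries and zeroes the rest, e.g.
#   top_k_compressor(np.array([0.5, -2.0, 1.0, 0.1]), k=2)
#   -> array([ 0., -2.,  1.,  0.])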
def compute_full_grads (A, x, b, la,n_workers):
grad_ar = np.zeros((n_workers, x.shape[0]))
for i in range(n_workers):
grad_ar[i] = least_squares_grad(x, A[i], b[i], la).copy()
return grad_ar
def ef21_hb_estimator(A, x, b, la, k, g_ar, n_workers):
grads = compute_full_grads(A, x, b, la, n_workers)
assert(grads.shape==(n_workers, x.shape[0]))
g_ar_new = np.zeros((n_workers, x.shape[0]))
delta = grads - g_ar
g_ar_new = g_ar + top_k_matrix(delta, k)
size_value_sent = 32
return g_ar_new, size_value_sent, np.mean(grads, axis=0)
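# The estimator implements the EF21 recursion
#   g_i^{t+1} = g_i^t + top_k(grad f_i(x^{t+1}) - g_i^t),
# so each worker communicates only the k nonzeros of the compressed
# difference; ef21_hb below combines it with the heavy-ball update
#   v^{t+1} = eta * v^t + g^{t+1},  x^{t+1} = x^t - stepsize * v^t.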
def ef21_hb(x_0, x_star, f_star, A, b, A_0, b_0, stepsize, eta, eps,la,k, n_workers, experiment_name, project_path,dataset, Nsteps=100000):
g_ar = compute_full_grads(A, x_0, b, la, n_workers)
g = np.mean(g_ar, axis=0)
v = g.copy()
dim = x_0.shape[0]
f_x = least_squares_loss(x_0, A_0, b_0, la)
sq_norm_ar = [np.linalg.norm(x=g, ord=2) ** 2]
its_bits_od_ar = [0]
its_bits_bd_ar = [0]
its_comm_ar = [0]
its_arg_res_ar = [np.linalg.norm(x=(x_0 - x_star), ord=2) ** 2] #argument residual \sqnorm{x^t - x_star}
func_diff_ar = [f_x - f_star]
x = x_0.copy()
it = 0
PRINT_EVERY = 1000
COMPUTE_FG_EVERY = 10
while stopping_criterion(func_diff_ar[-1], eps, it, Nsteps):
x = x - stepsize*v
g_ar, size_value_sent, grad = ef21_hb_estimator(A, x, b, la, k, g_ar, n_workers)
g = np.mean(g_ar, axis=0)
v = eta*v + g
it += 1
f_x = least_squares_loss(x, A_0, b_0, la)
sq_norm_ar.append(np.linalg.norm(x=grad, ord=2) ** 2)
its_bits_od_ar.append(it*k*size_value_sent)
its_bits_bd_ar.append(it*(k+dim)*size_value_sent)
its_comm_ar.append(it)
its_arg_res_ar.append(np.linalg.norm(x=(x - x_star), ord=2) ** 2)
func_diff_ar.append(f_x - f_star)
if it%PRINT_EVERY ==0:
print(it, sq_norm_ar[-1], func_diff_ar[-1])
its_bits_od = np.array(its_bits_od_ar)
its_bits_bd = np.array(its_bits_bd_ar)
its_comm = np.array(its_comm_ar)
its_arg_res = np.array(its_arg_res_ar)
func_diff = np.array(func_diff_ar)
norms = np.array(sq_norm_ar)
sol = x.copy()
its_epochs = its_comm.copy()
save_data(its_bits_od, its_bits_bd, its_epochs, its_comm, its_arg_res, func_diff, norms, sol, k, experiment_name, project_path,dataset)
return np.array(its_bits_od_ar), np.array(its_bits_bd_ar), np.array(its_comm_ar), np.array(its_arg_res_ar), np.array(func_diff_ar), np.array(sq_norm_ar), x,
def save_data(its_bits_od, its_bits_bd, its_epochs, its_comm, its_arg_res, func_diff, f_grad_norms, x_solution, k_size, experiment_name, project_path, dataset):
experiment = '{0}_{1}'.format(experiment_name, k_size)
logs_path = project_path + "logs/logs_{0}_{1}/".format(dataset, experiment)
if not os.path.exists(project_path + "logs/"):
os.makedirs(project_path + "logs/")
if not os.path.exists(logs_path):
os.makedirs(logs_path)
np.save(logs_path + 'iteration_bits_od' + '_' + experiment, np.array(its_bits_od))
np.save(logs_path + 'iteration_bits_bd' + '_' + experiment, np.array(its_bits_bd))
np.save(logs_path + 'iteration_epochs' + '_' + experiment, np.array(its_epochs))
np.save(logs_path + 'iteration_comm' + '_' + experiment, np.array(its_comm))
np.save(logs_path + 'iteration_arg_res' + '_' + experiment, np.array(its_arg_res))
np.save(logs_path + 'func_diff' + '_' + experiment, np.array(func_diff))
np.save(logs_path + 'norms' + '_' + experiment, np.array(f_grad_norms))
np.save(logs_path + 'solution' + '_' + experiment, x_solution)
parser = argparse.ArgumentParser(description='Run top-k algorithm')
parser.add_argument('--max_it', action='store', dest='max_it', type=int, default=None, help='Maximum number of iterations')
parser.add_argument('--k', action='store', dest='k', type=int, default=1, help='Sparsification parameter')
parser.add_argument('--num_workers', action='store', dest='num_workers', type=int, default=1, help='Number of workers that will be used')
parser.add_argument('--factor', action='store', dest='factor', type=float, default=1, help='Stepsize factor')
parser.add_argument('--eta', action='store', dest='eta', type=float, default=0.99, help='eta parameter')
parser.add_argument('--tol', action='store', dest='tol', type=float, default=1e-5, help='tolerance')
parser.add_argument('--dataset', action='store', dest='dataset', type=str, default='mushrooms',help='Dataset name for saving logs')
args = parser.parse_args()
nsteps = args.max_it
k_tk = args.k
n_w = args.num_workers
dataset = args.dataset
loss_func = "least-sq"
factor = args.factor
eps = args.tol
eta = args.eta
'''
nsteps = 2000
k_tk = 1
n_w = 20
dataset = "phishing"
loss_func = "least-sq"
factor = 1
eps = 1e-7
eta = 0.5
'''
la = 0
user_dir = os.path.expanduser('~/')
project_path = os.getcwd() + "/"
data_path = project_path + "data_{0}/".format(dataset)
if not os.path.exists(data_path):
os.mkdir(data_path)
X_0 = np.load(data_path + 'X.npy')  # whole dataset
y_0 = np.load(data_path + 'y.npy')
n_0, d_0 = X_0.shape
hess_f_0 = (2 /n_0) * (X_0.T @ X_0) + 2*la*np.eye(d_0)
eigvs = np.linalg.eigvals(hess_f_0)
mu_0 = eigvs[np.where(eigvs > 0, eigvs, np.inf).argmin()] #returns smallest positive number
L_0 = np.max(np.linalg.eigvals(hess_f_0))
# original code is from https://github.com/ShiyuLiang/odin-pytorch/blob/master/code/calMetric.py
# Modified by <NAME>
from __future__ import print_function
import numpy as np
from matplotlib import pyplot as plt
from prettytable import PrettyTable
from sklearn.cluster import KMeans
def tpr95(dir_name, task='OOD'):
    # calculate the false positive error when tpr is 95%
if task == 'OOD':
cifar = np.loadtxt('%s/confidence_Base_In.txt' %
dir_name, delimiter=',')
other = np.loadtxt('%s/confidence_Base_Out.txt' %
dir_name, delimiter=',')
elif task == 'mis':
cifar = np.loadtxt('%s/confidence_Base_Succ.txt' %
dir_name, delimiter=',')
other = np.loadtxt('%s/confidence_Base_Err.txt' %
dir_name, delimiter=',')
Y1 = other
X1 = cifar
end = np.max([np.max(X1), np.max(Y1)])
start = np.min([np.min(X1), np.min(Y1)])
gap = (end - start)/200000 # precision:200000
total = 0.0
fpr = 0.0
for delta in np.arange(start, end, gap):
        tpr = np.sum(np.sum(X1 >= delta)) / float(len(X1))
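        # Remainder of the loop reconstructed from the referenced upstream
        # implementation (ShiyuLiang/odin-pytorch, calMetric.py); assumed,
        # not verbatim from this file.
        error2 = np.sum(np.sum(Y1 > delta)) / float(len(Y1))
        if 0.9495 <= tpr <= 0.9505:
            fpr += error2
            total += 1
    fprBase = fpr / total
    return fprBase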
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
# -----------------------------------------------------------------------------
from guidedprojectionbase import GuidedProjectionBase
# -----------------------------------------------------------------------------
from constraints_basic import columnnew,con_planarity_constraints,\
con_isometry
from constraints_net import con_unit_edge,con_orthogonal_midline,\
con_isogonal,con_isogonal_diagnet,\
con_anet,con_anet_diagnet,con_gnet,con_gnet_diagnet,\
con_anet_geodesic,con_polyline_ruling,con_osculating_tangents,\
con_planar_1familyof_polylines,con_nonsquare_quadface,\
con_singular_Anet_diag_geodesic,con_gonet, \
con_diag_1_asymptotic_or_geodesic,\
con_ctrlnet_symmetric_1_diagpoly, con_AGnet
from singularMesh import quadmesh_with_1singularity
from constraints_glide import con_alignment,con_alignments,con_fix_vertices
# -----------------------------------------------------------------------------
__author__ = '<NAME>'
# -----------------------------------------------------------------------------
class GuidedProjection_AGNet(GuidedProjectionBase):
_N1 = 0
_N5 = 0
_N6 = 0
_Nanet = 0
_Ndgeo = 0
_Ndgeoliou = 0
_Ndgeopc = 0
_Nruling = 0
_Noscut = 0
_Nnonsym = 0
_Npp = 0
_Ncd = _Ncds = 0
_Nag = 0
def __init__(self):
GuidedProjectionBase.__init__(self)
weights = {
## Commen setting:
'geometric' : 0, ##NOTE SHOULD BE 1 ONCE planarity=1
'planarity' : 0,
## shared used:
'unit_edge' : 0,
'unit_diag_edge' : 0,
'orthogonal' :0,
'isogonal' : 0,
'isogonal_diagnet' :0,
'Anet' : 0,
'Anet_diagnet' : 0,
'Gnet' : 0,
'Gnet_diagnet' : 0,
'GOnet' : 0,
'diag_1_asymptotic': 0,
'diag_1_geodesic': 0,
'ctrlnet_symmetric_1diagpoly': 0,
'nonsymmetric' :0,
'isometry' : 0,
'z0' : 0,
            'boundary_glide' :0, # Hui: the version in gpbase.py doesn't work, so it is replaced here.
'i_boundary_glide' :0,
'fix_point' :0,
## only for AGNet:
'GGGnet': 0,
'GGG_diagnet': 0, #TODO
'AGnet': 0,
'AAGnet': 0,
'GAAnet': 0,
'GGAnet': 0,
'AGGnet': 0,
'AAGGnet': 0,
'GGAAnet': 0,
'planar_geodesic' : 0,
'agnet_liouville': 0,
'ruling': 0,# opt for ruling quadratic mesh, straight lines-mesh
'oscu_tangent' :0,
'AAG_singular' :0,
'planar_ply1' : 0,
'planar_ply2' : 0,
}
self.add_weights(weights)
self.switch_diagmeth = False
self.is_initial = True
self.if_angle = False
self._angle = 90
self._glide_reference_polyline = None
self.i_glide_bdry_crv, self.i_glide_bdry_ver = [],[]
self.ind_fixed_point, self.fixed_value = None,None
self.set_another_polyline = 0
self._ver_poly_strip1,self._ver_poly_strip2 = None,None
self.nonsym_eps = 0.01
self.ind_nonsym_v124,self.ind_nonsym_l12 = None,None
self.is_singular = False
self._singular_polylist = None
self._ind_rr_vertex = None
self.weight_checker = 1
### isogonal AGnet:
self.is_AG_or_GA = True
self.opt_AG_ortho = False
self.opt_AG_const_rii = False
self.opt_AG_const_r0 = False
#--------------------------------------------------------------------------
#
#--------------------------------------------------------------------------
@property
def mesh(self):
return self._mesh
@mesh.setter
def mesh(self, mesh):
self._mesh = mesh
self.initialization()
@property
def max_weight(self):
return max(self.get_weight('boundary_glide'),
self.get_weight('i_boundary_glide'),
self.get_weight('geometric'),
self.get_weight('planarity'),
self.get_weight('unit_edge'),
self.get_weight('unit_diag_edge'),
self.get_weight('orthogonal'),
self.get_weight('isometry'),
self.get_weight('oscu_tangent'),
self.get_weight('Anet'),
self.get_weight('Anet_diagnet'),
self.get_weight('diag_1_asymptotic'), #n defined from only ctrl-net
self.get_weight('diag_1_geodesic'),
self.get_weight('ctrlnet_symmetric_1diagpoly'),
                   self.get_weight('nonsymmetric'),
self.get_weight('AAG_singular'),
                   self.get_weight('planar_ply1'),
                   self.get_weight('planar_ply2'),
1)
@property
def angle(self):
return self._angle
@angle.setter
def angle(self,angle):
if angle != self._angle:
self.mesh.angle=angle
self._angle = angle
@property
def glide_reference_polyline(self):
if self._glide_reference_polyline is None:
polylines = self.mesh.boundary_curves(corner_split=False)[0]
N = 5
for polyline in polylines:
polyline.refine(N)
self._glide_reference_polyline = polyline
return self._glide_reference_polyline
# @glide_reference_polyline.setter##NOTE: used by reference-mesh case
# def glide_reference_polyline(self,polyline):
# self._glide_reference_polyline = polyline
@property
def ver_poly_strip1(self):
if self._ver_poly_strip1 is None:
if self.get_weight('planar_ply1') or self.opt_AG_const_rii:
self.index_of_mesh_polylines()
else:
self.index_of_strip_along_polyline()
return self._ver_poly_strip1
@property
def ver_poly_strip2(self):
if self._ver_poly_strip2 is None:
if self.get_weight('planar_ply2'):
self.index_of_mesh_polylines()
return self._ver_poly_strip2
@property
def singular_polylist(self):
if self._singular_polylist is None:
self.get_singularmesh_diagpoly()
return self._singular_polylist
@property
def ind_rr_vertex(self):
if self._ind_rr_vertex is None:
self.get_singularmesh_diagpoly()
return self._ind_rr_vertex
#--------------------------------------------------------------------------
# Initialization
#--------------------------------------------------------------------------
def set_weights(self):
#------------------------------------
if self.get_weight('isogonal'):
self.set_weight('unit_edge', 1*self.get_weight('isogonal'))
elif self.get_weight('isogonal_diagnet'):
self.set_weight('unit_diag_edge', 1*self.get_weight('isogonal_diagnet'))
if self.get_weight('Gnet') or self.get_weight('GOnet'):
self.set_weight('unit_edge', 1)
elif self.get_weight('Gnet_diagnet'):
self.set_weight('unit_diag_edge', 1)
if self.get_weight('GGGnet'):
self.set_weight('Gnet', 1)
self.set_weight('diag_1_geodesic',1)
if self.get_weight('AAGnet'):
self.set_weight('Anet', 1)
elif self.get_weight('GAAnet'):
self.set_weight('Anet_diagnet', 1)
elif self.get_weight('GGAnet'):
self.set_weight('Gnet', 1)
elif self.get_weight('AGGnet'):
self.set_weight('Gnet_diagnet', 1)
elif self.get_weight('AAGGnet'):
self.set_weight('Anet', 1)
self.set_weight('Gnet_diagnet', 1)
elif self.get_weight('GGAAnet'):
self.set_weight('Gnet', 1)
self.set_weight('Anet_diagnet', 1)
if self.get_weight('AGnet'):
self.set_weight('oscu_tangent', self.get_weight('AGnet'))
if self.get_weight('AAG_singular'):
self.set_weight('Anet', 1*self.get_weight('AAG_singular'))
if self.get_weight('diag_1_asymptotic') or self.get_weight('diag_1_geodesic'):
self.set_weight('unit_edge',1)
if self.get_weight('ctrlnet_symmetric_1diagpoly'):
pass
#--------------------------------------
    def set_dimensions(self):  # Hui's note: used in guidedprojectionbase
V = self.mesh.V
F = self.mesh.F
num_regular = self.mesh.num_regular
N = 3*V
N1 = N5 = N6 = N
Nanet = N
Ndgeo = Ndgeoliou = Ndgeopc = Nruling = Noscut = N
Nnonsym = N
Npp = N
Ncd = Ncds = N
Nag = N
#---------------------------------------------
if self.get_weight('planarity'):
N += 3*F
N1 = N
if self.get_weight('unit_edge'): #Gnet
"le1,le2,le3,le4,ue1,ue2,ue3,ue4 "
if self.get_weight('isogonal'):
N += 16*num_regular
else:
"for Anet"
N += 16*len(self.mesh.ind_rr_star_v4f4)
N5 = N
elif self.get_weight('unit_diag_edge'): #Gnet_diagnet
"le1,le2,le3,le4,ue1,ue2,ue3,ue4 "
N += 16*len(self.mesh.ind_rr_star_v4f4)
N5 = N
if self.get_weight('isogonal'):
"lt1,lt2, ut1,ut2, cos0"
N += 8*num_regular+1
N6 = N
elif self.get_weight('isogonal_diagnet'):
"lt1,lt2, ut1,ut2, cos0"
N += 8*len(self.mesh.ind_rr_star_v4f4)+1
N6 = N
if self.get_weight('Anet') or self.get_weight('Anet_diagnet'):
N += 3*len(self.mesh.ind_rr_star_v4f4)#3*num_regular
Nanet = N
if self.get_weight('AAGnet') or self.get_weight('GAAnet'):
N += 3*len(self.mesh.ind_rr_star_v4f4)
Ndgeo = N
elif self.get_weight('GGAnet') or self.get_weight('AGGnet'):
N += 9*len(self.mesh.ind_rr_star_v4f4)
Ndgeo = N
elif self.get_weight('AAGGnet') or self.get_weight('GGAAnet'):
N += 6*len(self.mesh.ind_rr_star_v4f4)
Ndgeo = N
if self.get_weight('oscu_tangent'):
"X +=[ll1,ll2,ll3,ll4,lu1,lu2,u1,u2]"
N += 12*len(self.mesh.ind_rr_star_v4f4)
Noscut = N
if self.get_weight('AGnet'):
"osculating tangents; X += [surfN; ogN]; if const.ri, X+=[Ri]"
N += 6*len(self.mesh.ind_rr_star_v4f4)
if self.opt_AG_const_rii:
"const rii for each geodesic polylines, default v2-v-v4"
N += len(self.ver_poly_strip1)#TODO
elif self.opt_AG_const_r0:
"unique r"
N += 1
Nag = N
if self.get_weight('agnet_liouville'):
"X +=[lu1,tu1; lla,llc,g1, lg1,tg1, c]"
N += 13*len(self.mesh.ind_rr_star_v4f4) +1
Ndgeoliou = N
if self.get_weight('planar_geodesic'):
N += 3*len(self.ver_poly_strip1[0])
Ndgeopc = N
if self.get_weight('ruling'):
N += 3*len(self.mesh.get_both_isopolyline(self.switch_diagmeth))
Nruling = N
if self.get_weight('nonsymmetric'):
"X += [E,s]"
N += self.mesh.E + len(self.ind_nonsym_v124[0]) ##self.mesh.num_rrf ##len=self.rr_quadface
Nnonsym = N
if self.get_weight('AAG_singular'):
"X += [on]"
N += 3*len(self.singular_polylist[1])
Ndgeo = N
### PPQ-project:
if self.get_weight('planar_ply1'):
N += 3*len(self.ver_poly_strip1)
## only for \obj_cheng\every_5_PPQ.obj'
##matrix = self.ver_poly_strip1
#matrix = self.mesh.rot_patch_matrix[:,::5].T
#N += 3*len(matrix)
Nppq = N
if self.get_weight('planar_ply2'):
N += 3*len(self.ver_poly_strip2)
Nppo = N
### CG / CA project:
if self.get_weight('diag_1_asymptotic') or self.get_weight('diag_1_geodesic'):
if self.get_weight('diag_1_asymptotic'):
"[ln,v_N]"
N += 4*len(self.mesh.ind_rr_star_v4f4)
elif self.get_weight('diag_1_geodesic'):
if self.is_singular:
"[ln,v_N;la[ind],lc[ind],ea[ind],ec[ind]]"
N += (1+3)*len(self.mesh.ind_rr_star_v4f4)+8*len(self.ind_rr_vertex)
else:
"[ln,v_N;la,lc,ea,ec]"
N += (1+3+3+3+1+1)*len(self.mesh.ind_rr_star_v4f4)
Ncd = N #ctrl-diag net
if self.get_weight('ctrlnet_symmetric_1diagpoly'):
N += (1+1+3+3+1+3)*len(self.mesh.ind_rr_star_v4f4) #[e_ac,l_ac]
Ncds = N
#---------------------------------------------
if N1 != self._N1:
self.reinitialize = True
if N5 != self._N5 or N6 != self._N6:
self.reinitialize = True
if Nanet != self._Nanet:
self.reinitialize = True
if Ndgeo != self._Ndgeo:
self.reinitialize = True
if Nag != self._Nag:
self.reinitialize = True
if Ndgeoliou != self._Ndgeoliou:
self.reinitialize = True
if Ndgeopc != self._Ndgeopc:
self.reinitialize = True
if Nruling != self._Nruling:
self.reinitialize = True
if Noscut != self._Noscut:
self.reinitialize = True
if Nnonsym != self._Nnonsym:
self.reinitialize = True
if Npp != self._Npp:
self.reinitialize = True
if Ncd != self._Ncd:
self.reinitialize = True
if Ncds != self._Ncds:
self.reinitialize = True
#----------------------------------------------
self._N = N
self._N1 = N1
self._N5 = N5
self._N6 = N6
self._Nanet = Nanet
self._Ndgeo = Ndgeo
self._Ndgeoliou = Ndgeoliou
self._Ndgeopc = Ndgeopc
self._Nruling = Nruling
self._Noscut = Noscut
self._Nnonsym = Nnonsym
self._Npp = Npp
self._Ncd = Ncd
self._Ncds = Ncds
self._Nag = Nag
self.build_added_weight() # Hui add
def initialize_unknowns_vector(self):
X = self.mesh.vertices.flatten('F')
if self.get_weight('planarity'):
normals = self.mesh.face_normals()
normals = normals.flatten('F')
X = np.hstack((X, normals))
if self.get_weight('unit_edge'):
if True:
"self.get_weight('Gnet')"
rr=True
l1,l2,l3,l4,E1,E2,E3,E4 = self.mesh.get_v4_unit_edge(rregular=rr)
X = np.r_[X,l1,l2,l3,l4]
X = np.r_[X,E1.flatten('F'),E2.flatten('F'),E3.flatten('F'),E4.flatten('F')]
elif self.get_weight('unit_diag_edge'):
l1,l2,l3,l4,E1,E2,E3,E4 = self.mesh.get_v4_diag_unit_edge()
X = np.r_[X,l1,l2,l3,l4]
X = np.r_[X,E1.flatten('F'),E2.flatten('F'),E3.flatten('F'),E4.flatten('F')]
if self.get_weight('isogonal'):
lt1,lt2,ut1,ut2,_,_ = self.mesh.get_v4_unit_tangents()
cos0 = np.mean(np.einsum('ij,ij->i', ut1, ut2))
X = np.r_[X,lt1,lt2,ut1.flatten('F'),ut2.flatten('F'),cos0]
elif self.get_weight('isogonal_diagnet'):
lt1,lt2,ut1,ut2,_,_ = self.mesh.get_v4_diag_unit_tangents()
cos0 = np.mean(np.einsum('ij,ij->i', ut1, ut2))
X = np.r_[X,lt1,lt2,ut1.flatten('F'),ut2.flatten('F'),cos0]
if self.get_weight('Anet'):
if True:
"only r-regular vertex"
v = self.mesh.rr_star[self.mesh.ind_rr_star_v4f4][:,0]
else:
v = self.mesh.ver_regular
V4N = self.mesh.vertex_normals()[v]
X = np.r_[X,V4N.flatten('F')]
elif self.get_weight('Anet_diagnet'):
v = self.mesh.rr_star_corner[0]
V4N = self.mesh.vertex_normals()[v]
X = np.r_[X,V4N.flatten('F')]
if self.get_weight('AAGnet'):
on = self.get_agweb_initial(diagnet=False,
another_poly_direction=self.set_another_polyline,
AAG=True)
X = np.r_[X,on]
elif self.get_weight('GAAnet'):
on = self.get_agweb_initial(diagnet=True,
another_poly_direction=self.set_another_polyline,
AAG=True)
X = np.r_[X,on]
elif self.get_weight('GGAnet'):
vNoN1oN2 = self.get_agweb_initial(diagnet=False,
another_poly_direction=self.set_another_polyline,
GGA=True)
X = np.r_[X,vNoN1oN2]
elif self.get_weight('AGGnet'):
vNoN1oN2 = self.get_agweb_initial(diagnet=True,
another_poly_direction=self.set_another_polyline,
GGA=True)
X = np.r_[X,vNoN1oN2]
elif self.get_weight('AAGGnet'):
oN1oN2 = self.get_agweb_initial(diagnet=False,
another_poly_direction=self.set_another_polyline,
AAGG=True)
X = np.r_[X,oN1oN2]
elif self.get_weight('GGAAnet'):
oN1oN2 = self.get_agweb_initial(diagnet=True,
another_poly_direction=self.set_another_polyline,
AAGG=True)
X = np.r_[X,oN1oN2]
if self.get_weight('oscu_tangent'):
"X +=[ll1,ll2,ll3,ll4,lu1,lu2,u1,u2]"
if self.get_weight('GAAnet') or self.get_weight('AGGnet') or self.get_weight('GGAAnet'):
diag=True
else:
diag=False
l,t,lt1,lt2 = self.mesh.get_net_osculating_tangents(diagnet=diag)
[ll1,ll2,ll3,ll4],[lt1,t1],[lt2,t2] = l,lt1,lt2
X = np.r_[X,ll1,ll2,ll3,ll4]
X = np.r_[X,lt1,lt2,t1.flatten('F'),t2.flatten('F')]
if self.get_weight('AGnet'):
"osculating tangent"
v,v1,v2,v3,v4 = self.mesh.rr_star[self.mesh.ind_rr_star_v4f4].T
V = self.mesh.vertices
_,_,lt1,lt2 = self.mesh.get_net_osculating_tangents()
srfN = np.cross(lt1[1],lt2[1])
srfN = srfN / np.linalg.norm(srfN,axis=1)[:,None]
if not self.is_AG_or_GA:
v2,v4 = v1,v3
biN = np.cross(V[v2]-V[v], V[v4]-V[v])
ogN = biN / np.linalg.norm(biN,axis=1)[:,None]
X = np.r_[X,srfN.flatten('F'),ogN.flatten('F')]
if self.opt_AG_const_rii:
"const rii for each geodesic polylines, default v2-v-v4"
pass #TODO
elif self.opt_AG_const_r0:
"unique r"
from frenet_frame import FrenetFrame
allr = FrenetFrame(V[v],V[v2],V[v4]).radius
X = np.r_[X,np.mean(allr)]
if self.get_weight('agnet_liouville'): # no need now
"X +=[lu1,tu1; lla,llc,g1, lg1,tg1, c]"
lulg = self.get_agweb_liouville(diagnet=True)
X = np.r_[X,lulg]
if self.get_weight('planar_geodesic'):
sn = self.get_poly_strip_normal()
X = np.r_[X,sn.flatten('F')]
if self.get_weight('ruling'): # no need now
sn = self.get_poly_strip_ruling_tangent()
X = np.r_[X,sn.flatten('F')]
if self.get_weight('nonsymmetric'):
E, s = self.get_nonsymmetric_edge_ratio(diagnet=False)
X = np.r_[X, E, s]
if self.get_weight('AAG_singular'):
"X += [on]"
on = self.get_initial_singular_diagply_normal(is_init=True)
X = np.r_[X,on.flatten('F')]
if self.get_weight('planar_ply1'):
sn = self.get_poly_strip_normal(pl1=True)
X = np.r_[X,sn.flatten('F')]
if self.get_weight('planar_ply2'):
sn = self.get_poly_strip_normal(pl2=True)
X = np.r_[X,sn.flatten('F')]
### CG / CA project:
if self.get_weight('diag_1_asymptotic') or self.get_weight('diag_1_geodesic'):
"X += [ln,uN;la,lc,ea,ec]"
v,v1,v2,v3,v4 = self.mesh.rr_star[self.mesh.ind_rr_star_v4f4].T
V = self.mesh.vertices
v4N = np.cross(V[v3]-V[v1], V[v4]-V[v2])
ln = np.linalg.norm(v4N,axis=1)
un = v4N / ln[:,None]
if self.get_weight('diag_1_asymptotic'):
"X += [ln,un]"
X = np.r_[X,ln,un.flatten('F')]
elif self.get_weight('diag_1_geodesic'):
"X += [ln,un; la,lc,ea,ec]"
if self.is_singular:
"new, different from below"
vl,vc,vr = self.singular_polylist
la = np.linalg.norm(V[vl]-V[vc],axis=1)
lc = np.linalg.norm(V[vr]-V[vc],axis=1)
ea = (V[vl]-V[vc]) / la[:,None]
ec = (V[vr]-V[vc]) / lc[:,None]
X = np.r_[X,ln,un.flatten('F'),la,lc,ea.flatten('F'),ec.flatten('F')]
else:
"no singular case"
l1,l2,l3,l4,E1,E2,E3,E4 = self.mesh.get_v4_diag_unit_edge()
if self.set_another_polyline:
"switch to another diagonal polyline"
ea,ec,la,lc = E2,E4,l2,l4
else:
ea,ec,la,lc = E1,E3,l1,l3
X = np.r_[X,ln,un.flatten('F'),la,lc,ea.flatten('F'),ec.flatten('F')]
if self.get_weight('ctrlnet_symmetric_1diagpoly'):
"X += [lt1,lt2,ut1,ut2; lac,ud1]"
lt1,lt2,ut1,ut2,_,_ = self.mesh.get_v4_unit_tangents()
ld1,ld2,ud1,ud2,_,_ = self.mesh.get_v4_diag_unit_tangents()
if self.set_another_polyline:
"switch to another diagonal polyline"
eac,lac = ud2,ld2
else:
eac,lac = ud1,ld1
X = np.r_[X,lt1,lt2,ut1.flatten('F'),ut2.flatten('F')]
X = np.r_[X,lac,eac.flatten('F')]
self._X = X
self._X0 = np.copy(X)
self.build_added_weight() # Hui add
#--------------------------------------------------------------------------
# Errors strings
#--------------------------------------------------------------------------
def make_errors(self):
self.planarity_error()
self.isogonal_error()
self.isogonal_diagnet_error()
self.anet_error()
self.gnet_error()
self.gonet_error()
        #self.oscu_tangent_error() # good enough: mean=max=90
#self.liouville_error()
def planarity_error(self):
if self.get_weight('planarity') == 0:
return None
P = self.mesh.face_planarity()
Emean = np.mean(P)
Emax = np.max(P)
self.add_error('planarity', Emean, Emax, self.get_weight('planarity'))
def isogonal_error(self):
if self.get_weight('isogonal') == 0:
return None
cos,cos0 = self.unit_tangent_vectors()
err = np.abs(cos-cos0) # no divided by cos
emean = np.mean(err)
emax = np.max(err)
self.add_error('isogonal', emean, emax, self.get_weight('isogonal'))
def isogonal_diagnet_error(self):
if self.get_weight('isogonal_diagnet') == 0:
return None
cos,cos0 = self.unit_tangent_vectors_diagnet()
err = np.abs(cos-cos0) # no divided by cos
emean = np.mean(err)
emax = np.max(err)
self.add_error('isogonal_diagnet', emean, emax, self.get_weight('isogonal_diagnet'))
def isometry_error(self): # Hui
"compare all edge_lengths"
if self.get_weight('isometry') == 0:
return None
L = self.edge_lengths_isometry()
L0 = self.edge_lengths_isometry(initialized=True)
norm = np.mean(L)
Err = np.abs(L-L0) / norm
Emean = np.mean(Err)
Emax = np.max(Err)
self.add_error('isometry', Emean, Emax, self.get_weight('isometry'))
def anet_error(self):
if self.get_weight('Anet') == 0 and self.get_weight('Anet_diagnet')==0:
return None
if self.get_weight('Anet'):
name = 'Anet'
if True:
star = self.mesh.rr_star
v,v1,v2,v3,v4 = star[self.mesh.ind_rr_star_v4f4].T
else:
v,v1,v2,v3,v4 = self.mesh.ver_regular_star.T
elif self.get_weight('Anet_diagnet'):
name = 'Anet_diagnet'
v,v1,v2,v3,v4 = self.mesh.rr_star_corner
if self.is_initial:
Nv = self.mesh.vertex_normals()[v]
else:
num = len(v)
c_n = self._Nanet-3*num+np.arange(3*num)
Nv = self.X[c_n].reshape(-1,3,order='F')
V = self.mesh.vertices
err1 = np.abs(np.einsum('ij,ij->i',Nv,V[v1]-V[v]))
err2 = np.abs(np.einsum('ij,ij->i',Nv,V[v2]-V[v]))
err3 = np.abs(np.einsum('ij,ij->i',Nv,V[v3]-V[v]))
err4 = np.abs(np.einsum('ij,ij->i',Nv,V[v4]-V[v]))
Err = err1+err2+err3+err4
Emean = np.mean(Err)
Emax = np.max(Err)
self.add_error(name, Emean, Emax, self.get_weight(name))
def gnet_error(self):
if self.get_weight('Gnet') == 0 and self.get_weight('Gnet_diagnet')==0:
return None
if self.get_weight('Gnet'):
name = 'Gnet'
if True:
star = self.mesh.rr_star
v,v1,v2,v3,v4 = star[self.mesh.ind_rr_star_v4f4].T
else:
v,v1,v2,v3,v4 = self.mesh.ver_regular_star.T
elif self.get_weight('Gnet_diagnet'):
name = 'Gnet_diagnet'
v,v1,v2,v3,v4 = self.mesh.rr_star_corner
V = self.mesh.vertices
E1 = (V[v1]-V[v]) / np.linalg.norm(V[v1]-V[v],axis=1)[:,None]
E2 = (V[v2]-V[v]) / np.linalg.norm(V[v2]-V[v],axis=1)[:,None]
E3 = (V[v3]-V[v]) / np.linalg.norm(V[v3]-V[v],axis=1)[:,None]
E4 = (V[v4]-V[v]) / np.linalg.norm(V[v4]-V[v],axis=1)[:,None]
err1 = np.abs(np.einsum('ij,ij->i',E1,E2)-np.einsum('ij,ij->i',E3,E4))
err2 = np.abs(np.einsum('ij,ij->i',E2,E3)-np.einsum('ij,ij->i',E4,E1))
Err = err1+err2
Emean = np.mean(Err)
Emax = np.max(Err)
self.add_error(name, Emean, Emax, self.get_weight(name))
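    # Note: the two residuals above enforce <e1,e2> = <e3,e4> and
    # <e2,e3> = <e4,e1> at each regular vertex; together they make each
    # parameter polyline bisect the angle of the crossing one, i.e. the
    # discrete geodesic (Gnet) condition.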
def gonet_error(self):
if self.get_weight('GOnet') == 0:
return None
name = 'GOnet'
if True:
star = self.mesh.rr_star
v,v1,v2,v3,v4 = star[self.mesh.ind_rr_star_v4f4].T
else:
v,v1,v2,v3,v4 = self.mesh.ver_regular_star.T
V = self.mesh.vertices
E1 = (V[v1]-V[v]) / np.linalg.norm(V[v1]-V[v],axis=1)[:,None]
E2 = (V[v2]-V[v]) / np.linalg.norm(V[v2]-V[v],axis=1)[:,None]
E3 = (V[v3]-V[v]) / np.linalg.norm(V[v3]-V[v],axis=1)[:,None]
E4 = (V[v4]-V[v]) / np.linalg.norm(V[v4]-V[v],axis=1)[:,None]
if self.is_AG_or_GA:
err1 = np.abs(np.einsum('ij,ij->i',E1,E2)-np.einsum('ij,ij->i',E2,E3))
err2 = np.abs(np.einsum('ij,ij->i',E3,E4)-np.einsum('ij,ij->i',E4,E1))
else:
err1 = np.abs(np.einsum('ij,ij->i',E1,E2)-np.einsum('ij,ij->i',E1,E4))
err2 = np.abs(np.einsum('ij,ij->i',E2,E3)-np.einsum('ij,ij->i',E3,E4))
Err = err1+err2
Emean = np.mean(Err)
Emax = np.max(Err)
self.add_error(name, Emean, Emax, self.get_weight(name))
def oscu_tangent_error(self):
if self.get_weight('oscu_tangent') == 0:
return None
if self.get_weight('GAAnet') or self.get_weight('AGGnet') or self.get_weight('GGAAnet'):
diag=True
else:
diag=False
angle = self.mesh.get_net_osculating_tangents(diagnet=diag,printerr=True)
emean = '%.2f' % np.mean(angle)
emax = '%.2f' % np.max(angle)
print('ortho:',emean,emax)
#self.add_error('orthogonal', emean, emax, self.get_weight('oscu_tangent'))
def liouville_error(self):
if self.get_weight('agnet_liouville') == 0:
return None
cos,cos0 = self.agnet_liouville_const_angle()
err = np.abs(cos-cos0) # no divided by cos
emean = np.mean(err)
emax = np.max(err)
self.add_error('Liouville', emean, emax, self.get_weight('agnet_liouville'))
def planarity_error_string(self):
return self.error_string('planarity')
def isogonal_error_string(self):
return self.error_string('isogonal')
def isogonal_diagnet_error_string(self):
return self.error_string('isogonal_diagnet')
def isometry_error_string(self):
return self.error_string('isometry')
def anet_error_string(self):
return self.error_string('Anet')
def liouville_error_string(self):
return self.error_string('agnet_liouville')
#--------------------------------------------------------------------------
# Getting (initilization + Plotting):
#--------------------------------------------------------------------------
def unit_tangent_vectors(self, initialized=False):
if self.get_weight('isogonal') == 0:
return None
if initialized:
X = self._X0
else:
X = self.X
N6 = self._N6
num = self.mesh.num_regular
ut1 = X[N6-6*num-1:N6-3*num-1].reshape(-1,3,order='F')
ut2 = X[N6-3*num-1:N6-1].reshape(-1,3,order='F')
cos = np.einsum('ij,ij->i',ut1,ut2)
cos0 = X[N6-1]
return cos,cos0
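    # Note: `cos` collects the per-vertex cosines of the angle between the
    # two unit tangents, while `cos0` is the single shared unknown stored at
    # X[N6-1]; an isogonal net keeps cos == cos0 everywhere (cos0 = 0 would
    # give an orthogonal net).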
def unit_tangent_vectors_diagnet(self, initialized=False):
if self.get_weight('isogonal_diagnet') == 0:
return None
if initialized:
X = self._X0
else:
X = self.X
N6 = self._N6
num = len(self.mesh.ind_rr_star_v4f4)
ut1 = X[N6-6*num-1:N6-3*num-1].reshape(-1,3,order='F')
ut2 = X[N6-3*num-1:N6-1].reshape(-1,3,order='F')
cos = np.einsum('ij,ij->i',ut1,ut2)
cos0 = X[N6-1]
return cos,cos0
def edge_lengths_isometry(self, initialized=False): # Hui
"isometry: keeping all edge_lengths"
if self.get_weight('isometry') == 0:
return None
if initialized:
X = self._X0
else:
X = self.X
vi, vj = self.mesh.vertex_ring_vertices_iterators(order=True) # later should define it as global
Vi = X[columnnew(vi,0,self.mesh.V)].reshape(-1,3,order='F')
Vj = X[columnnew(vj,0,self.mesh.V)].reshape(-1,3,order='F')
el = np.linalg.norm(Vi-Vj,axis=1)
return el
def get_agweb_initial(self,diagnet=False,another_poly_direction=False,
AAG=False,GGA=False,AAGG=False):
"initilization of AG-net project"
V = self.mesh.vertices
v,v1,v2,v3,v4 = self.mesh.rr_star[self.mesh.ind_rr_star_v4f4].T # regular
v,va,vb,vc,vd = self.mesh.rr_star_corner# in diagonal direction
V0,V1,V2,V3,V4,Va,Vb,Vc,Vd = V[v],V[v1],V[v2],V[v3],V[v4],V[va],V[vb],V[vc],V[vd]
vnn = self.mesh.vertex_normals()[v]
if diagnet:
"GGAA / GAA"
Vg1,Vg2,Vg3,Vg4 = V1,V2,V3,V4
else:
"AAGG / AAG"
Vg1,Vg2,Vg3,Vg4 = Va,Vb,Vc,Vd
"X +=[ln, vN] + [oNi]; oNi not need to be unit; all geodesics matter"
if AAGG:
"oN1,oN2 from Gnet-osculating_normals,s.t. anetN*oN1(oN2)=0"
oN1,oN2 = np.cross(Vg3-V0,Vg1-V0),np.cross(Vg4-V0,Vg2-V0)
X = np.r_[oN1.flatten('F'),oN2.flatten('F')]
elif AAG:
"oN from geodesic-osculating-normal (not unit)"
if another_poly_direction:
Vl,Vr = Vg2, Vg4
else:
Vl,Vr = Vg1, Vg3
oN = np.cross(Vr-V0,Vl-V0)
X = np.r_[oN.flatten('F')]
elif GGA:
"X +=[vN, oN1, oN2]; oN1,oN2 from Gnet-osculating_normals"
if diagnet:
"AGG"
Vg1,Vg2,Vg3,Vg4 = Va,Vb,Vc,Vd # different from above
else:
"GGA"
Vg1,Vg2,Vg3,Vg4 = V1,V2,V3,V4 # different from above
oN1,oN2 = np.cross(Vg3-V0,Vg1-V0),np.cross(Vg4-V0,Vg2-V0)
vn = np.cross(oN1,oN2)
vN = vn / np.linalg.norm(vn,axis=1)[:,None]
ind = np.where(np.einsum('ij,ij->i',vnn,vN)<0)[0]
vN[ind]=-vN[ind]
X = np.r_[vN.flatten('F'),oN1.flatten('F'),oN2.flatten('F')]
return X
def get_agweb_an_n_on(self,is_n=False,is_on=False,is_all_n=False):
V = self.mesh.vertices
v = self.mesh.rr_star[:,0]#self.mesh.rr_star_corner[0]
an = V[v]
n = self.mesh.vertex_normals()[v]
on1=on2=n
num = len(self.mesh.ind_rr_star_v4f4)
if self.is_initial:
if self.get_weight('AAGnet') or self.get_weight('GAAnet'):
"vertex normal from A-net"
X = self.get_agweb_initial(AAG=True)
#on = X[:3*num].reshape(-1,3,order='F')
elif self.get_weight('GGAnet') or self.get_weight('AGGnet'):
"X=+[N,oN1,oN2]"
X = self.get_agweb_initial(GGA=True)
n = X[:3*num].reshape(-1,3,order='F')
on1 = X[3*num:6*num].reshape(-1,3,order='F')
on2 = X[6*num:9*num].reshape(-1,3,order='F')
elif self.get_weight('AAGGnet') or self.get_weight('GGAAnet'):
"vertex-normal from Anet, X+=[on1,on2]"
X = self.get_agweb_initial(AAGG=True)
on1 = X[:3*num].reshape(-1,3,order='F')
on2 = X[3*num:6*num].reshape(-1,3,order='F')
elif self.get_weight('Anet'):
pass
# v = v[self.mesh.ind_rr_star_v4f4]
# n = n[v]
elif self.get_weight('AGnet'):
if False:
_,_,lt1,lt2 = self.mesh.get_net_osculating_tangents()
n = np.cross(lt1[1],lt2[1])
n = n / np.linalg.norm(n,axis=1)[:,None]
else:
_,_,ut1,ut2,_,_ = self.mesh.get_v4_unit_tangents(False,True)
n = np.cross(ut1,ut2)
n = n / np.linalg.norm(n,axis=1)[:,None]
else:
X = self.X
if self.get_weight('AAGnet') or self.get_weight('GAAnet'):
"X=+[oNg]"
##print(v,self.mesh.ind_rr_star_v4f4,len(v),len(self.mesh.ind_rr_star_v4f4))
v = v[self.mesh.ind_rr_star_v4f4]
n = X[self._Nanet-3*num:self._Nanet].reshape(-1,3,order='F')
d = self._Ndgeo-3*num
#on = X[d:d+3*num].reshape(-1,3,order='F')
elif self.get_weight('GGAnet') or self.get_weight('AGGnet'):
d = self._Ndgeo-9*num
n = X[d:d+3*num].reshape(-1,3,order='F')
on1 = X[d+3*num:d+6*num].reshape(-1,3,order='F')
on2 = X[d+6*num:d+9*num].reshape(-1,3,order='F')
elif self.get_weight('AAGGnet') or self.get_weight('GGAAnet'):
v = v[self.mesh.ind_rr_star_v4f4]
n = X[self._Nanet-3*num:self._Nanet].reshape(-1,3,order='F')
d = self._Ndgeo-6*num
on1 = X[d:d+3*num].reshape(-1,3,order='F')
on2 = X[d+3*num:d+6*num].reshape(-1,3,order='F')
elif self.get_weight('Anet'):
v = v[self.mesh.ind_rr_star_v4f4]
n = X[self._Nanet-3*num:self._Nanet].reshape(-1,3,order='F')
elif self.get_weight('AGnet'):
if False:
Nag = self._Nag
arr3 = np.arange(3*num)
if self.opt_AG_const_rii or self.opt_AG_const_r0:
if self.opt_AG_const_rii:
#k = len(igeo)
#c_ri = Nag-k+np.arange(k)
pass
#c_srfN = Nag-6*num+arr3-k
#c_ogN = Nag-4*num+arr3-k
elif self.opt_AG_const_r0:
#c_r = Nag-1
c_srfN = Nag-6*num+arr3-1
#c_ogN = Nag-4*num+arr3-1
else:
c_srfN = Nag-6*num+arr3
#c_ogN = Nag-3*num+arr3
n = X[c_srfN].reshape(-1,3,order='F')
#on = X[c_ogN].reshape(-1,3,order='F')
elif False:
ie1 = self._N5-12*num+np.arange(3*num)
ue1 = X[ie1].reshape(-1,3,order='F')
ue2 = X[ie1+3*num].reshape(-1,3,order='F')
ue3 = X[ie1+6*num].reshape(-1,3,order='F')
ue4 = X[ie1+9*num].reshape(-1,3,order='F')
#try:
if self.is_AG_or_GA:
n = ue2+ue4
else:
n = ue1+ue3
n = n / np.linalg.norm(n,axis=1)[:,None]
# except:
# t1,t2 = ue1-ue3,ue2-ue4
# n = np.cross(t1,t2)
# n = n / np.linalg.norm(n,axis=1)[:,None]
v = v[self.mesh.ind_rr_star_v4f4]
else:
c_srfN = self._Nag-3*num+np.arange(3*num)
n = X[c_srfN].reshape(-1,3,order='F')
if is_n:
n = n / np.linalg.norm(n,axis=1)[:,None]
alln = self.mesh.vertex_normals()
n0 = alln[v]
j = np.where(np.einsum('ij,ij->i',n0,n)<0)[0]
n[j] = -n[j]
return V[v],n
elif is_on:
on1 = on1 / np.linalg.norm(on1,axis=1)[:,None]
on2 = on2 / np.linalg.norm(on2,axis=1)[:,None]
return an,on1,on2
elif is_all_n:
alln = self.mesh.vertex_normals()
n0 = alln[v]
j = np.where(np.einsum('ij,ij->i',n0,n)<0)[0]
n[j] = -n[j]
alln[v] = n
return alln
def get_agnet_normal(self,is_biN=False):
V = self.mesh.vertices
v,v1,v2,v3,v4 = self.mesh.rr_star[self.mesh.ind_rr_star_v4f4].T
an = V[v]
if is_biN:
"AGnet: Asy(v1-v-v3), Geo(v2-v-v4), binormal of geodesic crv"
if self.is_AG_or_GA:
eb = (V[v2]-V[v])#/np.linalg.norm(V[v2]-V[v],axis=1)[:,None]
ed = (V[v4]-V[v])#/np.linalg.norm(V[v4]-V[v],axis=1)[:,None]
else:
eb = (V[v1]-V[v])#/np.linalg.norm(V[v1]-V[v],axis=1)[:,None]
ed = (V[v3]-V[v])#/np.linalg.norm(V[v3]-V[v],axis=1)[:,None]
n = np.cross(eb,ed)
i = np.where(np.linalg.norm(n,axis=1)==0)[0]
if len(i)!=0:
n[i]=np.zeros(3)
else:
n = n / np.linalg.norm(n,axis=1)[:,None]
return an, n
if False:
_,_,lt1,lt2 = self.mesh.get_net_osculating_tangents()
n = np.cross(lt1[1],lt2[1])
n = n / np.linalg.norm(n,axis=1)[:,None]
else:
_,_,ut1,ut2,_,_ = self.mesh.get_v4_unit_tangents(False,True)
n = np.cross(ut1,ut2)
n = n / np.linalg.norm(n,axis=1)[:,None]
return an, n
def index_of_strip_along_polyline(self):
"ver_poly_strip1: 2-dim list with different length, at least 2"
w3 = self.get_weight('AAGnet')
w4 = self.get_weight('AAGGnet')
diag = True if w3 or w4 else False
d = self.set_another_polyline
if diag:
iall,iind,_,_ = self.mesh.get_diagonal_vertex_list(5,d) # interval is random
else:
iall,iind,_,_ = self.mesh.get_isoline_vertex_list(5,d) # updated, need to check
self._ver_poly_strip1 = [iall,iind]
def index_of_mesh_polylines(self):
"index_of_strip_along_polyline without two bdry vts, this include full"
if self.is_singular:
self._ver_poly_strip1,_,_ = quadmesh_with_1singularity(self.mesh)
else:
"ver_poly_strip1,ver_poly_strip2"
iall = self.mesh.get_both_isopolyline(diagpoly=self.switch_diagmeth,
is_one_or_another=self.set_another_polyline)
self._ver_poly_strip1 = iall
iall = self.mesh.get_both_isopolyline(diagpoly=self.switch_diagmeth,
is_one_or_another=not self.set_another_polyline)
self._ver_poly_strip2 = iall
def get_initial_singular_diagply_normal(self,is_init=False,AGnet=False,CCnet=False):
V = self.mesh.vertices
vl,vc,vr = self.singular_polylist
Vl,Vc,Vr = V[vl], V[vc], V[vr]
if is_init:
on = np.cross(Vl-Vc, Vr-Vc)
return on / np.linalg.norm(on,axis=1)[:,None]
else:
if self.is_initial:
v = self.mesh.rr_star[self.mesh.ind_rr_star_v4f4][:,0]
vN = self.mesh.vertex_normals()[v] ##approximate.
else:
if AGnet:
#num = self.mesh.num_regular
num = len(self.mesh.ind_rr_star_v4f4)
arr = self._Nanet-3*num+np.arange(3*num)
vN = self.X[arr].reshape(-1,3,order='F')
elif CCnet:
num1 = len(self.mesh.ind_rr_star_v4f4)
num2 = len(self.ind_rr_vertex)
arr = self._Ncd-3*num1-8*num2+np.arange(3*num1)
vN = self.X[arr].reshape(-1,3,order='F')
Nc = vN[self.ind_rr_vertex]
return Nc,Vl,Vc,Vr
def get_poly_strip_normal(self,pl1=False,pl2=False):
"for planar strip: each strip 1 normal as variable, get mean n here"
V = self.mesh.vertices
if pl1:
iall = self.ver_poly_strip1
elif pl2:
iall = self.ver_poly_strip2
else:
iall = self.ver_poly_strip1[0]
n = | np.array([0,0,0]) | numpy.array |
# -*- coding: UTF-8 -*-
"""
Constrained KMeans.
Reference:
https://github.com/Behrouz-Babaki/COP-Kmeans
:author: <NAME> (2019)
:license: Apache License, Version 2.0, see LICENSE for details.
"""
import math
import random
import numpy as np
from sklearn.utils.validation import check_X_y
from sklearn.base import BaseEstimator
from .utils import DistanceFun
# ----------------------------------------------------------------------------
# COPKMeans
# ----------------------------------------------------------------------------
class COPKMeans(BaseEstimator):
"""
Parameters
----------
n_clusters : int (default=10)
Number of clusters used for the COP k-means clustering algorithm.
init : str (default='kmpp')
        Initialization method for the cluster centers.
n_init : int (default=3)
Number of initializations.
max_iter : int (default=300)
Maximum iterations for the algorithm.
metric : string (default=euclidean)
Distance metric for constructing the BallTree.
Can be any of sklearn.neighbors.DistanceMetric methods or 'dtw'
chunk_size : int (default=2000)
        Size of each chunk used to recompute the cluster centers.
"""
def __init__(self,
n_clusters=10,
init='kmpp',
n_init=3,
max_iter=300,
metric='euclidean',
chunk_size=2000,
tol=1e-10,
verbose=False):
super().__init__()
# initialize parameters
self.k = n_clusters
self.init = init
self.n_init = n_init
self.max_iter = max_iter
        self.chunk_size = chunk_size
        self.tol = tol
        self.verbose = verbose
        self.sample_tol = 1e-10
self.n_samples = None
self.D_matrix_ = None
self.labels_ = None
self.cluster_centers_ = None
self.dist = DistanceFun(metric)
def fit_predict(self, X, must_link=np.array([]), cannot_link=np.array([])):
""" Fit COP Kmeans clustering to the data given the constraints.
:param X: np.array()
2D data array to be clustered.
:param must_link: np.array() [n_mlcs, 2]
Indices of the must link datapoints.
:param cannot_link : np.array() [n_clcs, 2]
Indices of cannot link datapoints.
:returns cluster_centers_: np.array()
Cluster centroids.
:returns labels_: np.array()
Labels of the centroids to which the points belong.
"""
return self.fit(X, must_link, cannot_link)._predict()
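    # A minimal usage sketch (hypothetical data; the index pairs are
    # illustrative, not from the original source):
    #
    #   X = np.random.rand(100, 2)
    #   ml = np.array([[0, 1], [2, 3]])  # pairs that must share a cluster
    #   cl = np.array([[0, 4]])          # pairs that must not share a cluster
    #   centers, labels = COPKMeans(n_clusters=3).fit_predict(X, ml, cl)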
    def fit(self, X, must_link=np.array([]), cannot_link=np.array([])):
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 6 20:33:56 2019
@author: abhigyan
"""
import numpy as np
"""Class state to initialize various states in a Gridworld (the various positions
at which a user might find themselves). It has attributes like position, state
type (transient/recurrent as a boolean value for recurrent), its initial value,
immediate action reward.
This class is controlled by the Gridworld class
"""
class state(object):
#Initializes a state with its position, type = transient/terminal, reward due to immediate
#action, maximum rows and maximum columns a Gridworld board can have
def __init__(self, position, terminal, value, actionReward, max_x, max_y):
self.position = position
self.terminal = terminal
self.stateValue = value
self.actionReward = actionReward
self.mins = 0,0
self.maxs = max_x, max_y #max_x = maximum rows, max_y = maximum columns
    #Enumerates all the possible moves from a state; if hitting a wall it returns to the same state (enumerated explicitly)
def enumerateNextStates(self, probability = 'stochastic'):
self.possibleMoves = np.array([[-1, 0], [1, 0], [0, -1], [0, 1]])
if(self.terminal):
self.nextStates = np.array([self.position])
self.actionReward = 0
else:
self.theoreticalNextStates = self.possibleMoves + self.position
self.nextStates = self.theoreticalNextStates[(self.theoreticalNextStates >= self.mins).all(axis = 1) &
(self.theoreticalNextStates <= self.maxs).all(axis = 1)]
            if(len(self.nextStates) < 4 and len(self.nextStates) > 1): #Adding the wall hits as return to the same state explicitly
selfPosition = np.tile(self.position, (4-len(self.nextStates), 1))
self.nextStates = np.concatenate((self.nextStates, selfPosition), axis = 0)
self.numberOfNextStates = len(self.nextStates)
        if(probability == 'random'):                                   #Assigning a random policy
self.transitionProbabilities = np.random.randint(low = 0, high = 50,
size = (self.numberOfNextStates))
self.transitionProbabilities = self.transitionProbabilities / sum(self.transitionProbabilities)
        elif(probability == 'stochastic'):                             #Assigning a uniform stochastic policy
self.transitionProbabilities = 1 / self.numberOfNextStates * np.ones(self.numberOfNextStates)
#Links the state classes to a state, including reching itself by hitting a wall
def linkStates(self, states):
self.nextStateList = []
count = 0
for i in self.nextStates:
entry = str(i[0]) + ',' + str(i[1])
self.nextStateList.append([states[entry], self.transitionProbabilities[count]])
count += 1
#Acts greedily in a current step at a current iteration
def actGreedily(self):
self.newBestValue = -np.inf
for i in self.nextStateList:
nextValue = self.actionReward + i[0].getStateValue()
if(nextValue > self.newBestValue):
self.newBestValue = nextValue
#Evaluates a given policy
def evaluatePolicy(self):
self.expectedValue = 0
for i in self.nextStateList:
self.expectedValue += i[1] * (self.actionReward + i[0].getStateValue())
#Improves the policy by changing state transition probabilities
def improvePolicy(self):
self.newBestValue = -np.inf
self.nextBestState = 0
index = 0
for i in self.nextStateList:
nextValue = self.actionReward + i[0].getStateValue()
if(nextValue > self.newBestValue):
self.newBestValue = nextValue
index = self.nextStateList.index(i)
for i in range(0, len(self.nextStateList)):
if(i == index):
self.nextStateList[i][1] = 1
else:
self.nextStateList[i][1] = 0
#Updates the state value function for greedy policy
def updateGreedyAct(self):
self.stateValue = self.newBestValue
#Updates the state value function for stochastic policy
def updatePolicyAct(self):
self.stateValue = self.expectedValue
#Getter methods
def getNextStates(self):
return self.nextStateList
def getStateValue(self):
return self.stateValue
"""The Gridworld class contains all the states a user can begin in Gridworld and is
responsible for finding the optimal value function v_* via 2 methods:
-> Policy Iteration
-> Value Iteration
"""
class Gridworld(object):
    #Initializes a Gridworld board with its dimensions, winning states = terminal states,
#an immediate action rewards which a user can get on traversing the board
def __init__(self, dimensions, terminalStates, immediateRewards):
self.dimensions = dimensions
self.terminalStates = terminalStates
self.numberOfStates = dimensions[0] * dimensions[1]
self.immediateRewards = immediateRewards
self.valueFunction = np.zeros(self.dimensions) #Value function representation for different states, initialized to 0
#Creates the board by initializing various states and linking them to proper
#reachable states
def createBoard(self):
self.states = {}
for i in range(0, self.dimensions[0]):
for j in range(0, self.dimensions[1]):
position = np.array([i, j])
terminal = (np.any(np.equal(self.terminalStates, position).all(axis=1)))
self.states[str(i)+','+str(j)] = state(position, terminal, 0, self.immediateRewards,
self.dimensions[0]-1, self.dimensions[1]-1)
self.states[str(i)+','+str(j)].enumerateNextStates()
for i in self.states:
self.states[i].linkStates(self.states)
#Act Greedily in a current iteration and update the value function of the states
def actGreedily(self):
for i in self.states:
self.states[i].actGreedily()
for i in self.states:
self.states[i].updateGreedyAct()
self.updateValueFunction()
#Policy Evaluation
def policyEvaluation(self):
stable = False
while(stable == False):
oldValueFunction = np.copy(self.valueFunction)
for i in self.states:
self.states[i].evaluatePolicy()
for i in self.states:
self.states[i].updatePolicyAct()
self.updateValueFunction()
if(np.sum(np.abs(self.valueFunction - oldValueFunction)) <= 10 ** -10):
stable = True
#Improves a policy by acting greedily w.r.t current policy
def policyImprovement(self):
for i in self.states:
self.states[i].improvePolicy()
    #Updates the value function representation of the board (making the values equal to the value function of the states)
def updateValueFunction(self):
for i in range(0, self.dimensions[0]):
for j in range(0, self.dimensions[1]):
self.valueFunction[i,j] = self.states[str(i) + ',' + str(j)].getStateValue()
#Solving by Value Iteration
def valueIteration(self):
stable = False
while(stable == False):
oldValueFunction = np.copy(self.valueFunction)
self.actGreedily()
self.updateValueFunction()
            if(np.sum(np.abs(self.valueFunction - oldValueFunction)) <= 10 ** -10):
                stable = True
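# A minimal usage sketch (hypothetical 4x4 board with reward -1 per move,
# not from the original source):
if __name__ == '__main__':
    terminals = np.array([[0, 0], [3, 3]])
    gw = Gridworld(dimensions=(4, 4), terminalStates=terminals,
                   immediateRewards=-1)
    gw.createBoard()
    gw.valueIteration()
    print(gw.valueFunction)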
from itertools import islice
import logging
from scipy import stats
import h5py
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
import torch
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
from uncertify.visualization.plotting import setup_plt_figure
from uncertify.visualization.grid import imshow_grid
from uncertify.evaluation.datasets import get_n_normal_abnormal_pixels
from uncertify.visualization.histograms import plot_multi_histogram
from typing import Tuple
LOG = logging.getLogger(__name__)
def plot_brats_batches(brats_dataloader: DataLoader, plot_n_batches: int, **kwargs) -> None:
"""Plot batches of a BraTS dataloader.
Keyword Args:
nrow: kwarg to change number of rows
uppercase_keys: if True, changes 'scan' to 'Scan' to support legacy hdf5 datasets
"""
LOG.info('Plotting BraTS2017 Dataset [scan & segmentation]')
for sample in islice(brats_dataloader, plot_n_batches):
nrow_kwarg = {'nrow': kwargs.get('nrow')} if 'nrow' in kwargs.keys() else dict()
scan_key = 'Scan' if kwargs.get('uppercase_keys', False) else 'scan'
seg_key = 'Seg' if kwargs.get('uppercase_keys', False) else 'seg'
mask_key = 'Mask' if kwargs.get('uppercase_keys', False) else 'mask'
mask = torch.where(sample[mask_key], sample[mask_key].type(torch.FloatTensor), -3.5 * torch.ones_like(sample[scan_key]))
seg = torch.where(sample[seg_key].type(torch.BoolTensor), sample[seg_key].type(torch.FloatTensor), -3.5 * torch.ones_like(sample[scan_key]))
grid = make_grid(
torch.cat((sample[scan_key].type(torch.FloatTensor),
seg.type(torch.FloatTensor),
mask.type(torch.FloatTensor)),
dim=2),
padding=0, **nrow_kwarg)
imshow_grid(grid, one_channel=True, plt_show=True, axis='off', **kwargs)
plt.show()
def plot_camcan_batches(camcan_dataloader: DataLoader, plot_n_batches: int, **kwargs) -> None:
"""Plot batches of a CamCAN dataloader.
Keyword Args:
nrow: kwarg to change number of rows
uppercase_keys: if True, changes 'scan' to 'Scan' to support legacy hdf5 datasets
"""
LOG.info('Plotting CamCAN Dataset [scan only]')
nrow_kwarg = {'nrow': kwargs.get('nrow')} if 'nrow' in kwargs.keys() else dict()
for sample in islice(camcan_dataloader, plot_n_batches):
scan = 'Scan' if kwargs.get('uppercase_keys', False) else 'scan'
grid = make_grid(sample[scan].type(torch.FloatTensor), padding=0, **nrow_kwarg)
imshow_grid(grid, one_channel=True, plt_show=True, axis='off', **kwargs)
plt.show()
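# A minimal usage sketch (hypothetical DataLoader; assumes a dataset yielding
# dicts with a 'scan' key, as the functions above expect):
#
#   loader = DataLoader(camcan_dataset, batch_size=8)
#   plot_camcan_batches(loader, plot_n_batches=2, nrow=4)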
def plot_samples(h5py_file: h5py.File, n_samples: int = 3, dataset_length: int = 4000, cmap: str = 'Greys_r',
vmin: float = None, vmax: float = None) -> None:
"""Plot samples and pixel distributions as they come out of the h5py file directly."""
sample_indices = np.random.choice(dataset_length, n_samples)
keys = sorted(list(h5py_file.keys()))
for counter, idx in enumerate(sample_indices):
fig, axes = plt.subplots(ncols=len(keys) + 1, nrows=2, figsize=(12, 12))
mask = h5py_file['mask'][idx]
scan = h5py_file['scan'][idx]
masked_scan = np.where(mask.astype(bool), scan, np.zeros(scan.shape))
min_val = np.min(masked_scan) if vmin is None else vmin
max_val = np.max(masked_scan) if vmax is None else vmax
masked_pixels = scan[mask.astype(bool)].flatten()
datasets = [h5py_file[key] for key in keys] + [masked_scan]
for dataset_name, dataset, ax in zip(keys + ['masked_scan'], datasets, np.transpose(axes)):
if dataset_name != 'masked_scan':
array_2d = dataset[idx]
else: # actually not a dataset but simply an array already
array_2d = dataset
im = ax[0].imshow(np.reshape(array_2d, (200, 200)), cmap=cmap, vmin=min_val, vmax=max_val)
divider = make_axes_locatable(ax[0])
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
ax[0].axis('off')
ax[0].set_title(dataset_name)
ax[1].hist(array_2d if dataset_name != 'masked_scan' else masked_pixels, bins=30, density=False)
try:
description = stats.describe(array_2d if dataset_name != 'masked_scan' else masked_pixels)
except ValueError:
                print('Found sample with empty mask. No statistics available.')
else:
ax[1].set_title(f'mean: {description.mean:.2f}, var: {description.variance:.2f}')
print(f'{dataset_name:15}: min/max: {description.minmax[0]:.2f}/{description.minmax[1]:.2f}, '
f'mean: {description.mean:.2f}, variance: {description.variance:.2f}')
plt.tight_layout()
plt.show()
def plot_patient_histograms(dataloader: DataLoader, n_batches: int, accumulate_batches: bool = False,
bins: int = 40, uppercase_keys: bool = False):
"""Plot the batch-wise intensity histograms.
Arguments
dataloader: a hdf5 dataloader
        n_batches: how many batches to take into account
accumulate_batches: if True, stack all values from all batches and report one histogram
if False, do one histogram for every batch in the figure
bins: number of bins in histograms
uppercase_keys: if True supports legacy upper case keys
"""
accumulated_values = []
for idx, batch in enumerate(dataloader):
mask = batch['mask' if not uppercase_keys else 'Mask'].cpu().detach().numpy()
scan = batch['scan' if not uppercase_keys else 'Scan'].cpu().detach().numpy()
masked_pixels = scan[mask != 0].flatten()
accumulated_values.append(masked_pixels)
if idx + 1 == n_batches:
break
if accumulate_batches:
        values = np.concatenate(accumulated_values)
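        # assumed continuation: draw one histogram over the accumulated
        # values, e.g. via the plot_multi_histogram helper imported above
        # (its exact signature is an assumption):
        # plot_multi_histogram([values], bins=bins)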
import pytest
import numpy as np
from engine.base_classes import Location, AbsLoc, RelLoc
@pytest.fixture(params=[
[1, 2], [[2, 3]], (3, 3), [(3, 4)], [(3, 3), (4, 4)],
np.random.randint(0, 10, (5, 2))])
def coord2d(request):
return request.param
@pytest.fixture
def loc2d(coord2d):
return Location(coord2d)
@pytest.fixture(params=[1, 2, 3, 4, 5])
def ndim(request):
return request.param
@pytest.fixture(params=[0, 1] + np.random.randint(2, 10, 3).tolist())
def ncoord(request):
return request.param
@pytest.fixture
def coord_nd(ncoord, ndim, hypercube_grid_shape_nd):
return np.random.randint(0, hypercube_grid_shape_nd[0], (ncoord, ndim))
@pytest.fixture
def loc_nd(coord_nd):
return Location(coord_nd)
@pytest.fixture
def hypercube_grid_shape_nd(ndim):
return (10, ) * ndim
@pytest.fixture(params=[[[[3, 3]]], np.random.randint(0, 1, (3, 2, 2))])
def invalid_coord(request):
return request.param
@pytest.fixture
def loc10d():
return Location(np.random.randint(0, 3, 10))
def test_Location_class_type(loc2d, coord2d):
assert issubclass(Location, np.ndarray)
def test_Location_init(loc2d, coord2d):
assert np.array_equal(np.array(coord2d, ndmin=2, dtype='int64'), loc2d)
def test_Location_init2(loc_nd, coord_nd, ndim, ncoord):
assert np.array_equal(coord_nd, Location(coord_nd))
assert Location(coord_nd).shape == (ncoord, ndim)
def test_Location_init_invalid(invalid_coord):
with pytest.raises(AssertionError):
Location(invalid_coord)
def test_Location_dtype():
assert Location((3.0, 3.0)).dtype == np.int64
def test_Location_intersect1(loc10d, loc_nd, hypercube_grid_shape_nd):
with pytest.raises(UserWarning):
Location.intersect(loc10d, loc_nd, hypercube_grid_shape_nd)
with pytest.raises(UserWarning):
Location.intersect(loc_nd, loc10d, hypercube_grid_shape_nd)
def test_Location_intersect2(loc_nd, hypercube_grid_shape_nd):
assert np.array_equal(
loc_nd, Location.intersect(loc_nd, loc_nd, hypercube_grid_shape_nd))
def test_Location_intersect_mask(loc_nd, hypercube_grid_shape_nd):
assert np.array_equal(
        np.ones(loc_nd.shape[0], dtype=bool),
Location.intersect_mask(loc_nd, loc_nd, hypercube_grid_shape_nd))
@pytest.mark.parametrize("a,b,expected", [
    (Location([[3,3], [3,4]]), Location([3,3]), np.array([True, False])),
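    # assumed continuation: the test name below and the (10, 10) grid shape
    # are hypothetical, inferred from test_Location_intersect_mask above
])
def test_Location_intersect_mask_param(a, b, expected):
    np.testing.assert_array_equal(
        Location.intersect_mask(a, b, (10, 10)), expected)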
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import numpy as np
from .layers.contrastive import ContrastiveLoss
from .layers.utils import l1norm, l2norm
from .layers.img_enc import EncoderImage
from .layers.txt_enc import EncoderText
class VisualSA(nn.Layer):
"""
Build global image representations by self-attention.
Args: - local: local region embeddings, shape: (batch_size, 36, 1024)
- raw_global: raw image by averaging regions, shape: (batch_size, 1024)
Returns: - new_global: final image by self-attention, shape: (batch_size, 1024).
"""
def __init__(self, embed_dim, dropout_rate, num_region):
super(VisualSA, self).__init__()
self.embedding_local = nn.Sequential(nn.Linear(embed_dim, embed_dim),
nn.BatchNorm1D(num_region),
nn.Tanh(), nn.Dropout(dropout_rate))
self.embedding_global = nn.Sequential(nn.Linear(embed_dim, embed_dim),
nn.BatchNorm1D(embed_dim),
nn.Tanh(), nn.Dropout(dropout_rate))
self.embedding_common = nn.Sequential(nn.Linear(embed_dim, 1))
self.init_weights()
def init_weights(self):
for embeddings in self.children():
for m in embeddings:
if isinstance(m, nn.Linear):
                    r = np.sqrt(6.) / np.sqrt(m.weight.shape[0] + m.weight.shape[1])
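                    # assumed continuation: Xavier/Glorot-style uniform init
                    v = np.random.uniform(-r, r, size=m.weight.shape)
                    m.weight.set_value(v.astype('float32'))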
"""
Addition operator.
Example usage
-------------
Distribution and a constant::
>>> distribution = chaospy.Normal(0, 1)+10
>>> distribution
Add(Normal(mu=0, sigma=1), 10)
>>> distribution.sample(5).round(4)
array([10.395 , 8.7997, 11.6476, 9.9553, 11.1382])
>>> distribution.fwd([9, 10, 11]).round(4)
array([0.1587, 0.5 , 0.8413])
>>> distribution.inv(distribution.fwd([9, 10, 11])).round(4)
array([ 9., 10., 11.])
>>> distribution.pdf([9, 10, 11]).round(4)
array([0.242 , 0.3989, 0.242 ])
>>> distribution.mom([1, 2, 3]).round(4)
array([ 10., 101., 1030.])
>>> distribution.ttr([1, 2, 3]).round(4)
array([[10., 10., 10.],
[ 1., 2., 3.]])
Construct joint addition distribution::
>>> lhs = chaospy.Uniform(2, 3)
>>> rhs = chaospy.Uniform(3, 4)
>>> addition = lhs + rhs
>>> addition
Add(Uniform(lower=2, upper=3), Uniform(lower=3, upper=4))
>>> joint1 = chaospy.J(lhs, addition)
>>> joint2 = chaospy.J(rhs, addition)
Generate random samples::
>>> joint1.sample(4).round(4)
array([[2.2123, 2.0407, 2.3972, 2.2331],
[6.0541, 5.2478, 6.1397, 5.6253]])
>>> joint2.sample(4).round(4)
array([[3.1823, 3.7435, 3.0696, 3.8853],
[6.1349, 6.6747, 5.485 , 5.9143]])
Forward transformations::
>>> lcorr = numpy.array([2.1, 2.5, 2.9])
>>> rcorr = numpy.array([3.01, 3.5, 3.99])
>>> joint1.fwd([lcorr, lcorr+rcorr]).round(4)
array([[0.1 , 0.5 , 0.9 ],
[0.01, 0.5 , 0.99]])
>>> joint2.fwd([rcorr, lcorr+rcorr]).round(4)
array([[0.01, 0.5 , 0.99],
[0.1 , 0.5 , 0.9 ]])
Inverse transformations::
>>> joint1.inv(joint1.fwd([lcorr, lcorr+rcorr])).round(4)
array([[2.1 , 2.5 , 2.9 ],
[5.11, 6. , 6.89]])
>>> joint2.inv(joint2.fwd([rcorr, lcorr+rcorr])).round(4)
array([[3.01, 3.5 , 3.99],
[5.11, 6. , 6.89]])
Raw moments::
>>> joint1.mom([(0, 1, 1), (1, 0, 1)]).round(4)
array([ 6. , 2.5 , 15.0834])
>>> joint2.mom([(0, 1, 1), (1, 0, 1)]).round(4)
array([ 6. , 3.5 , 21.0834])
"""
from __future__ import division
from scipy.special import comb
import numpy
import chaospy
from ..baseclass import Distribution, OperatorDistribution
class Add(OperatorDistribution):
"""Addition operator."""
def __init__(self, left, right):
super(Add, self).__init__(
left=left,
right=right,
repr_args=[left, right],
)
def _lower(self, idx, left, right, cache):
"""
Distribution bounds.
Example:
>>> chaospy.Uniform().lower
array([0.])
>>> chaospy.Add(chaospy.Uniform(), 2).lower
array([2.])
>>> chaospy.Add(2, chaospy.Uniform()).lower
array([2.])
"""
if isinstance(left, Distribution):
left = left._get_lower(idx, cache=cache)
if isinstance(right, Distribution):
right = right._get_lower(idx, cache=cache)
return left+right
def _upper(self, idx, left, right, cache):
"""
Distribution bounds.
Example:
>>> chaospy.Uniform().upper
array([1.])
>>> chaospy.Add(chaospy.Uniform(), 2).upper
array([3.])
>>> chaospy.Add(2, chaospy.Uniform()).upper
array([3.])
"""
if isinstance(left, Distribution):
left = left._get_upper(idx, cache=cache)
if isinstance(right, Distribution):
right = right._get_upper(idx, cache=cache)
return (left.T+right.T).T
def _cdf(self, xloc, idx, left, right, cache):
if isinstance(right, Distribution):
left, right = right, left
xloc = (xloc.T-numpy.asfarray(right).T).T
uloc = left._get_fwd(xloc, idx, cache=cache)
return uloc
def _pdf(self, xloc, idx, left, right, cache):
"""
Probability density function.
Example:
>>> chaospy.Uniform().pdf([-2, 0, 2, 4])
array([0., 1., 0., 0.])
>>> chaospy.Add(chaospy.Uniform(), 2).pdf([-2, 0, 2, 4])
array([0., 0., 1., 0.])
>>> chaospy.Add(2, chaospy.Uniform()).pdf([-2, 0, 2, 4])
array([0., 0., 1., 0.])
"""
if isinstance(right, Distribution):
left, right = right, left
xloc = (xloc.T-numpy.asfarray(right).T).T
return left._get_pdf(xloc, idx, cache=cache)
def _ppf(self, uloc, idx, left, right, cache):
"""
Point percentile function.
Example:
>>> chaospy.Uniform().inv([0.1, 0.2, 0.9])
array([0.1, 0.2, 0.9])
>>> chaospy.Add(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9])
array([2.1, 2.2, 2.9])
>>> chaospy.Add(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9])
array([2.1, 2.2, 2.9])
"""
if isinstance(right, Distribution):
left, right = right, left
xloc = left._get_inv(uloc, idx, cache=cache)
return (xloc.T+numpy.asfarray(right).T).T
def _mom(self, keys, left, right, cache):
"""
Statistical moments.
Example:
>>> chaospy.Uniform().mom([0, 1, 2, 3]).round(4)
array([1. , 0.5 , 0.3333, 0.25 ])
>>> chaospy.Add(chaospy.Uniform(), 2).mom([0, 1, 2, 3]).round(4)
array([ 1. , 2.5 , 6.3333, 16.25 ])
>>> chaospy.Add(2, chaospy.Uniform()).mom([0, 1, 2, 3]).round(4)
array([ 1. , 2.5 , 6.3333, 16.25 ])
"""
del cache
keys_ = numpy.mgrid[tuple(slice(0, key+1, 1) for key in keys)]
keys_ = keys_.reshape(len(self), -1)
if isinstance(left, Distribution):
if chaospy.shares_dependencies(left, right):
raise chaospy.StochasticallyDependentError(
"%s: left and right side of sum stochastically dependent." % self)
left = [left._get_mom(key) for key in keys_.T]
else:
left = list(reversed(numpy.array(left).T**keys_.T))
if isinstance(right, Distribution):
right = [right._get_mom(key) for key in keys_.T]
else:
right = list(reversed(numpy.prod(numpy.array(right).T**keys_.T, -1)))
out = 0.
for idx in range(keys_.shape[1]):
key = keys_.T[idx]
coef = numpy.prod(comb(keys, key))
            out += coef*left[idx]*right[idx]*numpy.all(key <= keys)
        return out
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
import matplotlib.pyplot as plt
import seaborn as sns
##x%matplotlib inline
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import roc_auc_score, log_loss
from numpy import linalg as LA
import re
#import Stemmer
import nltk
from nltk.corpus import wordnet as wn
import gensim
from numpy import linalg as LA
# average embedding
def makeFeatureVec(words, model, num_features):
# Function to average all of the word vectors in a given
# paragraph
#
# Pre-initialize an empty numpy array (for speed)
featureVec = np.zeros((num_features,),dtype="float32")
#
nwords = 0.
#
# Index2word is a list that contains the names of the words in
# the model's vocabulary. Convert it to a set, for speed
index2word_set = set(model.index2word)
#
# Loop over each word in the review and, if it is in the model's
    # vocabulary, add its feature vector to the total
for word in words:
if word in index2word_set:
nwords = nwords + 1.
            featureVec = np.add(featureVec, model[word])
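    # average the accumulated vector over the word count; the zero-word
    # guard is an added assumption to avoid division by zero
    if nwords > 0.:
        featureVec = np.divide(featureVec, nwords)
    return featureVec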
import numpy as np
from algo_CROWN import CROWN
from threat_models.threat_lighten import get_first_layers as lighten_layers
from threat_models.threat_saturate import get_first_layers as saturate_layers
from threat_models.threat_hue import get_first_layers as hue_layers
from threat_models.threat_bandc import get_first_layers as bandc_layers
from utils.get_epsilon import get_eps_2 as get_eps
class Semantic(CROWN):
def __init__(self, model):
super().__init__(model)
@staticmethod
def get_layer_bound_implicit(W, b, UB_prev, LB_prev, is_first, x0, eps):
UB_new = np.empty_like(b)
LB_new = np.empty_like(b)
if is_first: # first layer
Ax0 = np.matmul(W, x0)
for j in range(W.shape[0]):
dualnorm_Aj = np.sum(np.abs(np.multiply(W[j], eps)), axis=1)
UB_new[j] = np.max(Ax0[j] + dualnorm_Aj) + b[j]
LB_new[j] = np.min(Ax0[j] - dualnorm_Aj) + b[j]
        else: # 2nd layer or more
            # a staticmethod has no `self`; apply ReLU directly via np.maximum
            UB_hat = np.maximum(UB_prev, 0)
            LB_hat = np.maximum(LB_prev, 0)
W_abs = np.abs(W)
# not sure why, but in numba, W_abs is float32 and 0.5*(UB_hat-LB_hat) is float64
# while in numpy, W_abs and UB_hat are both float32
B_sum = np.float32(0.5) * (UB_hat + LB_hat)
B_diff = np.float32(0.5) * (UB_hat - LB_hat)
term_1st = np.dot(W_abs, B_diff)
term_2nd = np.dot(W, B_sum) + b
# term_1st = np.dot(W_abs,np.float32(0.5)*(UB_hat-LB_hat))
# term_2nd = np.dot(W_Nk,np.float32(0.5)*(UB_hat+LB_hat))+b_Nk
UB_new = term_1st + term_2nd
LB_new = -term_1st + term_2nd
return UB_new, LB_new
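    # Tiny numeric sketch of the first-layer interval bound above (illustrative
    # only; scalar eps here, whereas the code also supports per-pixel eps arrays):
    #   W = np.array([[1.0, -2.0]]); b = np.array([0.5])
    #   x0 = np.array([0.0, 0.0]);  eps = 0.1
    #   Ax0 = W @ x0                             # [0.0]
    #   dual = np.sum(np.abs(W * eps), axis=1)   # [0.3]
    #   UB = Ax0 + dual + b                      # [0.8]
    #   LB = Ax0 - dual + b                      # [0.2]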
@staticmethod
# @jit(nopython=True)
def get_semantic_layer_bound_implicit(Ws, bs, UBs, LBs, neuron_state, nlayer, bounds_ul, x0, eps):
constants_ub = np.copy(bs[-1])
constants_lb = np.copy(bs[-1])
UB_final = np.zeros_like(constants_ub)
LB_final = np.zeros_like(constants_lb)
A_UB = np.copy(Ws[nlayer - 1])
A_LB = np.copy(Ws[nlayer - 1])
for i in range(nlayer - 1, 0, -1):
# create intercepts array for this layer
l_ub = np.empty_like(LBs[i])
l_lb = np.empty_like(LBs[i])
diags_ub = np.empty_like(bounds_ul[i][0, :])
diags_lb = np.empty_like(bounds_ul[i][0, :])
upper_k = bounds_ul[i][0]
upper_b = bounds_ul[i][1]
lower_k = bounds_ul[i][2]
lower_b = bounds_ul[i][3]
for j in range(A_UB.shape[0]):
# index for positive entries in A for upper bound
idx_pos_ub = np.nonzero(A_UB[j] > 0)[0]
# index for negative entries in A for upper bound
idx_neg_ub = np.nonzero(A_UB[j] <= 0)[0]
# index for positive entries in A for lower bound
                idx_pos_lb = np.nonzero(A_LB[j] > 0)[0]
import serial
import time
import datetime
import math
import numpy as np
#import math
import matplotlib.pyplot as plt
class Embo(object):
TIMEOUT_CMD = 0.1
TIMEOUT_READ = 2.0
VM_MAX_LEN = 200
READY_STR = "Ready"
NOT_READY_STR = "Not ready"
def __init__(self):
if __name__ == '__main__':
com = self.input2("Select COM port:", "COM16")
self.ser = serial.Serial(com, 115200, timeout=0,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS)
self.ser.flush()
self.receive(self.TIMEOUT_CMD)
rx = self.send_cmd("*IDN?", self.TIMEOUT_CMD)
print(rx)
if (rx == ""):
print("Device not connected!")
return False
rx = self.send_cmd("*RST", self.TIMEOUT_CMD)
print(rx)
if (rx == ""):
print("Device not connected!")
return False
self.limits()
if self.input2("\nWant to setup PWM? (n, y)", "n") == "y":
self.pwm()
if not self.choose_mode():
return
self.it = 0
self.vm_data = []
self.vm_data.extend(([], [], [], [], []))
self.tm_last = datetime.datetime.now()
self.loop()
def choose_mode(self):
while True:
self.mode = self.input2("\nChoose mode (SCOPE, VM, LA, CNTR):", "SCOPE")
if self.mode == "CNTR":
self.counter()
rx = self.send_cmd("SYS:MODE " + self.mode, self.TIMEOUT_CMD)
print(rx + "\n")
if ("OK" in rx):
time.sleep(0.25)
break
return True
def counter(self):
self.send_cmd("CNTR:ENABLE 1", self.TIMEOUT_CMD)
while True:
rx = self.send_cmd("CNTR:READ?", self.TIMEOUT_READ)
print(rx + "\n")
time.sleep(0.5)
def limits(self):
self.receive(self.TIMEOUT_CMD)
rx = self.send_cmd("SYS:LIM?", self.TIMEOUT_CMD)
print(rx + "\n")
toks = rx.split(",")
self.lim_adc_1ch_smpl_tm12 = float(toks[0])
self.lim_adc_1ch_smpl_tm8 = float(toks[1])
self.lim_mem = int(toks[2])
self.lim_la_fs = int(toks[3])
self.lim_pwm_fs = int(toks[4])
self.lim_adcs = int(toks[5].replace("D", "").replace("I", ""))
self.lim_dual = "D" in toks[5]
self.lim_inter = "I" in toks[5]
self.lim_bit8 = bool(toks[6])
self.lim_dac = bool(toks[7])
def pwm(self):
default_pwm = "1000,25,25,50,1,1";
while True:
cmd = input("Enter PWM 2-ch settings (FREQ1,DUTY1,DUTY2,OFFSET,EN1,EN2): [" + default_pwm + "]\n")
if cmd == "":
cmd = default_pwm
rx = self.send_cmd("PWM:SET " + cmd, self.TIMEOUT_CMD)
print(rx + "\n")
if "OK" in rx:
break
print(self.send_cmd("PWM:SET?", self.TIMEOUT_CMD) + "\n")
def loop(self):
try:
self.setup()
plt.ion()
fig=plt.figure()
fig.canvas.set_window_title('EMBO')
while True:
if self.read():
self.plot()
self.it = self.it + 1
if self.mode != "VM" and self.trig_mode == "S":
input("Press enter for continue...")
if self.mode == "SCOPE":
rx = self.send_cmd("SYS:MODE SCOPE", self.TIMEOUT_CMD)
else:
rx = self.send_cmd("SYS:MODE LA", self.TIMEOUT_CMD)
if self.READY_STR in rx:
self.ready = True
except KeyboardInterrupt:
pass
self.ser.close()
def setup(self):
while True:
if self.mode == "VM":
self.avg = self.input3("Enter num of averaging samples:", "1", 1, 200)
while True:
self.ch = self.input2("Enter enabled channels (XXXX => T/F):", "TTFF")
if len(self.ch) == 4:
break
self.parse_ch()
self.vcc = self.input2("Show VCC:", "True")
self.vcc = "True" in self.vcc
else:
self.bits = 1
if self.mode == "SCOPE":
if not self.lim_bit8:
self.bits = self.input2("Enter bitness (8 / 12):", "12")
else:
self.bits = "12"
self.bits = int(self.bits)
while True:
self.ch = self.input2("Enter enabled channels (XXXX => T/F):", "TFFF")
if len(self.ch) == 4:
break
self.parse_ch()
max_mem = self.lim_mem
if self.bits == 12:
max_mem = max_mem / 2
if self.mode == "SCOPE":
max_mem = max_mem / ((self.ch_num * 2))
if self.mode == "LA":
max_mem = max_mem / 2
self.mem = self.input3("Enter memory depth", str(int(max_mem)), 1, int(max_mem))
self.mem = int(self.mem)
max_fs = self.lim_la_fs
if self.mode == "SCOPE":
smpltm = self.lim_adc_1ch_smpl_tm12;
if self.bits == 8:
smpltm = self.lim_adc_1ch_smpl_tm8;
if self.lim_adcs == 1:
max_fs = smpltm / float(self.ch_num)
if self.lim_dual and (self.ch_num == 2 or self.ch_num == 4):
print("Dual mode enabled (x2)\n");
max_fs = max_fs * 2.0
elif self.lim_adcs == 2:
cnt1 = int(self.ch1) + int(self.ch2)
cnt2 = int(self.ch3) + int(self.ch4)
cnt_result = cnt1
if cnt2 > cnt1:
cnt_result = cnt2
max_fs = smpltm / float(cnt_result)
else: # 4
max_fs = smpltm
fs_max = int(math.floor(max_fs / 100.0)) * 100
if self.mode == "SCOPE" and self.lim_inter and self.ch_num == 1:
tmpstr = "Interleaved mode enabled";
if self.lim_adcs == 4:
fs_max = fs_max * 4.0;
tmpstr = tmpstr + " (x4)\n"
else:
fs_max = fs_max * 2.0;
tmpstr = tmpstr + " (x2)\n"
print(tmpstr)
self.fs = self.input3("\nEnter sample frequency", str(fs_max), 0, fs_max)
self.fs = int(float(self.fs))
self.trig_ch = self.input3("Enter trigger channel", "1", 1, 4)
self.trig_ch = int(self.trig_ch)
self.trig_val = 0
if self.mode == "SCOPE":
self.trig_val = self.input3("Enter trigger value in percentage", "50", 0, 100)
self.trig_edge = self.input4("Enter trigger edge (R - Rising / F - Falling):", "R", ["R", "F"])
self.trig_mode = self.input4("Enter trigger mode (A - Auto / N - Normal / S - Single / D - Disabled):",
"A", ["A", "N", "S", "D"])
self.trig_pre = self.input3("Enter pretrigger value in percentage", "50", 0, 100)
self.trig_pre = int(self.trig_pre)
rx = ""
#self.ser.flush()
self.receive(self.TIMEOUT_CMD)
if self.mode == "SCOPE":
rx = self.send_cmd('SCOP:SET {},{},{},{},{},{},{},{},{}'.format(self.bits, self.mem, self.fs, self.ch,
self.trig_ch, self.trig_val, self.trig_edge, self.trig_mode, self.trig_pre), self.TIMEOUT_CMD)
elif self.mode == "LA":
rx = self.send_cmd('LA:SET {},{},{},{},{},{}'.format(self.mem, self.fs, self.trig_ch, self.trig_edge,
self.trig_mode, self.trig_pre), self.TIMEOUT_CMD)
elif self.mode == "VM":
rx = "OK"
if self.mode == "VM":
break
print(rx + "\n")
self.ready = False
if self.READY_STR in rx:
self.ready = True
if "OK" not in rx:
continue
"""
if self.mode == "SCOPE":
rx = self.send_cmd("SCOP:SET?", self.TIMEOUT_CMD)
elif self.mode == "LA":
rx = self.send_cmd("LA:SET?", self.TIMEOUT_CMD)
if self.READY_STR in rx:
self.ready = True
print("settings: " + rx + "\n")
"""
break
def read(self):
rx = ""
if self.mode == "VM":
rx = self.send_cmd("VM:READ? " + str(self.avg), self.TIMEOUT_CMD)
toks = rx.split(",")
if len(toks) != 5:
print("Invalid data!")
return False
print(rx)
self.vm_data[0].append(float(toks[0]))
self.vm_data[1].append(float(toks[1]))
self.vm_data[2].append(float(toks[2]))
self.vm_data[3].append(float(toks[3]))
self.vm_data[4].append(float(toks[4]))
if len(self.vm_data[0]) > self.VM_MAX_LEN:
del self.vm_data[0][0]
del self.vm_data[1][0]
del self.vm_data[2][0]
del self.vm_data[3][0]
del self.vm_data[4][0]
el = datetime.datetime.now() - self.tm_last
self.tm_last = datetime.datetime.now()
print("\n" + str(self.it) + f"., " + str(el.total_seconds()) + " ms")
print("---------------------")
return True
else:
#if self.trig_mode != "D":
if not self.ready:
rx2 = self.receive(0.1)
if self.READY_STR not in rx2:
print(".", sep='', end='', flush=True)
time.sleep(0.05)
return False
else:
print(rx2)
#print(rx2)
if self.mode == "SCOPE":
rx = self.read_bin_data("SCOP:READ?", self.TIMEOUT_READ)
elif self.mode == "LA":
rx = self.read_bin_data("LA:READ?", self.TIMEOUT_READ)
self.ready = False
if rx == "DATA":
pass
elif rx == "TIMEOUT":
print("No answer from device")
print("---------------------")
return False
elif self.NOT_READY_STR in rx:
print(".", sep='', end='', flush=True)
return False
else:
print(rx)
print("---------------------")
return False
print("\n")
el = datetime.datetime.now() - self.tm_last
self.tm_last = datetime.datetime.now()
#print(self.raw_data)
buff = []
if self.mode == "SCOPE":
if self.bits == 12:
for i in range(0, int(len(self.raw_data) / 2)):
j = i * 2
buff.append(float((self.raw_data[j + 1] << 8 | self.raw_data[j]) / 10000.0))
else:
for i in range(0, int(len(self.raw_data))):
buff.append(float((self.raw_data[i]) / 100.0))
buff_split = np.array_split(buff, self.ch_num)
self.scope_data = []
self.scope_data.extend(([], [], [], []))
i = 0
if self.ch1:
self.scope_data[0] = buff_split[i]
i = i + 1
if self.ch2:
self.scope_data[1] = buff_split[i]
i = i + 1
if self.ch3:
self.scope_data[2] = buff_split[i]
i = i + 1
if self.ch4:
self.scope_data[3] = buff_split[i]
i = i + 1
else: # LA
self.la_data = []
self.la_data.extend(([], [], [], []))
for i in range(0, int(len(self.raw_data))):
ch1 = self.raw_data[i] & 2 != 0
ch2 = self.raw_data[i] & 4 != 0
ch3 = self.raw_data[i] & 8 != 0
ch4 = self.raw_data[i] & 16 != 0
self.la_data[0].append(ch1)
self.la_data[1].append(ch2)
self.la_data[2].append(ch3)
self.la_data[3].append(ch4)
print(str(self.it) + f"., {(float(len(self.raw_data)) / 1024.0):.1f} KB, " + str(el.total_seconds()) + " s")
print("---------------------")
return True
def plot(self):
ax = plt.gca()
plt.clf()
plt.grid(True, linestyle=':')
ax.grid(which='major', alpha=0.9)
if self.mode != "VM":
rngx = (self.mem / self.fs * 1000) * 1.0
rngx_l = -1 * rngx * (self.trig_pre / 100.0)
rngx_r = rngx * ((100.0 - self.trig_pre) / 100.0)
#rngx = self.mem
#rngx_l = 0
#rngx_r = rngx
major_ticks_x = np.linspace(rngx_l, rngx_r, 50)
minor_ticks_x = np.linspace(rngx_l, rngx_r, 10)
ax.set_xticks(major_ticks_x)
ax.set_xticks(minor_ticks_x, minor=True)
plt.xlim(rngx_l, rngx_r)
plt.axvline(x=0, color='k', linestyle='--', linewidth=2.5, alpha=0.8)
if self.mode != "LA":
plt.axhline(y=3.3 * (float(self.trig_val) / 100.0), color='k', linestyle='--', linewidth=2.5, alpha=0.8)
plt.xlabel("Time [ms]")
else:
plt.xlabel("Time")
major_ticks_x = np.arange(0, self.VM_MAX_LEN, 50)
            minor_ticks_x = np.arange(0, self.VM_MAX_LEN, 10)
# This code is a part of XMM: Generate and Analyse (XGA), a module designed for the XMM Cluster Survey (XCS).
# Last modified by <NAME> (<EMAIL>) 25/02/2021, 13:52. Copyright (c) <NAME>
import numpy as np
from astropy.units import Quantity
from ...models.misc import power_law
from ...products.relation import ScalingRelation
# These are from the classic M-T relation paper published by Arnaud, as R-T relations are a byproduct of M-T relations
arnaud_r200 = ScalingRelation(np.array([0.57, 1674]), np.array([0.02, 23]))
""" Basic named axis implementation.
Requires python 3
Author: <NAME>,
Philipps University of Marburg
<EMAIL>
"""
import textwrap
from collections import namedtuple
import numpy as np
class Map(object):
pass
class NegOp(Map):
@staticmethod
def __call__(x):
return -x
class Selector(Map):
pass
class SliceSelector(Map):
pass
class ConditionSelector(Map):
pass
class ReductionOp(Map):
pass
class SumReductionOp(Map):
@staticmethod
def __call__(x, axes):
return np.sum(x, axis=axes, keepdims=False)
class ProdReductionOp(Map):
@staticmethod
def __call__(x, axes):
return np.prod(x, axis=axes, keepdims=False)
class MeanReductionOp(Map):
@staticmethod
def __call__(x, axes):
return np.mean(x, axis=axes, keepdims=False)
class CrossOperation(Map):
pass
class ProdCrossOperation(Map):
@staticmethod
def __call__(x, y):
return x * y
neg = NegOp()
sum = SumReductionOp()
prod = ProdReductionOp()
mean = MeanReductionOp()
el_prod = ProdCrossOperation()
class Named(object):
def __init__(self, tensor, names):
assert len(tensor.shape) == len(names)
assert isinstance(names, tuple)
self._tensor = tensor # actual data
self._names = names # axes names
def shape(self):
return {n: s for n, s in zip(self._names, self._tensor.shape)}
def map(self, op):
""" Apply a unitary mapping operation for every tensor element
"""
return Named(op(self._tensor), self._names)
def reduce(self, op, names):
""" Remove axis by applying a commutative operation along it
"""
axes = tuple((i for i, name in enumerate(self._names) if name in names))
new_tensor = op(self._tensor, axes)
new_names = tuple((name for name in self._names if name not in names))
return Named(new_tensor, new_names)
def elementwise(self, op, other):
""" Apply elementwise operation between self and other.
Resulting tensor axes names is join set of the argument tensor's axes
"""
if not isinstance(other, Named):
other = Named(np.array(other), ())
new_names = self.join_names(self._names, other._names)
new_tensor = op(self.expand(new_names)._tensor, other.expand(new_names)._tensor)
return Named(new_tensor, new_names)
def cross(self, other, name):
""" Cartesian product of tensors by new axis name.
Resulting tensor axes names is join set of the argument tensor's axes
with added new axis name.
Shared names dimensions shoud match
"""
new_names = self.join_names(self._names, other._names)
expanded_self = self.expand(new_names)._tensor
expanded_other = other.expand(new_names)._tensor
new_shape = np.max([expanded_self.shape, expanded_other.shape], axis=0)
expanded_self = np.broadcast_to(expanded_self, new_shape)
expanded_other = np.broadcast_to(expanded_other, new_shape)
new_tensor = np.stack([expanded_self, expanded_other], axis=0)
return Named(new_tensor, (name,) + new_names)
def split(self, name):
""" Splits and removes the axis
Return a list of named tensors
"""
new_names = tuple((n for n in self._names if n != name))
for sliced in self.expand((name,) + new_names)._tensor:
yield Named(sliced, new_names)
@staticmethod
def merge(tensors, name):
""" Creates a new axis and merges tensors along it
"""
assert len(tensors) == 2
t1, t2 = tensors
assert Named.contains_names(t1._names, t2._names)
new_names = (name,) + t1._names
new_tensor = np.concatenate([t1.expand(new_names)._tensor, t2.expand(new_names)._tensor])
return Named(new_tensor, new_names)
def expand(self, names):
""" Expand by new axes in the exact order provided.
If no new names - then just reorders the axes.
The resulting tensor axes names are exactly $names$
"""
assert Named.contains_names(names, self._names)
additional_names = Named.diff_names(names, self._names)
new_names = additional_names + self._names # insert in front
new_tensor = self._tensor
for i in range(len(additional_names)):
new_tensor = np.expand_dims(new_tensor, axis=0) # insert in front
ind = [new_names.index(n) for n in names]
new_tensor = np.transpose(new_tensor, ind)
return Named(new_tensor, names)
def select(self, selectorop, names):
inds = tuple((self._names.index(name) for name in names))
new_tensor = selectorop(self._tensor, inds)
new_names = "-".join(names)
return Named(new_tensor, new_names)
def __getitem__(self, slices):
""" Slice the tensor according to the slices dictionary
"""
assert isinstance(slices, dict)
slices = tuple((slices[key] if key in slices else slice(None) for key in self._names))
new_names = tuple((name for name, s in zip(self._names, slices)
if isinstance(s, (slice, list, tuple, np.ndarray))))
return Named(self._tensor[slices], new_names)
def append(self, other, name):
""" Append to existing axis
"""
ind = self._names.index(name)
return Named(np.concatenate([self._tensor, other._tensor], ind), self._names)
def rename(self, pairs):
""" Renames axes {old: new} for every pair
"""
names = list(self._names)
for key, val in pairs.items():
i = names.index(key)
names[i] = val
new_names = tuple(names)
return Named(self._tensor, new_names)
@staticmethod
def join_names(a, b):
""" Set join
"""
return tuple(set(a).union(set(b)))
@staticmethod
def intersect_names(a, b):
""" Set join
"""
return tuple(set(a).intersection(set(b)))
@staticmethod
def diff_names(a, b):
""" Set difference a-b
"""
return tuple(set(a).difference(set(b)))
@staticmethod
def contains_names(a, b):
""" Check if a contains b
"""
return set(a).issuperset(set(b))
@staticmethod
def same_names(a, b):
""" Check if a and b are the same
"""
return set(a) == set(b)
def __add__(self, other):
return self.elementwise(lambda a, b: a+b , other)
def __sub__(self, other):
return self.elementwise(lambda a, b: a-b , other)
def __mul__(self, other):
return self.elementwise(lambda a, b: a*b , other)
def __truediv__(self, other):
return self.elementwise(lambda a, b: a/b , other)
def __radd__(self, other):
if not isinstance(other, Named):
other = Named(np.array(other), ())
return other + self
def __rsub__(self, other):
if not isinstance(other, Named):
other = Named(np.array(other), ())
return other - self
def __rmul__(self, other):
if not isinstance(other, Named):
other = Named(np.array(other), ())
return other * self
def __rtruediv__(self, other):
if not isinstance(other, Named):
other = Named(np.array(other), ())
return other / self
def __neg__(self):
return self.map(neg)
def __repr__(self):
meta = ", ".join(["{}: {}".format(n, s) for n, s in zip(self._names, self._tensor.shape)])
res = "Named ({})".format(meta) + ":\n" + textwrap.indent(str(self._tensor), " ")
return res
if __name__ == "__main__":
print("--- Reduction ---")
x = Named(np.ones([2, 3, 4]), ("a", "b", "c"))
print("x =", x)
y = x.reduce(sum, "c")
print("x.sum('c') =", y)
print("--- Expansion ---")
z = y.expand(("<", "b", "-", "a", ">"))
print(z)
print("--- Elementwise operation ---")
x = Named(np.ones([2, 3]), ("a", "b"))
y = Named(np.ones([3, 4]), ("b", "c"))
z = x.elementwise(el_prod, y)
print(z)
print("--- Outer product ---")
x = Named(np.arange(3), ("x",))
y = Named(np.arange(4), ("y",))
z = x.elementwise(el_prod, y)
print(z)
print("--- Rename axis ---")
x = Named(np.ones([2, 3]), ("a", "b"))
y = x.rename({"a": "new_a"})
print(y)
print("--- Inner product ---")
x = Named(np.arange(3), ("x",))
print("x =", x)
z = x.elementwise(el_prod, x).reduce(sum, "x")
print("x dot x =", z)
print("--- Self outer product ---")
x = Named(np.arange(3), ("x",))
print("x =", x)
z = x.elementwise(el_prod, x.rename({"x": "x'"}))
print("x*x' =", z)
print("--- Cross ---")
x = Named(np.arange(3), ("x",))
z = x.cross(x.rename({"x": "x'"}), "choice")
print(z)
y = Named(np.arange(4), ("y",))
z = x.cross(y, "choice")
print(z)
print("--- Splitting ---")
for i, t in enumerate(z.split("choice")):
print("Splited by choice =", i, t)
print("--- Merging ---")
z = Named.merge([t for t in z.split("choice")], name="merged")
print(z)
print("--- Operators ---")
x = Named(np.arange(3), ("x",))
y = Named(np.arange(2), ("y",))
print(x * x)
print(x + x * y)
print(x * x.rename({"x": "x'"}))
print("--- Generic function ---")
def var(x, along):
x_centered = x - x.reduce(mean, along)
return (x_centered * x_centered).reduce(sum, along) / x.shape()[along]
x = Named(np.reshape(np.random.uniform(size=3*4), [3, 4]), ("t", "x"))
print(x)
print(var(x, along="t"))
def covar(a, b, along):
a = a - a.reduce(mean, along)
b = b - b.reduce(mean, along)
return (a * b).reduce(sum, along) / a.shape()[along]
a = Named(np.reshape(np.random.uniform(size=5*3), [5, 3]), ("t", "a"))
    b = Named(np.reshape(np.random.uniform(size=5*4), [5, 4]), ("t", "b"))
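    print(covar(a, b, along="t"))  # assumed continuation, mirroring the var() demo above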
from lacecore import ArityException, GroupMap, LoadException, load_obj, load_obj_string
import numpy as np
import pytest
from .._mesh import FACE_DTYPE
@pytest.fixture
def write_tmp_mesh(tmp_path):
def _write_tmp_mesh(mesh_contents, basename="example.obj"):
test_mesh_path = str(tmp_path / basename)
with open(test_mesh_path, "w") as f:
f.write(mesh_contents)
return test_mesh_path
return _write_tmp_mesh
def assert_is_cube_mesh(mesh):
assert mesh.num_v == 8
np.testing.assert_array_equal(mesh.v[0], np.array([0.0, 2.0, 2.0]))
np.testing.assert_array_equal(mesh.f[0], np.array([0, 1, 2, 3]))
np.testing.assert_array_equal(mesh.f[-1], np.array([1, 5, 6, 2]))
assert mesh.num_f == 6
assert isinstance(mesh.face_groups, GroupMap)
assert mesh.face_groups.keys() == [
"front",
"cube",
"back",
"right",
"top",
"left",
"bottom",
]
assert mesh.f.dtype == FACE_DTYPE
def test_loads_from_local_path():
mesh = load_obj("./examples/tinyobjloader/models/cube.obj")
assert_is_cube_mesh(mesh)
def test_loads_from_string():
with open("./examples/tinyobjloader/models/cube.obj", "r") as f:
contents = f.read()
mesh = load_obj_string(contents)
assert_is_cube_mesh(mesh)
def test_loads_from_string_with_error():
contents = """
f 0 0 0
"""
with pytest.raises(LoadException, match="^Failed parse `f' line"):
load_obj_string(contents)
def test_loads_from_local_path_with_nonexistent_file():
with pytest.raises(
LoadException, match=r"^Cannot open file \[./thispathdoesnotexist\]"
):
load_obj("./thispathdoesnotexist")
def test_loads_from_local_path_with_mixed_arities():
with pytest.raises(ArityException):
load_obj("./examples/tinyobjloader/models/smoothing-group-two-squares.obj")
def test_triangulation_is_abc_acd(write_tmp_mesh):
"""
There is some complex code in tinyobjloader which occasionally switches
the axes of triangulation based on the vertex positions. This is
undesirable in lacecore as it scrambles correspondence.
"""
mesh_path = write_tmp_mesh(
"""
v 0 0 0
v 0 0 0
v 0 0 0
v 0 0 0
f 1 2 3 4
v 46.367584 82.676086 8.867414
v 46.524185 82.81955 8.825487
v 46.59864 83.086678 8.88121
v 46.461926 82.834091 8.953863
f 5 6 7 8
"""
)
# ABC + ACD
expected_triangle_faces = np.array([[0, 1, 2], [0, 2, 3], [4, 5, 6], [4, 6, 7]])
mesh = load_obj(mesh_path, triangulate=True)
np.testing.assert_array_equal(mesh.f, expected_triangle_faces)
assert mesh.f.dtype == FACE_DTYPE
def test_mesh_with_mixed_tris_and_quads_returns_expected(write_tmp_mesh):
mesh_path = write_tmp_mesh(
"""
v 0 1 1
v 0 2 2
v 0 3 3
v 0 4 4
v 0 5 5
f 1 2 3 4
f 1 4 5
"""
)
expected_triangle_faces = np.array([[0, 1, 2], [0, 2, 3], [0, 3, 4]])
mesh = load_obj(mesh_path, triangulate=True)
    np.testing.assert_array_equal(mesh.f, expected_triangle_faces)
import torch, os, numpy as np, copy
import cv2
import glob
from .map import GeometricMap
class preprocess(object):
def __init__(self, data_root, seq_name, parser, log, split='train', phase='training'):
self.parser = parser
self.dataset = parser.dataset
self.data_root = data_root
self.past_frames = parser.past_frames
self.future_frames = parser.future_frames
self.frame_skip = parser.get('frame_skip', 1)
self.min_past_frames = parser.get('min_past_frames', self.past_frames)
self.min_future_frames = parser.get('min_future_frames', self.future_frames)
self.traj_scale = parser.traj_scale
self.past_traj_scale = parser.traj_scale
self.load_map = parser.get('load_map', False)
self.map_version = parser.get('map_version', '0.1')
self.seq_name = seq_name
self.split = split
self.phase = phase
self.log = log
if parser.dataset == 'nuscenes_pred':
label_path = os.path.join(data_root, 'label/{}/{}.txt'.format(split, seq_name))
delimiter = ' '
elif parser.dataset in {'eth', 'hotel', 'univ', 'zara1', 'zara2'}:
label_path = f'{data_root}/{parser.dataset}/{seq_name}.txt'
delimiter = ' '
else:
assert False, 'error'
self.gt = np.genfromtxt(label_path, delimiter=delimiter, dtype=str)
        frames = self.gt[:, 0].astype(np.float32).astype(int)
fr_start, fr_end = frames.min(), frames.max()
self.init_frame = fr_start
self.num_fr = fr_end + 1 - fr_start
if self.load_map:
self.load_scene_map()
else:
self.geom_scene_map = None
self.class_names = class_names = {'Pedestrian': 1, 'Car': 2, 'Cyclist': 3, 'Truck': 4, 'Van': 5, 'Tram': 6, 'Person': 7, \
'Misc': 8, 'DontCare': 9, 'Traffic_cone': 10, 'Construction_vehicle': 11, 'Barrier': 12, 'Motorcycle': 13, \
'Bicycle': 14, 'Bus': 15, 'Trailer': 16, 'Emergency': 17, 'Construction': 18}
for row_index in range(len(self.gt)):
self.gt[row_index][2] = class_names[self.gt[row_index][2]]
self.gt = self.gt.astype('float32')
self.xind, self.zind = 13, 15
def GetID(self, data):
id = []
for i in range(data.shape[0]):
id.append(data[i, 1].copy())
return id
def TotalFrame(self):
return self.num_fr
def PreData(self, frame):
DataList = []
for i in range(self.past_frames):
if frame - i < self.init_frame:
                data = []
            else:
                data = self.gt[self.gt[:, 0] == (frame - i * self.frame_skip)]
DataList.append(data)
return DataList
def FutureData(self, frame):
DataList = []
for i in range(1, self.future_frames + 1):
data = self.gt[self.gt[:, 0] == (frame + i * self.frame_skip)]
DataList.append(data)
return DataList
def get_valid_id(self, pre_data, fut_data):
cur_id = self.GetID(pre_data[0])
valid_id = []
for idx in cur_id:
exist_pre = [(False if isinstance(data, list) else (idx in data[:, 1])) for data in pre_data[:self.min_past_frames]]
exist_fut = [(False if isinstance(data, list) else (idx in data[:, 1])) for data in fut_data[:self.min_future_frames]]
            if np.all(exist_pre) and np.all(exist_fut):
                valid_id.append(idx)
        return valid_id
import numpy as np
import pandas as pd
from backtesting.analysis import plot_cost_proceeds, plot_holdings, \
plot_performance
from backtesting.report import Report
from backtesting.simulation import simulate
def main() -> None:
from string import ascii_uppercase
    np.random.seed(42)
#! /usr/bin/env python
"""
Module with frame de-rotation routine for ADI.
"""
__author__ = '<NAME>, <NAME>'
__all__ = ['cube_derotate',
'frame_rotate',
'rotate_fft']
from astropy.stats import sigma_clipped_stats
import numpy as np
from numpy.fft import fft, ifft, fftshift, fftfreq
import warnings
from astropy.utils.exceptions import AstropyWarning
# intentionally ignore NaN warnings from astropy - won't ignore other warnings
warnings.simplefilter('ignore', category=AstropyWarning)
try:
import cv2
no_opencv = False
except ImportError:
msg = "Opencv python bindings are missing."
warnings.warn(msg, ImportWarning)
no_opencv = True
try:
from numba import njit
no_numba = False
except ImportError:
msg = "Numba python bindings are missing. "
msg+= "Consider installing Numba for faster fft-based image rotations."
warnings.warn(msg, ImportWarning)
no_numba = True
from skimage.transform import rotate
from multiprocessing import cpu_count
from .cosmetics import frame_pad
from ..conf.utils_conf import pool_map, iterable
from ..var import frame_center, frame_filter_lowpass
data_array = None # holds the (implicitly mem-shared) data array
def frame_rotate(array, angle, imlib='opencv', interpolation='lanczos4',
cxy=None, border_mode='constant', mask_val=np.nan,
edge_blend=None, interp_zeros=False, ker=1):
""" Rotates a frame or 2D array.
Parameters
----------
array : numpy ndarray
Input image, 2d array.
angle : float
Rotation angle.
imlib : {'opencv', 'skimage', 'vip-fft'}, str optional
Library used for image transformations. Opencv is faster than
Skimage or scipy.ndimage. 'vip-fft' corresponds to the FFT-based
rotation described in Larkin et al. (1997), and implemented in this
module. Best results are obtained with images without any sharp
intensity change (i.e. no numerical mask). Edge-blending and/or
zero-interpolation may help if sharp transitions are unavoidable.
interpolation : str, optional
[Only used for imlib='opencv' or imlib='skimage']
For Skimage the options are: 'nearneig', bilinear', 'biquadratic',
'bicubic', 'biquartic' or 'biquintic'. The 'nearneig' interpolation is
the fastest and the 'biquintic' the slowest. The 'nearneig' is the
poorer option for interpolation of noisy astronomical images.
For Opencv the options are: 'nearneig', 'bilinear', 'bicubic' or
'lanczos4'. The 'nearneig' interpolation is the fastest and the
'lanczos4' the slowest and more accurate. 'lanczos4' is the default for
Opencv and 'biquartic' for Skimage.
cxy : float, optional
Coordinates X,Y of the point with respect to which the rotation will be
performed. By default the rotation is done with respect to the center
of the frame.
border_mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}, str opt
Pixel extrapolation method for handling the borders. 'constant' for
padding with zeros. 'edge' for padding with the edge values of the
image. 'symmetric' for padding with the reflection of the vector
mirrored along the edge of the array. 'reflect' for padding with the
reflection of the vector mirrored on the first and last values of the
vector along each axis. 'wrap' for padding with the wrap of the vector
along the axis (the first values are used to pad the end and the end
values are used to pad the beginning). Default is 'constant'.
mask_val: flt, opt
If any numerical mask in the image to be rotated, what are its values?
Will only be used if a strategy to mitigate Gibbs effects is adopted -
see below.
edge_blend: str, opt {None,'noise','interp','noise+interp'}
Whether to blend the edges, by padding nans then inter/extrapolate them
with a gaussian filter. Slower but can significantly reduce ringing
artefacts from Gibbs phenomenon, in particular if several consecutive
rotations are involved in your image processing.
'noise': pad with small amplitude noise inferred from neighbours
'interp': interpolated from neighbouring pixels using Gaussian kernel.
'noise+interp': sum both components above at masked locations.
Original mask will be placed back after rotation.
interp_zeros: bool, opt
[only used if edge_blend is not None]
Whether to interpolate zeros in the frame before (de)rotation. Not
dealing with them can induce a Gibbs phenomenon near their location.
However, this flag should be false if rotating a binary mask.
ker: float, opt
Size of the Gaussian kernel used for interpolation.
Returns
-------
array_out : numpy ndarray
Resulting frame.
"""
if array.ndim != 2:
raise TypeError('Input array is not a frame or 2d array')
if edge_blend is None:
edge_blend = ''
if edge_blend!='' or imlib=='vip-fft':
# fill with nans
cy_ori, cx_ori = frame_center(array)
y_ori, x_ori = array.shape
if np.isnan(mask_val):
mask_ori = np.where(np.isnan(array))
else:
mask_ori = np.where(array==mask_val)
array_nan = array.copy()
array_zeros = array.copy()
if interp_zeros == 1 or mask_val!=0: # set to nans for interpolation
array_nan[np.where(array==mask_val)]=np.nan
else:
array_zeros[np.where(np.isnan(array))]=0
if 'noise' in edge_blend :
# evaluate std and med far from the star, avoiding nans
_, med, stddev = sigma_clipped_stats(array_nan, sigma=1.5,
cenfunc=np.nanmedian,
stdfunc=np.nanstd)
# pad and interpolate, about 1.2x original size
if imlib=='vip-fft':
fac=1.5
else:
fac=1.1
new_y = int(y_ori*fac)
new_x = int(x_ori*fac)
if y_ori%2 != new_y%2:
new_y += 1
if x_ori%2 != new_x%2:
new_x += 1
array_prep = np.empty([new_y,new_x])
array_prep1 = np.zeros([new_y,new_x])
array_prep[:] = np.nan
if 'interp' in edge_blend:
array_prep2 = array_prep.copy()
med=0 # local level will be added with Gaussian kernel
if 'noise' in edge_blend:
array_prep = np.random.normal(loc=med, scale=stddev,
size=(new_y,new_x))
cy, cx = frame_center(array_prep)
y0_p = int(cy-cy_ori)
y1_p = int(cy+cy_ori+1)
x0_p = int(cx-cx_ori)
x1_p = int(cx+cx_ori+1)
if interp_zeros:
array_prep[y0_p:y1_p,x0_p:x1_p] = array_nan.copy()
array_prep1[y0_p:y1_p,x0_p:x1_p] = array_nan.copy()
else:
array_prep[y0_p:y1_p,x0_p:x1_p] = array_zeros.copy()
# interpolate nans with a Gaussian filter
if 'interp' in edge_blend:
array_prep2[y0_p:y1_p,x0_p:x1_p] = array_nan.copy()
#gauss_ker1 = Gaussian2DKernel(x_stddev=int(array_nan.shape[0]/15))
# Lanczos4 requires 4 neighbours & default Gaussian box=8*stddev+1:
#gauss_ker2 = Gaussian2DKernel(x_stddev=1)
cond1 = array_prep1==0
cond2 = np.isnan(array_prep2)
new_nan = np.where(cond1&cond2)
mask_nan = np.where(np.isnan(array_prep2))
if not ker:
ker = array_nan.shape[0]/5
ker2 = 1
array_prep_corr1 = frame_filter_lowpass(array_prep2, mode='gauss',
fwhm_size=ker)
#interp_nan(array_prep2, kernel=gauss_ker1)
if 'noise' in edge_blend:
array_prep_corr2 = frame_filter_lowpass(array_prep2,
mode='gauss',
fwhm_size=ker2)
#interp_nan(array_prep2, kernel=gauss_ker2)
ori_nan = np.where(np.isnan(array_prep1))
array_prep[ori_nan] = array_prep_corr2[ori_nan]
array_prep[new_nan] += array_prep_corr1[new_nan]
else:
array_prep[mask_nan] = array_prep_corr1[mask_nan]
# finally pad zeros for 4x larger images before FFT
if imlib=='vip-fft':
array_prep, new_idx = frame_pad(array_prep, fac=4/fac, fillwith=0,
full_output=True)
y0 = new_idx[0]+y0_p
y1 = new_idx[0]+y1_p
x0 = new_idx[2]+x0_p
x1 = new_idx[2]+x1_p
else:
y0 = y0_p
y1 = y1_p
x0 = x0_p
x1 = x1_p
else:
array_prep = array.copy()
# residual (non-interp) nans should be set to 0 to avoid bug in rotation
array_prep[np.where(np.isnan(array_prep))] = 0
y, x = array_prep.shape
if cxy is None:
cy, cx = frame_center(array_prep)
elif edge_blend:
cx_rot, cy_rot = cxy
cx += cx_rot-cx_ori
cy += cy_rot-cy_ori
        if imlib == 'vip-fft' and (cy, cx) != frame_center(array_prep):
msg = "Case not yet implemented. Center image manually first"
raise ValueError(msg)
else:
cx, cy = cxy
if imlib == 'vip-fft':
array_out = rotate_fft(array_prep, angle)
elif imlib == 'skimage':
if interpolation == 'nearneig':
order = 0
elif interpolation == 'bilinear':
order = 1
elif interpolation == 'biquadratic':
order = 2
elif interpolation == 'bicubic':
order = 3
elif interpolation == 'biquartic' or interpolation == 'lanczos4':
order = 4
elif interpolation == 'biquintic':
order = 5
else:
raise ValueError('Skimage interpolation method not recognized')
if border_mode not in ['constant', 'edge', 'symmetric', 'reflect',
'wrap']:
raise ValueError('Skimage `border_mode` not recognized.')
min_val = np.min(array_prep)
im_temp = array_prep - min_val
max_val = np.max(im_temp)
im_temp /= max_val
array_out = rotate(im_temp, angle, order=order, center=cxy, cval=np.nan,
mode=border_mode)
array_out *= max_val
array_out += min_val
array_out = np.nan_to_num(array_out)
elif imlib == 'opencv':
if no_opencv:
msg = 'Opencv python bindings cannot be imported. Install opencv or'
msg += ' set imlib to skimage'
raise RuntimeError(msg)
if interpolation == 'bilinear':
intp = cv2.INTER_LINEAR
elif interpolation == 'bicubic':
intp= cv2.INTER_CUBIC
elif interpolation == 'nearneig':
intp = cv2.INTER_NEAREST
elif interpolation == 'lanczos4':
intp = cv2.INTER_LANCZOS4
else:
raise ValueError('Opencv interpolation method not recognized')
if border_mode == 'constant':
bormo = cv2.BORDER_CONSTANT # iiiiii|abcdefgh|iiiiiii
elif border_mode == 'edge':
bormo = cv2.BORDER_REPLICATE # aaaaaa|abcdefgh|hhhhhhh
elif border_mode == 'symmetric':
bormo = cv2.BORDER_REFLECT # fedcba|abcdefgh|hgfedcb
elif border_mode == 'reflect':
bormo = cv2.BORDER_REFLECT_101 # gfedcb|abcdefgh|gfedcba
elif border_mode == 'wrap':
bormo = cv2.BORDER_WRAP # cdefgh|abcdefgh|abcdefg
else:
raise ValueError('Opencv `border_mode` not recognized.')
M = cv2.getRotationMatrix2D((cx,cy), angle, 1)
array_out = cv2.warpAffine(array_prep.astype(np.float32), M, (x, y),
flags=intp, borderMode=bormo)
else:
raise ValueError('Image transformation library not recognized')
if edge_blend!='' or imlib =='vip-fft':
array_out = array_out[y0:y1,x0:x1] #remove padding
array_out[mask_ori] = mask_val # mask again original masked values
return array_out
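# Illustrative usage sketch for ``frame_rotate`` (hypothetical, not part of the
# original module): rotating a synthetic frame with and without edge blending
# to limit Gibbs ringing. Only module-level names defined above are assumed.
def _demo_frame_rotate():
    frame = np.random.normal(loc=1.0, scale=0.1, size=(101, 101))
    frame[40:60, 40:60] += 5.0  # bright square makes ringing visible
    rot_plain = frame_rotate(frame, 30., imlib='opencv')
    rot_blend = frame_rotate(frame, 30., imlib='opencv',
                             edge_blend='noise+interp')
    return rot_plain, rot_blend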
def cube_derotate(array, angle_list, imlib='opencv', interpolation='lanczos4',
cxy=None, nproc=1, border_mode='constant', mask_val=np.nan,
edge_blend=None, interp_zeros=False):
""" Rotates an cube (3d array or image sequence) providing a vector or
corresponding angles. Serves for rotating an ADI sequence to a common north
given a vector with the corresponding parallactic angles for each frame. By
default bicubic interpolation is used (opencv).
Parameters
----------
array : numpy ndarray
Input 3d array, cube.
angle_list : list
Vector containing the parallactic angles.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
cxy : tuple of int, optional
Coordinates X,Y of the point with respect to which the rotation will be
performed. By default the rotation is done with respect to the center
of the frames, as it is returned by the function
vip_hci.var.frame_center.
nproc : int, optional
Whether to rotate the frames in the sequence in a multi-processing
fashion. Only useful if the cube is significantly large (frame size and
number of frames).
border_mode : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
edge_blend : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
    interp_zeros : bool, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
Returns
-------
array_der : numpy ndarray
Resulting cube with de-rotated frames.
"""
if array.ndim != 3:
raise TypeError('Input array is not a cube or 3d array.')
n_frames = array.shape[0]
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
if nproc == 1:
array_der = np.zeros_like(array)
for i in range(n_frames):
array_der[i] = frame_rotate(array[i], -angle_list[i], imlib=imlib,
interpolation=interpolation, cxy=cxy,
border_mode=border_mode,
mask_val=mask_val,
edge_blend=edge_blend,
interp_zeros=interp_zeros)
elif nproc > 1:
global data_array
data_array = array
res = pool_map(nproc, _frame_rotate_mp, iterable(range(n_frames)),
angle_list, imlib, interpolation, cxy, border_mode,
mask_val, edge_blend, interp_zeros)
array_der = np.array(res)
return array_der
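# Minimal usage sketch for ``cube_derotate`` (hypothetical example): de-rotate
# a small ADI-like cube given its parallactic angles. Shapes and angles are
# made up for illustration only.
def _demo_cube_derotate():
    cube = np.random.normal(size=(10, 51, 51))
    parangs = np.linspace(-20., 20., num=10)  # one PA per frame
    # nproc=1 runs serially; nproc>1 distributes frames over processes
    return cube_derotate(cube, parangs, imlib='opencv', nproc=1)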
def _frame_rotate_mp(num_fr, angle_list, imlib, interpolation, cxy,
border_mode, mask_val, edge_blend, interp_zeros):
framerot = frame_rotate(data_array[num_fr], -angle_list[num_fr], imlib,
interpolation, cxy, border_mode, mask_val,
edge_blend, interp_zeros)
return framerot
def _find_indices_adi(angle_list, frame, thr, nframes=None, out_closest=False,
truncate=False, max_frames=200):
""" Returns the indices to be left in frames library for annular ADI median
subtraction, LOCI or annular PCA.
Parameters
----------
angle_list : numpy ndarray, 1d
Vector of parallactic angle (PA) for each frame.
frame : int
Index of the current frame for which we are applying the PA threshold.
thr : float
PA threshold.
nframes : int or None, optional
Exact number of indices to be left. For annular median-ADI subtraction,
where we keep the closest frames (after the PA threshold). If None then
all the indices are returned (after the PA threshold).
out_closest : bool, optional
If True then the function returns the indices of the 2 closest frames.
truncate : bool, optional
Useful for annular PCA, when we want to discard too far away frames and
avoid increasing the computational cost.
max_frames : int, optional
Max number of indices to be left. To be provided if ``truncate`` is
True (used e.g. in pca_annular).
Returns
-------
indices : numpy ndarray, 1d
Vector with the indices left.
If ``out_closest`` is True then the function returns instead:
index_prev, index_foll
"""
n = angle_list.shape[0]
index_prev = 0
index_foll = frame
for i in range(0, frame):
if np.abs(angle_list[frame] - angle_list[i]) < thr:
index_prev = i
break
else:
index_prev += 1
for k in range(frame, n):
if np.abs(angle_list[k] - angle_list[frame]) > thr:
index_foll = k
break
else:
index_foll += 1
if out_closest:
return index_prev, index_foll - 1
else:
if nframes is not None:
# For annular ADI median subtraction, returning n_frames closest
# indices (after PA thresholding)
window = nframes // 2
ind1 = index_prev - window
ind1 = max(ind1, 0)
ind2 = index_prev
ind3 = index_foll
ind4 = index_foll + window
ind4 = min(ind4, n)
indices = np.array(list(range(ind1, ind2)) +
list(range(ind3, ind4)), dtype='int32')
else:
# For annular PCA, returning all indices (after PA thresholding)
half1 = range(0, index_prev)
half2 = range(index_foll, n)
indices = np.array(list(half1) + list(half2), dtype='int32')
# The goal is to keep min(num_frames/2, ntrunc) in the library after
# discarding those based on the PA threshold
if truncate:
thr = min(n-1, max_frames)
all_indices = np.array(list(half1)+list(half2))
if len(all_indices) > thr:
# then truncate and update indices
# first sort by dPA
dPA = np.abs(angle_list[all_indices]-angle_list[frame])
sort_indices = all_indices[np.argsort(dPA)]
# keep the ntrunc first ones
good_indices = sort_indices[:thr]
# sort again, this time by increasing indices
indices = np.sort(good_indices)
return indices
def _compute_pa_thresh(ann_center, fwhm, delta_rot=1):
""" Computes the parallactic angle threshold [degrees]
Replacing approximation: delta_rot * (fwhm/ann_center) / np.pi * 180
"""
return np.rad2deg(2 * np.arctan(delta_rot * fwhm / (2 * ann_center)))
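# Quick numerical check (illustration only, not from the original file): the
# exact threshold above approaches the small-angle approximation
# delta_rot * (fwhm / ann_center) / np.pi * 180 as the annulus radius grows.
def _demo_pa_thresh():
    fwhm = 4.0
    for ann_center in (2 * fwhm, 10 * fwhm, 50 * fwhm):
        exact = _compute_pa_thresh(ann_center, fwhm)
        approx = (fwhm / ann_center) / np.pi * 180
        print(ann_center, exact, approx)  # gap shrinks with radius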
def _define_annuli(angle_list, ann, n_annuli, fwhm, radius_int, annulus_width,
delta_rot, n_segments, verbose, strict=False):
""" Function that defines the annuli geometry using the input parameters.
Returns the parallactic angle threshold, the inner radius and the annulus
center for each annulus.
"""
if ann == n_annuli - 1:
inner_radius = radius_int + (ann * annulus_width - 1)
else:
inner_radius = radius_int + ann * annulus_width
ann_center = inner_radius + (annulus_width / 2)
pa_threshold = _compute_pa_thresh(ann_center, fwhm, delta_rot)
mid_range = np.abs(np.amax(angle_list) - np.amin(angle_list)) / 2
if pa_threshold >= mid_range - mid_range * 0.1:
new_pa_th = float(mid_range - mid_range * 0.1)
        msg = 'WARNING: PA threshold {:.2f} is too big, recommended '
        msg += 'value for annulus {:.0f}: {:.2f}'
if strict:
print(msg.format(pa_threshold,ann, new_pa_th))
#raise ValueError(msg.format(pa_threshold,ann, new_pa_th))
else:
print('PA threshold {:.2f} is likely too big, will be set to '
'{:.2f}'.format(pa_threshold, new_pa_th))
pa_threshold = new_pa_th
if verbose:
if pa_threshold > 0:
print('Ann {} PA thresh: {:5.2f} Ann center: '
'{:3.0f} N segments: {} '.format(ann + 1, pa_threshold,
ann_center, n_segments))
else:
print('Ann {} Ann center: {:3.0f} N segments: '
'{} '.format(ann + 1, ann_center, n_segments))
return pa_threshold, inner_radius, ann_center
def rotate_fft(array, angle):
""" Rotates a frame or 2D array using Fourier transform phases:
Rotation = 3 consecutive lin. shears = 3 consecutive FFT phase shifts
See details in Larkin et al. (1997) and Hagelberg et al. (2016).
Note: this is significantly slower than interpolation methods
(e.g. opencv/lanczos4 or ndimage), but preserves the flux better
(by construction it preserves the total power). It is more prone to
large-scale Gibbs artefacts, so make sure no sharp edge is present in
the image to be rotated.
! Warning: if input frame has even dimensions, the center of rotation
will NOT be between the 4 central pixels, instead it will be on the top
right of those 4 pixels. Make sure your images are centered with
respect to that pixel before rotation.
Parameters
----------
array : numpy ndarray
Input image, 2d array.
angle : float
Rotation angle.
Returns
-------
array_out : numpy ndarray
Resulting frame.
"""
y_ori, x_ori = array.shape
while angle<0:
angle+=360
while angle>360:
angle-=360
if angle>45:
dangle = angle%90
if dangle>45:
dangle = -(90-dangle)
        nangle = int(np.rint(angle/90))
array_in = np.rot90(array, nangle)
else:
dangle = angle
array_in = array.copy()
if y_ori%2 or x_ori%2:
# NO NEED TO SHIFT BY 0.5px: FFT assumes rot. center on cx+0.5, cy+0.5!
array_in = array_in[:-1,:-1]
a = np.tan(np.deg2rad(dangle)/2)
b = -np.sin(np.deg2rad(dangle))
ori_y, ori_x = array_in.shape
cy, cx = frame_center(array)
arr_xy = np.mgrid[0:ori_y,0:ori_x]
arr_y = arr_xy[0]-cy
arr_x = arr_xy[1]-cx
# TODO: FFT padding not currently working properly. Only option '0' works.
s_x = _fft_shear(array_in, arr_x, a, ax=1, pad=0)
s_xy = _fft_shear(s_x, arr_y, b, ax=0, pad=0)
s_xyx = _fft_shear(s_xy, arr_x, a, ax=1, pad=0)
if y_ori%2 or x_ori%2:
# shift + crop back to odd dimensions , using FFT
array_out = np.zeros([s_xyx.shape[0]+1,s_xyx.shape[1]+1])
# NO NEED TO SHIFT BY 0.5px: FFT assumes rot. center on cx+0.5, cy+0.5!
array_out[:-1,:-1] = np.real(s_xyx)
else:
array_out = np.real(s_xyx)
return array_out
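# Sanity-check sketch for ``rotate_fft`` (hypothetical): the shear-based FFT
# rotation preserves total power by construction, so the summed flux of a
# smooth, edge-free frame should match before and after rotation.
def _demo_rotate_fft():
    yy, xx = np.mgrid[0:100, 0:100]
    frame = np.exp(-((xx - 49.5) ** 2 + (yy - 49.5) ** 2) / (2 * 10. ** 2))
    rotated = rotate_fft(frame, 33.)
    print(frame.sum(), rotated.sum())  # expected to agree closely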
def _fft_shear(arr, arr_ori, c, ax, pad=0, shift_ini=True):
ax2=1-ax%2
    freqs = np.fft.fftfreq(arr_ori.shape[ax2])
#! /usr/bin/env python
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import nose
from nose.tools import assert_equal, assert_true
import numpy as np
import pymatgen as pmg
from sknano.core import rezero_array
from sknano.core.crystallography import Crystal2DLattice, Crystal3DLattice, \
Reciprocal2DLattice, Reciprocal3DLattice
# from sknano.core.atoms import Atom, Atoms, XAtom, XAtoms
from sknano.core.math import Point, transformation_matrix, zhat, \
rotation_matrix
from sknano.core.refdata import aCC, element_data
r_CC_vdw = element_data['C']['VanDerWaalsRadius']
def test1():
dlattice = Crystal2DLattice(a=4.0, b=8.0, gamma=120)
orientation_matrix = rotation_matrix(angle=np.pi/6, axis=zhat)
rlattice = \
Reciprocal2DLattice(a_star=dlattice.reciprocal_lattice.a_star,
b_star=dlattice.reciprocal_lattice.b_star,
gamma_star=dlattice.reciprocal_lattice.gamma_star,
orientation_matrix=orientation_matrix)
print('\ndlattice.matrix:\n{}'.format(dlattice.matrix))
print('\nrlattice.matrix:\n{}'.format(rlattice.matrix))
print('\ndlattice.reciprocal_lattice.matrix:\n{}'.format(
dlattice.reciprocal_lattice.matrix))
print('\nrlattice.reciprocal_lattice.matrix:\n{}'.format(
rlattice.reciprocal_lattice.matrix))
assert_true(np.allclose(dlattice.matrix,
rlattice.reciprocal_lattice.matrix))
assert_true(np.allclose(dlattice.reciprocal_lattice.matrix,
rlattice.matrix))
def test2():
a = np.sqrt(3) * aCC
latt = Crystal2DLattice(a=a, b=a, gamma=120)
hexlatt = Crystal2DLattice.hexagonal(a)
assert_equal(latt, hexlatt)
def test3():
a = np.sqrt(3) * aCC
latt = Crystal2DLattice(a=a, b=a, gamma=90)
square = Crystal2DLattice.square(a)
assert_equal(latt, square)
def test4():
a = np.sqrt(3) * aCC
latt = Crystal2DLattice(a=a, b=a, gamma=60)
a1 = latt.a1
a2 = latt.a2
rotated_a1 = a1.copy()
rotated_a2 = a2.copy()
xfrm = transformation_matrix(angle=-np.pi / 6)
rotated_a1.rotate(transform_matrix=xfrm)
rotated_a2.rotate(transform_matrix=xfrm)
latt.rotate(angle=-np.pi / 6)
assert_equal(latt.a1, rotated_a1)
assert_equal(latt.a2, rotated_a2)
assert_true(np.allclose(latt.orientation_matrix, xfrm))
rotated_latt = Crystal2DLattice(a1=rotated_a1, a2=rotated_a2)
assert_equal(rotated_a1, rotated_latt.a1)
assert_equal(rotated_a2, rotated_latt.a2)
assert_true(np.allclose(latt.orientation_matrix,
rotated_latt.orientation_matrix))
def test5():
    a = np.sqrt(3) * aCC
"""Convert Senate speech data from 114th Congress to bag of words format.
The data is provided by [1]. Specifically, we use the `hein-daily` data. To
run this script, make sure the relevant files are in
`data/senate-speeches-114/raw/`. The files needed for this script are
`speeches_114.txt`, `descr_114.txt`, and `114_SpeakerMap.txt`.
#### References
[1]: Gentzkow, Matthew, <NAME>, and <NAME>. Congressional Record
for the 43rd-114th Congresses: Parsed Speeches and Phrase Counts. Palo
Alto, CA: Stanford Libraries [distributor], 2018-01-16.
https://data.stanford.edu/congress_text
"""
import os
import setup_utils as utils
import numpy as np
import pandas as pd
from scipy import sparse
from sklearn.feature_extraction.text import CountVectorizer
project_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir))
data_dir = os.path.join(project_dir, "data/senate-speeches-114/raw")
save_dir = os.path.join(project_dir, "data/senate-speeches-114/clean")
speeches = pd.read_csv(os.path.join(data_dir, 'speeches_114.txt'),
encoding="ISO-8859-1",
sep="|",
error_bad_lines=False)
description = pd.read_csv(os.path.join(data_dir, 'descr_114.txt'),
encoding="ISO-8859-1",
sep="|")
speaker_map = pd.read_csv(os.path.join(data_dir, '114_SpeakerMap.txt'),
encoding="ISO-8859-1",
sep="|")
# Merge all data into a single dataframe.
merged_df = speeches.merge(description,
left_on='speech_id',
right_on='speech_id')
df = merged_df.merge(speaker_map, left_on='speech_id', right_on='speech_id')
# Only look at senate speeches.
senate_df = df[df['chamber_x'] == 'S']
speaker = np.array(
[' '.join([first, last]) for first, last in
list(zip(np.array(senate_df['firstname']),
np.array(senate_df['lastname'])))])
speeches = np.array(senate_df['speech'])
party = np.array(senate_df['party'])
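# The rest of the original pipeline is not shown above; a minimal sketch of the
# bag-of-words step the script builds toward could look as follows. The
# stopword choice and vocabulary size are illustrative assumptions only.
count_vectorizer = CountVectorizer(stop_words='english', max_features=5000)
counts = count_vectorizer.fit_transform(speeches)  # sparse doc-term matrix
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
sparse.save_npz(os.path.join(save_dir, 'counts.npz'), counts)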
import numpy as np
import torch
from ml.core import _set_licchavi, _train_predict, ml_run
from ml.data_utility import (
expand_dic,
expand_tens,
get_all_vids,
get_mask,
rescale_rating,
reverse_idxs,
sort_by_first,
)
from ml.dev.fake_data import generate_data
from ml.handle_data import distribute_data, select_criteria, shape_data
from ml.licchavi import Licchavi, get_model, get_s
from ml.losses import _approx_bbt_loss, _bbt_loss, get_s_loss, model_norm, models_dist
from ml.metrics import (
_random_signs,
check_equilibrium_glob,
check_equilibrium_loc,
extract_grad,
get_uncertainty_glob,
get_uncertainty_loc,
scalar_product,
)
"""
Test module for ml
Main file is "ml_train.py"
"""
TEST_DATA = [
[1, 100, 101, "test", 10, 0],
[1, 101, 102, "test", 10, 0],
[1, 104, 105, "test", 10, 0],
[0, 100, 101, "test", -10, 0],
[1, 104, 105, "test", 37/5 - 10, 0],
[2, 104, 105, "test", 10, 0],
[7, 966, 965, "test", 4 / 5 - 10, 0],
[0, 100, 101, "largely_recommended", 10, 0],
]
CRITERIAS = ["test"]
def _dic_inclusion(a, b):
"""checks if a is included in
a (dictionnary)
b (dictionnary)
Returns:
(bool): True if a included in b
"""
return all([item in b.items() for item in a.items()])
# ========== unit tests ===============
# ---------- data_utility.py ----------------
def test_rescale_rating():
assert rescale_rating(10) == 1
assert rescale_rating(-10) == -1
def test_get_all_vids():
size = 50
    input = np.reshape(np.arange(4 * size), (size, 4))
from model import adjusted_hstm, topic_model, supervised_lda as slda
from model.model_trainer import ModelTrainer
from evaluation.evaluator import Evaluator
# from evaluation.eval_topics import get_perplexity, get_normalized_pmi, get_topics_from_model, get_supervised_topics_from_model, shuffle_topics
import util
import argparse
import sys
import os
import numpy as np
from data.dataset import TextResponseDataset
import torch
from torch.utils.data import DataLoader
from absl import flags
from absl import app
def main(argv):
proc_file = FLAGS.procfile
pretraining_file = FLAGS.pretraining_file
base_dataset = FLAGS.data
if FLAGS.data == 'framing_corpus':
base_dataset = FLAGS.data + '_' + FLAGS.framing_topic
base_pretraining = '_pretraining.npz'
if FLAGS.pretrained_prodlda:
base_pretraining = '_prodlda_pretraining.npz'
if pretraining_file == "":
pretraining_file = '../dat/proc/' + base_dataset + base_pretraining
if proc_file == "":
proc_file = '../dat/proc/' + base_dataset + '_proc.npz'
num_topics = FLAGS.num_topics
if FLAGS.data == 'amazon':
num_topics = 30
elif FLAGS.data == 'yelp':
num_topics = 30
elif FLAGS.data == 'amazon_binary':
num_topics = 20
elif FLAGS.data == 'framing_corpus':
num_topics = 10
label_is_bool = False
if FLAGS.data in TextResponseDataset.CLASSIFICATION_SETTINGS:
label_is_bool = True
print("Running model", FLAGS.model, '..'*20)
if FLAGS.pretrained or FLAGS.pretrained_prodlda or FLAGS.model == 'hstm-all-2stage':
array = np.load(pretraining_file)
        beta = np.log(array['beta'])
#
# This file is part of the profilerTools suite (see
# https://github.com/mssm-labmmol/profiler).
#
# Copyright (c) 2020 mssm-labmmol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from numpy.linalg import norm
from math import sqrt
from .fastmath import fastCross
def calcDistance(x1, y1, z1, x2, y2, z2):
diff = np.array([x2, y2, z2]) - np.array([x1, y1, z1])
out = sqrt(np.dot(diff, diff))
return out
def calcDistance2(x1, y1, z1, x2, y2, z2):
diff = np.array([x2, y2, z2]) - np.array([x1, y1, z1])
out = np.dot(diff, diff)
return out
def calcNorm(vec):
return calcDistance(vec[0], vec[1], vec[2], 0, 0, 0)
def calcAngle(x1, y1, z1, x2, y2, z2, x3, y3, z3):
v1 = np.array([x1, y1, z1]) - np.array([x2, y2, z2])
v2 = np.array([x3, y3, z3]) - np.array([x2, y2, z2])
out = np.degrees(
np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))))
return out
def calcAngleRadians(x1, y1, z1, x2, y2, z2, x3, y3, z3):
v1 = np.array([x1, y1, z1]) - np.array([x2, y2, z2])
v2 = np.array([x3, y3, z3]) - np.array([x2, y2, z2])
return np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
def calcCosine(x1, y1, z1, x2, y2, z2, x3, y3, z3):
v1 = np.array([x1, y1, z1]) - np.array([x2, y2, z2])
v2 = np.array([x3, y3, z3]) - np.array([x2, y2, z2])
out = np.dot(v1, v2) / (sqrt(np.dot(v1, v1)) * sqrt(np.dot(v2, v2)))
return out
# From: https://stackoverflow.com/questions/20305272/dihedral-torsion-angle-from-four-points-in-cartesian-coordinates-in-python
def calcDihedral(x1, y1, z1, x2, y2, z2, x3, y3, z3, x4, y4, z4):
"""Praxeolitic formula
1 sqrt, 1 cross product"""
p0 = np.array([x1, y1, z1])
p1 = np.array([x2, y2, z2])
p2 = np.array([x3, y3, z3])
p3 = np.array([x4, y4, z4])
b0 = -1.0 * (p1 - p0)
b1 = p2 - p1
b2 = p3 - p2
# normalize b1 so that it does not influence magnitude of vector
# rejections that come next
b1 /= sqrt(np.dot(b1, b1))
# vector rejections
# v = projection of b0 onto plane perpendicular to b1
# = b0 minus component that aligns with b1
# w = projection of b2 onto plane perpendicular to b1
# = b2 minus component that aligns with b1
v = b0 - np.dot(b0, b1) * b1
w = b2 - np.dot(b2, b1) * b1
# angle between v and w in a plane is the torsion angle
# v and w may not be normalized but that's fine since tan is y/x
x = np.dot(v, w)
y = np.dot(fastCross(np.array([
b1,
]), np.array([
v,
])), w)
out = np.degrees(np.arctan2(y, x))
return out
def calcDihedralRadians(x1, y1, z1, x2, y2, z2, x3, y3, z3, x4, y4, z4):
"""Praxeolitic formula
1 sqrt, 1 cross product"""
p0 = np.array([x1, y1, z1])
p1 = np.array([x2, y2, z2])
p2 = np.array([x3, y3, z3])
p3 = np.array([x4, y4, z4])
b0 = -1.0 * (p1 - p0)
b1 = p2 - p1
b2 = p3 - p2
# normalize b1 so that it does not influence magnitude of vector
# rejections that come next
b1 /= sqrt(np.dot(b1, b1))
# vector rejections
# v = projection of b0 onto plane perpendicular to b1
# = b0 minus component that aligns with b1
# w = projection of b2 onto plane perpendicular to b1
# = b2 minus component that aligns with b1
v = b0 - np.dot(b0, b1) * b1
w = b2 - np.dot(b2, b1) * b1
# angle between v and w in a plane is the torsion angle
# v and w may not be normalized but that's fine since tan is y/x
x = np.dot(v, w)
y = np.dot(fastCross(np.array([
b1,
]), np.array([
v,
])), w)
    return np.arctan2(y, x)
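# Small self-check of the geometry helpers above (illustrative, not part of
# the original module): unit distance, a right angle, and a 90-degree dihedral.
def _demo_geometry_helpers():
    print(calcDistance(0, 0, 0, 1, 0, 0))            # 1.0
    print(calcAngle(1, 0, 0, 0, 0, 0, 0, 1, 0))      # 90.0 degrees
    # staircase configuration: dihedral of +/-90 deg (sign set by convention)
    print(calcDihedral(0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0))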
# This is a script that analyses the multimode simulation results.
# This simulates a RZ multimode periodic plasma wave.
# The electric field from the simulation is compared to the analytic value
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pywarpx import picmi
constants = picmi.constants
nr = 64
nz = 200
rmin = 0.e0
zmin = 0.e0
rmax = +20.e-6
zmax = +40.e-6
# Parameters describing particle distribution
density = 2.e24
epsilon0 = 0.001*constants.c
epsilon1 = 0.001*constants.c
epsilon2 = 0.001*constants.c
w0 = 5.e-6
n_osc_z = 3
# Wave vector of the wave
k0 = 2.*np.pi*n_osc_z/(zmax - zmin)
# Plasma frequency
wp = np.sqrt((density*constants.q_e**2)/(constants.m_e*constants.ep0))
kp = wp/constants.c
uniform_plasma = picmi.UniformDistribution(density = density,
upper_bound = [+18e-6, +18e-6, None],
directed_velocity = [0., 0., 0.])
momentum_expressions = ["""+ epsilon0/kp*2*x/w0**2*exp(-(x**2+y**2)/w0**2)*sin(k0*z)
- epsilon1/kp*2/w0*exp(-(x**2+y**2)/w0**2)*sin(k0*z)
+ epsilon1/kp*4*x**2/w0**3*exp(-(x**2+y**2)/w0**2)*sin(k0*z)
- epsilon2/kp*8*x/w0**2*exp(-(x**2+y**2)/w0**2)*sin(k0*z)
+ epsilon2/kp*8*x*(x**2-y**2)/w0**4*exp(-(x**2+y**2)/w0**2)*sin(k0*z)""",
"""+ epsilon0/kp*2*y/w0**2*exp(-(x**2+y**2)/w0**2)*sin(k0*z)
+ epsilon1/kp*4*x*y/w0**3*exp(-(x**2+y**2)/w0**2)*sin(k0*z)
+ epsilon2/kp*8*y/w0**2*exp(-(x**2+y**2)/w0**2)*sin(k0*z)
+ epsilon2/kp*8*y*(x**2-y**2)/w0**4*exp(-(x**2+y**2)/w0**2)*sin(k0*z)""",
"""- epsilon0/kp*k0*exp(-(x**2+y**2)/w0**2)*cos(k0*z)
- epsilon1/kp*k0*2*x/w0*exp(-(x**2+y**2)/w0**2)*cos(k0*z)
- epsilon2/kp*k0*4*(x**2-y**2)/w0**2*exp(-(x**2+y**2)/w0**2)*cos(k0*z)"""]
analytic_plasma = picmi.AnalyticDistribution(density_expression = density,
upper_bound = [+18e-6, +18e-6, None],
epsilon0 = epsilon0,
epsilon1 = epsilon1,
epsilon2 = epsilon2,
kp = kp,
k0 = k0,
w0 = w0,
momentum_expressions = momentum_expressions)
electrons = picmi.Species(particle_type='electron', name='electrons', initial_distribution=analytic_plasma)
protons = picmi.Species(particle_type='proton', name='protons', initial_distribution=uniform_plasma)
grid = picmi.CylindricalGrid(number_of_cells = [nr, nz],
n_azimuthal_modes = 3,
lower_bound = [rmin, zmin],
upper_bound = [rmax, zmax],
lower_boundary_conditions = ['dirichlet', 'periodic'],
upper_boundary_conditions = ['dirichlet', 'periodic'],
moving_window_zvelocity = 0.,
warpx_max_grid_size=64)
solver = picmi.ElectromagneticSolver(grid=grid, cfl=1.)
sim = picmi.Simulation(solver = solver,
max_steps = 40,
verbose = 1,
warpx_plot_int = 40,
warpx_current_deposition_algo = 'esirkepov',
warpx_field_gathering_algo = 'energy-conserving',
warpx_particle_pusher_algo = 'boris')
sim.add_species(electrons, layout=picmi.GriddedLayout(n_macroparticle_per_cell=[2,16,2], grid=grid))
sim.add_species(protons, layout=picmi.GriddedLayout(n_macroparticle_per_cell=[2,16,2], grid=grid))
# write_inputs will create an inputs file that can be used to run
# with the compiled version.
#sim.write_input_file(file_name='inputsrz_from_PICMI')
# Alternatively, sim.step will run WarpX, controlling it from Python
sim.step()
# Below is WarpX specific code to check the results.
import pywarpx
from pywarpx.fields import *
def calcEr( z, r, k0, w0, wp, t, epsilons) :
"""
Return the radial electric field as an array
of the same length as z and r, in the half-plane theta=0
"""
Er_array = (
epsilons[0] * constants.m_e*constants.c/constants.q_e * 2*r/w0**2 *
np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.sin( wp*t )
- epsilons[1] * constants.m_e*constants.c/constants.q_e * 2/w0 *
np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.sin( wp*t )
+ epsilons[1] * constants.m_e*constants.c/constants.q_e * 4*r**2/w0**3 *
np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.sin( wp*t )
- epsilons[2] * constants.m_e*constants.c/constants.q_e * 8*r/w0**2 *
np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.sin( wp*t )
+ epsilons[2] * constants.m_e*constants.c/constants.q_e * 8*r**3/w0**4 *
np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.sin( wp*t ))
return( Er_array )
def calcEz( z, r, k0, w0, wp, t, epsilons) :
"""
Return the longitudinal electric field as an array
of the same length as z and r, in the half-plane theta=0
"""
Ez_array = (
- epsilons[0] * constants.m_e*constants.c/constants.q_e * k0 *
np.exp( -r**2/w0**2 ) * np.cos( k0*z ) * np.sin( wp*t )
- epsilons[1] * constants.m_e*constants.c/constants.q_e * k0 * 2*r/w0 *
np.exp( -r**2/w0**2 ) * np.cos( k0*z ) * np.sin( wp*t )
- epsilons[2] * constants.m_e*constants.c/constants.q_e * k0 * 4*r**2/w0**2 *
        np.exp( -r**2/w0**2 ) * np.cos( k0*z ) * np.sin( wp*t ))
    return( Ez_array )
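# Hypothetical illustration (not part of the original script): the analytic
# fields above can be evaluated on an r-z grid for comparison with the
# simulation output; the evaluation time below is arbitrary.
def _demo_analytic_fields():
    rr, zz = np.meshgrid(np.linspace(rmin, rmax, nr),
                         np.linspace(zmin, zmax, nz))
    t_eval = 10.e-15  # illustrative time only
    return calcEr(zz, rr, k0, w0, wp, t_eval, [epsilon0, epsilon1, epsilon2])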
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import subprocess
import csv
import scipy
import scipy.stats
import aifc
import read_aif
data_folder = sys.argv[1]
N_subjects = 21
result_folder = sys.argv[2]
if not os.path.exists(result_folder):
os.mkdir(result_folder)
#calculate and read behavioural results into behaviouraldict:
#{'S01': {'snareCue_times': [46.28689342,...], ...}, 'S02': {...} }
behaviouraldict = {}
for i in range(1, N_subjects+1):
# load the results into a dictionary
try:
with np.load(os.path.join(result_folder,'S%02d' % i,
'behavioural_results.npz'), allow_pickle=True) as behave_file:
behaviouraldict['S%02d' % i] = dict(behave_file)
except:
print('Please run read_aif.py for every subjects first.')
###1. plot performance vs musical background:
#read subject background (LQ and music qualification)
#background is a dict {"N_subjects":[LQ, Quali, Level, years]}
background = {}
with open(os.path.join(data_folder,'additionalSubjectInfo.csv'),'r') as infile:
reader = csv.DictReader(infile, fieldnames=None, delimiter=';')
for row in reader:
key = "S%02d" % int(row['Subjectnr']) #same format as behaviouraldict
value = [int(row['LQ']),int(row['MusicQualification']),
int(row['MusicianshipLevel']),int(row['TrainingYears'])]
background[key] = value
raw_musicscores = np.array([v for k,v in sorted(background.items())])
z_musicscores = (raw_musicscores - np.mean(raw_musicscores,0)
)/raw_musicscores.std(0)
musicscore = z_musicscores[:,1:].mean(1) # do not include the LQ
snare_abs_performance = np.zeros(N_subjects)
snare_mean_performance = np.zeros(N_subjects)
snare_se_performance = np.zeros(N_subjects)
wb_abs_performance = np.zeros(N_subjects)
wb_mean_performance = np.zeros(N_subjects)
wb_se_performance = np.zeros(N_subjects)
snare_rel_performance = np.zeros(N_subjects)
wb_rel_performance = np.zeros(N_subjects)
for k,v in sorted(behaviouraldict.items()):
i = int(k[1:])-1 #'S01'=> entry 0
snaredev = v['snare_deviation']
snaredev = snaredev[np.isfinite(snaredev)]
wbdev = v['wdBlk_deviation']
wbdev = wbdev[np.isfinite(wbdev)]
snare_abs_performance[i] = np.abs(snaredev).mean()
snare_mean_performance[i] = snaredev.mean()
snare_se_performance[i] = snaredev.std()/np.sqrt(len(snare_mean_performance))
wb_abs_performance[i] = np.abs(wbdev).mean()
wb_mean_performance[i] = wbdev.mean()
wb_se_performance[i] = wbdev.std()/np.sqrt(len(wb_mean_performance))
    snare_rel_performance[i] = np.std(snaredev)
from nibabel import four_to_three
from nibabel.processing import resample_to_output, resample_from_to
from skimage.measure import regionprops, label
from skimage.transform import resize
from tensorflow.python.keras.models import load_model
from scipy.ndimage import zoom
import os
import nibabel as nib
from os.path import join
import numpy as np
import sys
from shutil import copy
from math import ceil, floor
from copy import deepcopy
from segmentation.src.Utils.volume_utilities import padding_for_inference, padding_for_inference_both_ends
from tqdm import tqdm
def run_predictions(data, model_path, parameters):
"""
Only the prediction is done in this function, possible thresholdings and re-sampling are not included here.
:param data:
:return:
"""
return __run_predictions_tensorflow(data, model_path, parameters)
def __run_predictions_tensorflow(data, model_path, parameters):
model = load_model(model_path, compile=False)
whole_input_at_once = False
if len(parameters.new_axial_size) == 3:
whole_input_at_once = True
final_result = None
if whole_input_at_once:
final_result = __run_predictions_whole(data=data, model=model,
deep_supervision=parameters.training_deep_supervision)
else:
final_result = __run_predictions_slabbed(data=data, model=model, parameters=parameters,
deep_supervision=parameters.training_deep_supervision)
return final_result
def __run_predictions_whole(data, model, deep_supervision=False):
data_prep = np.expand_dims(data, axis=0)
data_prep = np.expand_dims(data_prep, axis=-1)
predictions = model.predict(data_prep)
if deep_supervision:
return predictions[0][0]
else:
return predictions[0]
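# Shape walk-through for the whole-volume path above (illustrative helper, not
# in the original file), assuming a single-channel 3D input: (H, W, D) becomes
# (1, H, W, D, 1) for the Keras model, and predictions[0] drops the batch axis.
def _demo_whole_input_shapes():
    data = np.zeros((64, 64, 32))
    prep = np.expand_dims(np.expand_dims(data, axis=0), axis=-1)
    print(prep.shape)  # (1, 64, 64, 32, 1)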
def __run_predictions_slabbed(data, model, parameters, deep_supervision=False):
slicing_plane = parameters.slicing_plane
slab_size = parameters.training_slab_size
new_axial_size = parameters.new_axial_size
if parameters.swap_training_input:
tmp = deepcopy(new_axial_size)
new_axial_size[0] = tmp[1]
new_axial_size[1] = tmp[0]
upper_boundary = data.shape[2]
if slicing_plane == 'sagittal':
upper_boundary = data.shape[0]
elif slicing_plane == 'coronal':
upper_boundary = data.shape[1]
final_result = np.zeros(data.shape + (parameters.training_nb_classes,))
data = np.expand_dims(data, axis=-1)
count = 0
if parameters.predictions_non_overlapping:
data, pad_value = padding_for_inference(data=data, slab_size=slab_size, slicing_plane=slicing_plane)
scale = ceil(upper_boundary / slab_size)
unpad = False
for chunk in tqdm(range(scale)):
if chunk == scale-1 and pad_value != 0:
unpad = True
if slicing_plane == 'axial':
slab_CT = data[:, :, int(chunk * slab_size):int((chunk + 1) * slab_size), 0]
elif slicing_plane == 'sagittal':
tmp = data[int(chunk * slab_size):int((chunk + 1) * slab_size), :, :, 0]
slab_CT = tmp.transpose((1, 2, 0))
elif slicing_plane == 'coronal':
tmp = data[:, int(chunk * slab_size):int((chunk + 1) * slab_size), :, 0]
slab_CT = tmp.transpose((0, 2, 1))
slab_CT = np.expand_dims(np.expand_dims(slab_CT, axis=0), axis=-1)
if parameters.fix_orientation:
slab_CT = np.transpose(slab_CT, axes=(0, 3, 1, 2, 4))
slab_CT_pred = model.predict(slab_CT)
if parameters.fix_orientation:
slab_CT_pred = np.transpose(slab_CT_pred, axes=(0, 2, 3, 1, 4))
if not unpad:
for c in range(0, slab_CT_pred.shape[-1]):
if slicing_plane == 'axial':
final_result[:, :, int(chunk * slab_size):int((chunk + 1) * slab_size), c] = \
slab_CT_pred[0][:, :, :slab_size, c]
elif slicing_plane == 'sagittal':
final_result[int(chunk * slab_size):int((chunk + 1) * slab_size), :, :, c] = \
slab_CT_pred[0][:, :, :slab_size, c].transpose((2, 0, 1))
elif slicing_plane == 'coronal':
final_result[:, int(chunk * slab_size):int((chunk + 1) * slab_size), :, c] = \
slab_CT_pred[0][:, :, :slab_size, c].transpose((0, 2, 1))
else:
for c in range(0, slab_CT_pred.shape[-1]):
if slicing_plane == 'axial':
final_result[:, :, int(chunk * slab_size):, c] = \
slab_CT_pred[0][:, :, :slab_size-pad_value, c]
elif slicing_plane == 'sagittal':
final_result[int(chunk * slab_size):, :, :, c] = \
slab_CT_pred[0][:, :, :slab_size-pad_value, c].transpose((2, 0, 1))
elif slicing_plane == 'coronal':
final_result[:, int(chunk * slab_size):, :, c] = \
slab_CT_pred[0][:, :, :slab_size-pad_value, c].transpose((0, 2, 1))
count = count + 1
else:
if slab_size == 1:
for slice in tqdm(range(0, data.shape[2])):
slab_CT = data[:, :, slice, 0]
if np.sum(slab_CT > 0.1) == 0:
continue
slab_CT_pred = model.predict(np.reshape(slab_CT, (1, new_axial_size[0], new_axial_size[1], 1)))
for c in range(0, slab_CT_pred.shape[-1]):
final_result[:, :, slice, c] = slab_CT_pred[:, :, c]
else:
data = padding_for_inference_both_ends(data=data, slab_size=slab_size, slicing_plane=slicing_plane)
half_slab_size = int(slab_size / 2)
for slice in tqdm(range(half_slab_size, upper_boundary)):
if slicing_plane == 'axial':
slab_CT = data[:, :, slice - half_slab_size:slice + half_slab_size, 0]
elif slicing_plane == 'sagittal':
slab_CT = data[slice - half_slab_size:slice + half_slab_size, :, :, 0]
slab_CT = slab_CT.transpose((1, 2, 0))
elif slicing_plane == 'coronal':
slab_CT = data[:, slice - half_slab_size:slice + half_slab_size, :, 0]
slab_CT = slab_CT.transpose((0, 2, 1))
slab_CT = np.reshape(slab_CT, (1, new_axial_size[0], new_axial_size[1], slab_size, 1))
                if np.sum(slab_CT > 0.1) == 0:
                    continue
"""
Implementations of the IPFP algorithm to solve for equilibrium and do comparative statics
in several variants of the `Choo and Siow 2006 <https://www.jstor.org/stable/10.1086/498585?seq=1>`_ model:
* homoskedastic with singles (as in CS 2006)
* homoskedastic without singles
* gender-heteroskedastic: with a scale parameter on the error term for women
* gender- and type-heteroskedastic: with a scale parameter on the error term for women
each solver, when fed the joint surplus and margins,
returns the equilibrium matching patterns, the adding-up errors on the margins,
and if requested (gr=True) the derivatives of the matching patterns in all primitives.
"""
import numpy as np
from math import sqrt
import sys
import scipy.linalg as spla
from ipfp_utils import print_stars, npexp, der_npexp, npmaxabs, \
nplog, nppow, der_nppow, nprepeat_col, nprepeat_row, describe_array
def ipfp_homo_nosingles_solver(Phi, men_margins, women_margins,
tol=1e-9, gr=False, verbose=False,
maxiter=1000):
"""
solve for equilibrium in a Choo and Siow market without singles
given systematic surplus and margins
:param np.array Phi: matrix of systematic surplus, shape (ncat_men, ncat_women)
:param np.array men_margins: vector of men margins, shape (ncat_men)
:param np.array women_margins: vector of women margins, shape (ncat_women)
:param float tol: tolerance on change in solution
:param boolean gr: if True, also evaluate derivatives of muxy wrt Phi
:param boolean verbose: prints stuff
:param int maxiter: maximum number of iterations
:return:
* muxy the matching patterns, shape (ncat_men, ncat_women)
* marg_err_x, marg_err_y the errors on the margins
* and the gradients of muxy wrt Phi if gr=True
"""
ncat_men = men_margins.shape[0]
ncat_women = women_margins.shape[0]
n_couples = np.sum(men_margins)
# check that there are as many men as women
if np.abs(np.sum(women_margins) - n_couples) > n_couples * tol:
print_stars(
f"{ipfp_homo_nosingles_solver}: there should be as many men as women")
if Phi.shape != (ncat_men, ncat_women):
print_stars(
f"ipfp_hetero_solver: the shape of Phi should be ({ncat_men}, {ncat_women}")
sys.exit(1)
ephi2 = npexp(Phi / 2.0)
ephi2T = ephi2.T
#############################################################################
# we solve the equilibrium equations muxy = ephi2 * tx * ty
# starting with a reasonable initial point for tx and ty: : tx = ty = bigc
# it is important that it fit the number of individuals
#############################################################################
bigc = sqrt(n_couples / np.sum(ephi2))
txi = np.full(ncat_men, bigc)
tyi = np.full(ncat_women, bigc)
err_diff = bigc
tol_diff = tol * err_diff
niter = 0
while (err_diff > tol_diff) and (niter < maxiter):
sx = ephi2 @ tyi
tx = men_margins / sx
sy = ephi2T @ tx
ty = women_margins / sy
err_x = npmaxabs(tx - txi)
err_y = npmaxabs(ty - tyi)
err_diff = err_x + err_y
txi, tyi = tx, ty
niter += 1
muxy = ephi2 * np.outer(txi, tyi)
marg_err_x = np.sum(muxy, 1) - men_margins
marg_err_y = np.sum(muxy, 0) - women_margins
if verbose:
print(f"After {niter} iterations:")
print(f"\tMargin error on x: {npmaxabs(marg_err_x)}")
print(f"\tMargin error on y: {npmaxabs(marg_err_y)}")
if not gr:
return muxy, marg_err_x, marg_err_y
else:
sxi = ephi2 @ tyi
syi = ephi2T @ txi
n_sum_categories = ncat_men + ncat_women
n_prod_categories = ncat_men * ncat_women
# start with the LHS of the linear system
lhs = np.zeros((n_sum_categories, n_sum_categories))
lhs[:ncat_men, :ncat_men] = np.diag(sxi)
lhs[:ncat_men, ncat_men:] = ephi2 * txi.reshape((-1, 1))
lhs[ncat_men:, ncat_men:] = np.diag(syi)
lhs[ncat_men:, :ncat_men] = ephi2T * tyi.reshape((-1, 1))
# now fill the RHS
n_cols_rhs = n_prod_categories
rhs = np.zeros((n_sum_categories, n_cols_rhs))
# to compute derivatives of (txi, tyi) wrt Phi
der_ephi2 = der_npexp(Phi / 2.0) / \
(2.0 * ephi2) # 1/2 with safeguards
ivar = 0
for iman in range(ncat_men):
rhs[iman, ivar:(ivar + ncat_women)] = - \
muxy[iman, :] * der_ephi2[iman, :]
ivar += ncat_women
ivar1 = ncat_men
ivar2 = 0
for iwoman in range(ncat_women):
rhs[ivar1, ivar2:n_cols_rhs:ncat_women] = - \
muxy[:, iwoman] * der_ephi2[:, iwoman]
ivar1 += 1
ivar2 += 1
# solve for the derivatives of txi and tyi
dt_dT = spla.solve(lhs, rhs)
dt = dt_dT[:ncat_men, :]
dT = dt_dT[ncat_men:, :]
# now construct the derivatives of muxy
dmuxy = np.zeros((n_prod_categories, n_cols_rhs))
ivar = 0
for iman in range(ncat_men):
dt_man = dt[iman, :]
dmuxy[ivar:(ivar + ncat_women),
:] = np.outer((ephi2[iman, :] * tyi), dt_man)
ivar += ncat_women
for iwoman in range(ncat_women):
dT_woman = dT[iwoman, :]
dmuxy[iwoman:n_prod_categories:ncat_women,
:] += np.outer((ephi2[:, iwoman] * txi), dT_woman)
# add the term that comes from differentiating ephi2
muxy_vec2 = (muxy * der_ephi2).reshape(n_prod_categories)
dmuxy += np.diag(muxy_vec2)
return muxy, marg_err_x, marg_err_y, dmuxy
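# Toy usage sketch (hypothetical values, chosen only to satisfy the solver's
# equal-mass requirement): a symmetric 2x2 market without singles.
def _demo_ipfp_homo_nosingles():
    Phi = np.array([[1.0, 0.2], [0.2, 1.0]])
    men_margins = np.array([50.0, 50.0])
    women_margins = np.array([50.0, 50.0])  # same total mass as men
    muxy, err_x, err_y = ipfp_homo_nosingles_solver(Phi, men_margins,
                                                    women_margins)
    print(muxy.sum())                        # ~100 couples
    print(npmaxabs(err_x), npmaxabs(err_y))  # margin errors ~0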
def ipfp_homo_solver(Phi, men_margins, women_margins, tol=1e-9,
gr=False, verbose=False, maxiter=1000):
"""
solve for equilibrium in a Choo and Siow market
given systematic surplus and margins
:param np.array Phi: matrix of systematic surplus, shape (ncat_men, ncat_women)
:param np.array men_margins: vector of men margins, shape (ncat_men)
:param np.array women_margins: vector of women margins, shape (ncat_women)
:param float tol: tolerance on change in solution
:param boolean gr: if True, also evaluate derivatives of muxy wrt Phi
:param boolean verbose: prints stuff
:param int maxiter: maximum number of iterations
:return:
* (muxy, mux0, mu0y) the matching patterns
* marg_err_x, marg_err_y the errors on the margins
* and the gradients of (muxy, mux0, mu0y) wrt (men_margins, women_margins, Phi) if gr=True
"""
ncat_men = men_margins.size
ncat_women = women_margins.size
if Phi.shape != (ncat_men, ncat_women):
print_stars(
f"ipfp_homo_solver: the shape of Phi should be ({ncat_men}, {ncat_women}")
sys.exit(1)
ephi2 = npexp(Phi / 2.0)
#############################################################################
# we solve the equilibrium equations muxy = ephi2 * tx * ty
# where mux0=tx**2 and mu0y=ty**2
# starting with a reasonable initial point for tx and ty: tx = ty = bigc
# it is important that it fit the number of individuals
#############################################################################
ephi2T = ephi2.T
nindivs = np.sum(men_margins) + np.sum(women_margins)
bigc = sqrt(nindivs / (ncat_men + ncat_women + 2.0 * np.sum(ephi2)))
txi = np.full(ncat_men, bigc)
tyi = np.full(ncat_women, bigc)
err_diff = bigc
tol_diff = tol * bigc
niter = 0
while (err_diff > tol_diff) and (niter < maxiter):
sx = ephi2 @ tyi
tx = (np.sqrt(sx * sx + 4.0 * men_margins) - sx) / 2.0
sy = ephi2T @ tx
ty = (np.sqrt(sy * sy + 4.0 * women_margins) - sy) / 2.0
err_x = npmaxabs(tx - txi)
err_y = npmaxabs(ty - tyi)
err_diff = err_x + err_y
txi = tx
tyi = ty
niter += 1
mux0 = txi * txi
mu0y = tyi * tyi
muxy = ephi2 * np.outer(txi, tyi)
marg_err_x = mux0 + np.sum(muxy, 1) - men_margins
marg_err_y = mu0y + np.sum(muxy, 0) - women_margins
if verbose:
print(f"After {niter} iterations:")
print(f"\tMargin error on x: {npmaxabs(marg_err_x)}")
print(f"\tMargin error on y: {npmaxabs(marg_err_y)}")
if not gr:
return (muxy, mux0, mu0y), marg_err_x, marg_err_y
else: # we compute the derivatives
sxi = ephi2 @ tyi
syi = ephi2T @ txi
n_sum_categories = ncat_men + ncat_women
n_prod_categories = ncat_men * ncat_women
# start with the LHS of the linear system
lhs = np.zeros((n_sum_categories, n_sum_categories))
lhs[:ncat_men, :ncat_men] = np.diag(2.0 * txi + sxi)
lhs[:ncat_men, ncat_men:] = ephi2 * txi.reshape((-1, 1))
lhs[ncat_men:, ncat_men:] = np.diag(2.0 * tyi + syi)
lhs[ncat_men:, :ncat_men] = ephi2T * tyi.reshape((-1, 1))
# now fill the RHS
n_cols_rhs = n_sum_categories + n_prod_categories
rhs = np.zeros((n_sum_categories, n_cols_rhs))
# to compute derivatives of (txi, tyi) wrt men_margins
rhs[:ncat_men, :ncat_men] = np.eye(ncat_men)
# to compute derivatives of (txi, tyi) wrt women_margins
rhs[ncat_men:n_sum_categories,
            ncat_men:n_sum_categories] = np.eye(ncat_women)
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
from collections import OrderedDict
from ..utils.ml.models.ratio import DenseDoublyParameterizedRatioModel
from ..utils.ml.eval import evaluate_ratio_model
from ..utils.ml.utils import get_optimizer, get_loss
from ..utils.various import load_and_check, shuffle, restrict_samplesize
from ..utils.ml.trainer import DoubleParameterizedRatioTrainer
from .base import ConditionalEstimator, TheresAGoodReasonThisDoesntWork
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
logger = logging.getLogger(__name__)
class DoubleParameterizedRatioEstimator(ConditionalEstimator):
"""
A neural estimator of the likelihood ratio as a function of the observation x, the numerator hypothesis theta0, and
the denominator hypothesis theta1.
Parameters
----------
features : list of int or None, optional
Indices of observables (features) that are used as input to the neural networks. If None, all observables
are used. Default value: None.
n_hidden : tuple of int, optional
Units in each hidden layer in the neural networks. If method is 'nde' or 'scandal', this refers to the
setup of each individual MADE layer. Default value: (100,).
activation : {'tanh', 'sigmoid', 'relu'}, optional
Activation function. Default value: 'tanh'.
"""
def train(
self,
method,
x,
y,
theta0,
theta1,
r_xz=None,
t_xz0=None,
t_xz1=None,
x_val=None,
y_val=None,
theta0_val=None,
theta1_val=None,
r_xz_val=None,
t_xz0_val=None,
t_xz1_val=None,
alpha=1.0,
optimizer="amsgrad",
n_epochs=50,
batch_size=128,
initial_lr=0.001,
final_lr=0.0001,
nesterov_momentum=None,
validation_split=0.25,
early_stopping=True,
scale_inputs=True,
shuffle_labels=False,
limit_samplesize=None,
memmap=False,
verbose="some",
scale_parameters=True,
n_workers=8,
clip_gradient=None,
early_stopping_patience=None,
):
"""
Trains the network.
Parameters
----------
method : str
The inference method used for training. Allowed values are 'alice', 'alices', 'carl', 'cascal', 'rascal',
and 'rolr'.
x : ndarray or str
Observations, or filename of a pickled numpy array.
y : ndarray or str
            Class labels (0 = numerator, 1 = denominator), or filename of a pickled numpy array.
theta0 : ndarray or str
Numerator parameter point, or filename of a pickled numpy array.
theta1 : ndarray or str
Denominator parameter point, or filename of a pickled numpy array.
r_xz : ndarray or str or None, optional
Joint likelihood ratio, or filename of a pickled numpy array. Default value: None.
t_xz0 : ndarray or str or None, optional
Joint scores at theta0, or filename of a pickled numpy array. Default value: None.
t_xz1 : ndarray or str or None, optional
Joint scores at theta1, or filename of a pickled numpy array. Default value: None.
x_val : ndarray or str or None, optional
Validation observations, or filename of a pickled numpy array. If None
and validation_split > 0, validation data will be randomly selected from the training data.
Default value: None.
y_val : ndarray or str or None, optional
Validation labels (0 = numerator, 1 = denominator), or filename of a pickled numpy array. If None
and validation_split > 0, validation data will be randomly selected from the training data.
Default value: None.
theta0_val : ndarray or str or None, optional
Validation numerator parameter points, or filename of a pickled numpy array. If None
and validation_split > 0, validation data will be randomly selected from the training data.
Default value: None.
theta1_val : ndarray or str or None, optional
Validation denominator parameter points, or filename of a pickled numpy array. If None
and validation_split > 0, validation data will be randomly selected from the training data.
Default value: None.
r_xz_val : ndarray or str or None, optional
Validation joint likelihood ratio, or filename of a pickled numpy array. If None
and validation_split > 0, validation data will be randomly selected from the training data.
Default value: None.
t_xz0_val : ndarray or str or None, optional
Validation joint scores at theta0, or filename of a pickled numpy array. If None
and validation_split > 0, validation data will be randomly selected from the training data.
Default value: None.
t_xz1_val : ndarray or str or None, optional
Validation joint scores at theta1, or filename of a pickled numpy array. If None
and validation_split > 0, validation data will be randomly selected from the training data.
Default value: None.
alpha : float, optional
Hyperparameter weighting the score error in the loss function of the 'alices', 'rascal', and 'cascal'
methods. Default value: 1.
optimizer : {"adam", "amsgrad", "sgd"}, optional
Optimization algorithm. Default value: "amsgrad".
n_epochs : int, optional
Number of epochs. Default value: 50.
batch_size : int, optional
Batch size. Default value: 128.
initial_lr : float, optional
Learning rate during the first epoch, after which it exponentially decays to final_lr. Default value:
0.001.
final_lr : float, optional
Learning rate during the last epoch. Default value: 0.0001.
nesterov_momentum : float or None, optional
If trainer is "sgd", sets the Nesterov momentum. Default value: None.
validation_split : float or None, optional
Fraction of samples used for validation and early stopping (if early_stopping is True). If None, the entire
sample is used for training and early stopping is deactivated. Default value: 0.25.
early_stopping : bool, optional
Activates early stopping based on the validation loss (only if validation_split is not None). Default value:
True.
scale_inputs : bool, optional
Scale the observables to zero mean and unit variance. Default value: True.
shuffle_labels : bool, optional
If True, the labels (`y`, `r_xz`, `t_xz`) are shuffled, while the observations (`x`) remain in their
normal order. This serves as a closure test, in particular as cross-check against overfitting: an estimator
            trained with shuffle_labels=True should predict likelihood ratios around 1 and scores around 0.
limit_samplesize : int or None, optional
If not None, only this number of samples (events) is used to train the estimator. Default value: None.
memmap : bool, optional.
If True, training files larger than 1 GB will not be loaded into memory at once. Default value: False.
verbose : {"all", "many", "some", "few", "none}, optional
Determines verbosity of training. Default value: "some".
Returns
-------
None
"""
logger.info("Starting training")
logger.info(" Method: %s", method)
if method in ["cascal", "rascal", "alices"]:
logger.info(" alpha: %s", alpha)
logger.info(" Batch size: %s", batch_size)
logger.info(" Optimizer: %s", optimizer)
logger.info(" Epochs: %s", n_epochs)
logger.info(" Learning rate: %s initially, decaying to %s", initial_lr, final_lr)
if optimizer == "sgd":
logger.info(" Nesterov momentum: %s", nesterov_momentum)
logger.info(" Validation split: %s", validation_split)
logger.info(" Early stopping: %s", early_stopping)
logger.info(" Scale inputs: %s", scale_inputs)
logger.info(" Shuffle labels %s", shuffle_labels)
if limit_samplesize is None:
logger.info(" Samples: all")
else:
logger.info(" Samples: %s", limit_samplesize)
# Load training data
logger.info("Loading training data")
memmap_threshold = 1.0 if memmap else None
theta0 = load_and_check(theta0, memmap_files_larger_than_gb=memmap_threshold)
theta1 = load_and_check(theta1, memmap_files_larger_than_gb=memmap_threshold)
x = load_and_check(x, memmap_files_larger_than_gb=memmap_threshold)
y = load_and_check(y, memmap_files_larger_than_gb=memmap_threshold)
r_xz = load_and_check(r_xz, memmap_files_larger_than_gb=memmap_threshold)
t_xz0 = load_and_check(t_xz0, memmap_files_larger_than_gb=memmap_threshold)
t_xz1 = load_and_check(t_xz1, memmap_files_larger_than_gb=memmap_threshold)
self._check_required_data(method, r_xz, t_xz0, t_xz1)
# Infer dimensions of problem
n_samples = x.shape[0]
n_observables = x.shape[1]
n_parameters = theta0.shape[1]
logger.info("Found %s samples with %s parameters and %s observables", n_samples, n_parameters, n_observables)
# Limit sample size
if limit_samplesize is not None and limit_samplesize < n_samples:
logger.info("Only using %s of %s training samples", limit_samplesize, n_samples)
x, theta0, theta1, y, r_xz, t_xz0, t_xz1 = restrict_samplesize(
limit_samplesize, x, theta0, theta1, y, r_xz, t_xz0, t_xz1
)
# Validation data
external_validation = (
x_val is not None and y_val is not None and theta0_val is not None and theta1_val is not None
)
if external_validation:
theta0_val = load_and_check(theta0_val, memmap_files_larger_than_gb=memmap_threshold)
theta1_val = load_and_check(theta1_val, memmap_files_larger_than_gb=memmap_threshold)
x_val = load_and_check(x_val, memmap_files_larger_than_gb=memmap_threshold)
y_val = load_and_check(y_val, memmap_files_larger_than_gb=memmap_threshold)
r_xz_val = load_and_check(r_xz_val, memmap_files_larger_than_gb=memmap_threshold)
t_xz0_val = load_and_check(t_xz0_val, memmap_files_larger_than_gb=memmap_threshold)
t_xz1_val = load_and_check(t_xz1_val, memmap_files_larger_than_gb=memmap_threshold)
logger.info("Found %s separate validation samples", x_val.shape[0])
assert x_val.shape[1] == n_observables
assert theta0_val.shape[1] == n_parameters
assert theta1_val.shape[1] == n_parameters
if r_xz is not None:
assert r_xz_val is not None, "When providing r_xz and sep. validation data, also provide r_xz_val"
if t_xz0 is not None:
assert t_xz0_val is not None, "When providing t_xz0 and sep. validation data, also provide t_xz0_val"
if t_xz1 is not None:
assert t_xz1_val is not None, "When providing t_xz1 and sep. validation data, also provide t_xz1_val"
# Scale features
if scale_inputs:
self.initialize_input_transform(x, overwrite=False)
x = self._transform_inputs(x)
if external_validation:
x_val = self._transform_inputs(x_val)
else:
self.initialize_input_transform(x, False, overwrite=False)
# Scale parameters
if scale_parameters:
logger.info("Rescaling parameters")
self.initialize_parameter_transform(np.concatenate((theta0, theta1), 0))
theta0 = self._transform_parameters(theta0)
theta1 = self._transform_parameters(theta1)
t_xz0 = self._transform_score(t_xz0, inverse=False)
t_xz1 = self._transform_score(t_xz1, inverse=False)
if external_validation:
t_xz0_val = self._transform_score(t_xz0_val, inverse=False)
t_xz1_val = self._transform_score(t_xz1_val, inverse=False)
else:
self.initialize_parameter_transform(np.concatenate((theta0, theta1), 0), False)
# Shuffle labels
if shuffle_labels:
logger.info("Shuffling labels")
y, r_xz, t_xz0, t_xz1 = shuffle(y, r_xz, t_xz0, t_xz1)
# Features
if self.features is not None:
x = x[:, self.features]
logger.info("Only using %s of %s observables", x.shape[1], n_observables)
n_observables = x.shape[1]
if external_validation:
x_val = x_val[:, self.features]
# Check consistency of input with model
if self.n_observables is None:
self.n_observables = n_observables
if self.n_parameters is None:
self.n_parameters = n_parameters
if n_parameters != self.n_parameters:
raise RuntimeError(
"Number of parameters does not match model: {} vs {}".format(n_parameters, self.n_parameters)
)
if n_observables != self.n_observables:
raise RuntimeError(
"Number of observables does not match model: {} vs {}".format(n_observables, self.n_observables)
)
# Data
data = self._package_training_data(method, x, theta0, theta1, y, r_xz, t_xz0, t_xz1)
if external_validation:
data_val = self._package_training_data(
method, x_val, theta0_val, theta1_val, y_val, r_xz_val, t_xz0_val, t_xz1_val
)
else:
data_val = None
# Create model
if self.model is None:
logger.info("Creating model")
self._create_model()
# Losses
loss_functions, loss_labels, loss_weights = get_loss(method + "2", alpha)
# Optimizer
opt, opt_kwargs = get_optimizer(optimizer, nesterov_momentum)
# Train model
logger.info("Training model")
trainer = DoubleParameterizedRatioTrainer(self.model, n_workers=n_workers)
result = trainer.train(
data=data,
data_val=data_val,
loss_functions=loss_functions,
loss_weights=loss_weights,
loss_labels=loss_labels,
epochs=n_epochs,
batch_size=batch_size,
optimizer=opt,
optimizer_kwargs=opt_kwargs,
initial_lr=initial_lr,
final_lr=final_lr,
validation_split=validation_split,
early_stopping=early_stopping,
verbose=verbose,
clip_gradient=clip_gradient,
early_stopping_patience=early_stopping_patience,
)
return result
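    # Hypothetical usage sketch (not part of the original class); the file
    # names and settings below are illustrative only:
    #
    #   estimator = DoubleParameterizedRatioEstimator(n_hidden=(100, 100))
    #   estimator.train(
    #       method="alices",
    #       x="x_train.npy", y="y_train.npy",
    #       theta0="theta0_train.npy", theta1="theta1_train.npy",
    #       r_xz="r_xz_train.npy",
    #       t_xz0="t_xz0_train.npy", t_xz1="t_xz1_train.npy",
    #       alpha=1.0, n_epochs=50,
    #   )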
def evaluate_log_likelihood_ratio(self, x, theta0, theta1, test_all_combinations=True, evaluate_score=False):
"""
Evaluates the log likelihood ratio as a function of the observation x, the numerator hypothesis theta0, and
the denominator hypothesis theta1.
Parameters
----------
x : str or ndarray
Observations or filename of a pickled numpy array.
theta0 : ndarray or str
Numerator parameter points or filename of a pickled numpy array.
theta1 : ndarray or str
Denominator parameter points or filename of a pickled numpy array.
test_all_combinations : bool, optional
If False, the number of samples in the observable and theta
files has to match, and the likelihood ratio is evaluated only for the combinations
`r(x_i | theta0_i, theta1_i)`. If True, `r(x_i | theta0_j, theta1_j)` for all pairwise combinations `i, j`
are evaluated. Default value: True.
evaluate_score : bool, optional
Sets whether in addition to the likelihood ratio the score is evaluated. Default value: False.
Returns
-------
log_likelihood_ratio : ndarray
The estimated log likelihood ratio. If test_all_combinations is True, the result has shape
`(n_thetas, n_x)`. Otherwise, it has shape `(n_samples,)`.
score0 : ndarray or None
None if evaluate_score is False. Otherwise the derived estimated score at `theta0`. If test_all_combinations
is True, the result has shape `(n_thetas, n_x, n_parameters)`. Otherwise, it has shape
`(n_samples, n_parameters)`.
score1 : ndarray or None
None if evaluate_score is False. Otherwise the derived estimated score at `theta1`. If test_all_combinations
is True, the result has shape `(n_thetas, n_x, n_parameters)`. Otherwise, it has shape
`(n_samples, n_parameters)`.
"""
if self.model is None:
raise ValueError("No model -- train or load model before evaluating it!")
# Load training data
logger.debug("Loading evaluation data")
x = load_and_check(x)
theta0 = load_and_check(theta0)
theta1 = load_and_check(theta1)
# Scale observables
x = self._transform_inputs(x)
# Restrict features
if self.features is not None:
x = x[:, self.features]
# Balance thetas
if len(theta1) > len(theta0):
theta0 = [theta0[i % len(theta0)] for i in range(len(theta1))]
elif len(theta1) < len(theta0):
theta1 = [theta1[i % len(theta1)] for i in range(len(theta0))]
all_log_r_hat = []
all_t_hat0 = []
all_t_hat1 = []
if test_all_combinations:
logger.debug("Starting ratio evaluation for %s x-theta combinations", len(theta0) * len(x))
for i, (this_theta0, this_theta1) in enumerate(zip(theta0, theta1)):
logger.debug(
"Starting ratio evaluation for thetas %s / %s: %s vs %s",
i + 1,
len(theta0),
this_theta0,
this_theta1,
)
_, log_r_hat, t_hat0, t_hat1 = evaluate_ratio_model(
model=self.model,
method_type="double_parameterized_ratio",
theta0s=[this_theta0],
theta1s=[this_theta1],
xs=x,
evaluate_score=evaluate_score,
)
all_log_r_hat.append(log_r_hat)
all_t_hat0.append(t_hat0)
all_t_hat1.append(t_hat1)
all_log_r_hat = np.array(all_log_r_hat)
all_t_hat0 = np.array(all_t_hat0)
all_t_hat1 = np.array(all_t_hat1)
from typing import Tuple
import os
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import Normalize
from torchvision import datasets
mnist_mean, mnist_stddev = 0.1307, 0.3081
def get_mnist_data() -> Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
print("[+] Fetching data...")
path = os.path.join(os.path.dirname(
os.path.abspath(__file__)), "../data")
# 60000 x 28 x 28
train_loader = datasets.MNIST(path, train=True, download=True)
train_size = train_loader.train_labels.size(0)
# 10000 x 28 x 28
test_loader = datasets.MNIST(path, train=False, download=True)
test_size = test_loader.test_labels.size(0)
train_data = np.zeros(train_loader.train_data.shape)
train_labels = np.zeros(train_loader.train_labels.shape)
test_data = np.zeros(test_loader.test_data.shape)
test_labels = np.zeros(test_loader.test_labels.shape)
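# The function above is truncated at this point. A typical continuation
# (a hedged sketch, not the original code) would copy the torch tensors
# into the numpy arrays and standardize with the constants defined above:
#
# train_data = train_loader.train_data.numpy() / 255.0
# train_data = (train_data - mnist_mean) / mnist_stddev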
"""
This module is an example of a barebones function plugin for napari
It implements the ``napari_experimental_provide_function`` hook specification.
see: https://napari.org/docs/dev/plugins/hook_specifications.html
Replace code below according to your needs.
"""
from __future__ import print_function, division
from typing import TYPE_CHECKING, DefaultDict
from unicodedata import name
import six
# import modules
import sys # input, output, errors, and files
import os # interacting with file systems
import time # getting time
import datetime
import inspect # get passed parameters
import yaml # parameter importing
import json # for importing tiff metadata
try:
import cPickle as pickle # loading and saving python objects
except:
import pickle
import numpy as np # numbers package
import struct # for interpreting strings as binary data
import re # regular expressions
from pprint import pprint # for human readable file output
import traceback # for error messaging
import warnings # error messaging
import copy # not sure this is needed
import h5py # working with HDF5 files
import pandas as pd
import networkx as nx
import collections
# scipy and image analysis
from scipy.signal import find_peaks_cwt # used in channel finding
from scipy.optimize import curve_fit # fitting ring profile
from scipy.optimize import leastsq # fitting 2d gaussian
from scipy import ndimage as ndi # labeling and distance transform
from skimage import io
from skimage import segmentation # used in make_masks and segmentation
from skimage.transform import rotate
from skimage.feature import match_template # used to align images
from skimage.feature import blob_log # used for foci finding
from skimage.filters import threshold_otsu, median # segmentation
from skimage import filters
from skimage import morphology # many segmentation functions are used from this
from skimage.measure import regionprops # used for creating lineages
from skimage.measure import profile_line # used for ring and nucleoid analysis
from skimage import util, measure, transform, feature
import tifffile as tiff
from sklearn import metrics
# deep learning
import tensorflow as tf # ignore message about how tf was compiled
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import models
from tensorflow.keras import losses
from tensorflow.keras import utils
from tensorflow.keras import backend as K
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # supress warnings
# Parallelization modules
import multiprocessing
from multiprocessing import Pool
# Plotting for debug
import matplotlib as mpl
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 12}
mpl.rc('font', **font)
mpl.rcParams['pdf.fonttype'] = 42
from matplotlib.patches import Ellipse
from pathlib import Path
import matplotlib.pyplot as plt
# import modules
import glob
import pims_nd2
from scipy import stats
from enum import Enum
from napari_plugin_engine import napari_hook_implementation
import matplotlib.gridspec as gridspec
from skimage.exposure import rescale_intensity # for displaying in GUI
# import mm3_helpers as mm3
import napari
# This is the actual plugin function, where we export our function
# (The functions themselves are defined below)
@napari_hook_implementation
def napari_experimental_provide_function():
# we can return a single function
# or a tuple of (function, magicgui_options)
# or a list of multiple functions with or without options, as shown here:
#return [Segment, threshold, image_arithmetic]
return [Compile, ChannelPicker, Segment]
# 1. First example, a simple function that thresholds an image and creates a labels layer
def threshold(data: "napari.types.ImageData", threshold: int) -> "napari.types.LabelsData":
"""Threshold an image and return a mask."""
return (data > threshold).astype(int)
# print a warning
def warning(*objs):
print(time.strftime("%H:%M:%S WARNING:", time.localtime()), *objs, file=sys.stderr)
# print information
def information(*objs):
print(time.strftime("%H:%M:%S", time.localtime()), *objs, file=sys.stdout)
def julian_day_number():
"""
Need this to solve a bug in pims_nd2.nd2reader.ND2_Reader instance initialization.
The bug is in /usr/local/lib/python2.7/site-packages/pims_nd2/ND2SDK.py in function `jdn_to_datetime_local`, when the year number in the metadata (self._lim_metadata_desc) is not in the correct range. This causes a problem when calling self.metadata.
https://en.wikipedia.org/wiki/Julian_day
"""
dt=datetime.datetime.now()
tt=dt.timetuple()
jdn=(1461.*(tt.tm_year + 4800. + (tt.tm_mon - 14.)/12))/4. + (367.*(tt.tm_mon - 2. - 12.*((tt.tm_mon -14.)/12)))/12. - (3.*((tt.tm_year + 4900. + (tt.tm_mon - 14.)/12.)/100.))/4. + tt.tm_mday - 32075
return jdn
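# Note: the classical Julian day number formula uses integer division;
# the float arithmetic above yields a fractional value. That appears
# acceptable for its use here, working around the pims_nd2
# initialization bug described in the docstring above.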
def get_plane(filepath):
pattern = r'(c\d+).tif'
res = re.search(pattern,filepath)
if (res != None):
return res.group(1)
else:
return None
def get_fov(filepath):
pattern = r'xy(\d+)\w*.tif'
res = re.search(pattern,filepath)
if (res != None):
return int(res.group(1))
else:
return None
def get_time(filepath):
pattern = r't(\d+)xy\w+.tif'
res = re.search(pattern,filepath)
if (res != None):
return np.int_(res.group(1))
else:
return None
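# Worked examples for the three filename parsers above (derived from
# their regexes, with a hypothetical filename):
# get_plane('t0001xy002c1.tif') -> 'c1'
# get_fov('t0001xy002c1.tif')   -> 2
# get_time('t0001xy002c1.tif')  -> 1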
# loads and image stack from TIFF or HDF5 using mm3 conventions
def load_stack(fov_id, peak_id, color='c1', image_return_number=None):
'''
Loads an image stack.
Supports reading TIFF stacks or HDF5 files.
Parameters
----------
fov_id : int
The FOV id
peak_id : int
The peak (channel) id. Dummy None value in case color='empty'
color : str
The image stack type to return. Can be:
c1 : phase stack
cN : where n is an integer for arbitrary color channel
sub : subtracted images
seg : segmented images
empty : get the empty channel for this fov, slightly different
Returns
-------
image_stack : np.ndarray
The image stack through time. Shape is (t, y, x)
'''
# things are slightly different for empty channels
if 'empty' in color:
if params['output'] == 'TIFF':
img_filename = params['experiment_name'] + '_xy%03d_%s.tif' % (fov_id, color)
with tiff.TiffFile(os.path.join(params['empty_dir'],img_filename)) as tif:
img_stack = tif.asarray()
if params['output'] == 'HDF5':
with h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r') as h5f:
img_stack = h5f[color][:]
return img_stack
# load normal images for either TIFF or HDF5
if params['output'] == 'TIFF':
if color[0] == 'c':
img_dir = params['chnl_dir']
elif 'sub' in color:
img_dir = params['sub_dir']
elif 'foci' in color:
img_dir = params['foci_seg_dir']
elif 'seg' in color:
img_dir = params['seg_dir']
img_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, color)
with tiff.TiffFile(os.path.join(img_dir, img_filename)) as tif:
img_stack = tif.asarray()
if params['output'] == 'HDF5':
with h5py.File(os.path.join(params['hdf5_dir'], 'xy%03d.hdf5' % fov_id), 'r') as h5f:
# normal naming
# need to use [:] to get a copy, else it references the closed hdf5 dataset
img_stack = h5f['channel_%04d/p%04d_%s' % (peak_id, peak_id, color)][:]
return img_stack
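# Example call (a hedged sketch; it assumes the global params dict has
# been populated with 'output', the directory paths, and
# 'experiment_name' as used above):
# phase_stack = load_stack(fov_id=1, peak_id=45, color='c1')
# phase_stack.shape -> (n_timepoints, y, x)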
# load the time table and add it to the global params
def load_time_table():
'''Add the time table dictionary to the params global dictionary.
This is so it can be used during Cell creation.
'''
# try first for yaml, then for pkl
try:
with open(os.path.join(params['ana_dir'], 'time_table.yaml'), 'rb') as time_table_file:
params['time_table'] = yaml.safe_load(time_table_file)
except:
with open(os.path.join(params['ana_dir'], 'time_table.pkl'), 'rb') as time_table_file:
params['time_table'] = pickle.load(time_table_file)
return
# function for loading the channel masks
def load_channel_masks():
'''Load channel masks dictionary. Should be .yaml but try pickle too.
'''
information("Loading channel masks dictionary.")
# try loading from .yaml before .pkl
try:
information('Path:', os.path.join(params['ana_dir'], 'channel_masks.yaml'))
with open(os.path.join(params['ana_dir'], 'channel_masks.yaml'), 'r') as cmask_file:
channel_masks = yaml.safe_load(cmask_file)
except:
warning('Could not load channel masks dictionary from .yaml.')
try:
information('Path:', os.path.join(params['ana_dir'], 'channel_masks.pkl'))
with open(os.path.join(params['ana_dir'], 'channel_masks.pkl'), 'rb') as cmask_file:
channel_masks = pickle.load(cmask_file)
except ValueError:
warning('Could not load channel masks dictionary from .pkl.')
return channel_masks
# function for loading the specs file
def load_specs():
'''Load specs file which indicates which channels should be analyzed, used as empties, or ignored.'''
try:
with open(os.path.join(params['ana_dir'], 'specs.yaml'), 'r') as specs_file:
specs = yaml.safe_load(specs_file)
except:
try:
with open(os.path.join(params['ana_dir'], 'specs.pkl'), 'rb') as specs_file:
specs = pickle.load(specs_file)
except ValueError:
warning('Could not load specs file.')
return specs
### functions for dealing with raw TIFF images
# get_initial_tif_params extracts basic metadata from raw TIFF images
def get_initial_tif_params(image_filename):
'''This is a function for getting the information
out of an image for later trap identification, cropping, and aligning with Unet. It loads a tiff file and pulls out the image metadata.
it returns a dictionary like this for each image:
'filename': image_filename,
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jdn' : image_metadata['jdn'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'plane_names' : image_metadata['plane_names'] # list of plane names
Called by
mm3_Compile.py __main__
Calls
mm3.extract_metadata
mm3.find_channels
'''
try:
# open up file and get metadata
with tiff.TiffFile(os.path.join(params['TIFF_dir'],image_filename)) as tif:
image_data = tif.asarray()
#print(image_data.shape) # uncomment for debug
#if len(image_data.shape) == 2:
# img_shape = [image_data.shape[0],image_data.shape[1]]
#else:
img_shape = [image_data.shape[1],image_data.shape[2]]
plane_list = [str(i+1) for i in range(image_data.shape[0])]
#print(plane_list) # uncomment for debug
if params['TIFF_source'] == 'elements':
image_metadata = get_tif_metadata_elements(tif)
elif params['TIFF_source'] == 'nd2ToTIFF':
image_metadata = get_tif_metadata_nd2ToTIFF(tif)
else:
image_metadata = get_tif_metadata_filename(tif)
information('Analyzed %s' % image_filename)
# return the file name, the data for the channels in that image, and the metadata
return {'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : plane_list, # list of plane names
'shape' : img_shape} # image shape x y in pixels
except:
warning('Failed get_params for ' + image_filename.split("/")[-1])
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.print_tb(sys.exc_info()[2]))
return {'filepath': os.path.join(params['TIFF_dir'],image_filename), 'analyze_success': False}
# get params is the major function which processes raw TIFF images
def get_tif_params(image_filename, find_channels=True):
'''This is a damn important function for getting the information
out of an image. It loads a tiff file, pulls out the image data, and the metadata,
including the location of the channels if flagged.
it returns a dictionary like this for each image:
'filename': image_filename,
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jdn' : image_metadata['jdn'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'plane_names' : image_metadata['plane_names'] # list of plane names
'channels': cp_dict, # dictionary of channel locations, in the case of Unet-based channel segmentation, it's a dictionary of channel labels
Called by
mm3_Compile.py __main__
Calls
mm3.extract_metadata
mm3.find_channels
'''
try:
# open up file and get metadata
with tiff.TiffFile(os.path.join(params['TIFF_dir'],image_filename)) as tif:
image_data = tif.asarray()
if params['TIFF_source'] == 'elements':
image_metadata = get_tif_metadata_elements(tif)
elif params['TIFF_source'] == 'nd2ToTIFF':
image_metadata = get_tif_metadata_nd2ToTIFF(tif)
else:
image_metadata = get_tif_metadata_filename(tif)
# look for channels if flagged
if find_channels:
# fix the image orientation and get the number of planes
image_data = fix_orientation(image_data)
# if the image data has more than 1 plane restrict image_data to phase,
# which should have highest mean pixel data
if len(image_data.shape) > 2:
#ph_index = np.argmax([np.mean(image_data[ci]) for ci in range(image_data.shape[0])])
ph_index = int(params['phase_plane'][1:]) - 1
image_data = image_data[ph_index]
# get shape of single plane
img_shape = [image_data.shape[0], image_data.shape[1]]
# find channels on the processed image
chnl_loc_dict = find_channel_locs(image_data)
information('Analyzed %s' % image_filename)
# return the file name, the data for the channels in that image, and the metadata
return {'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : image_metadata['planes'], # list of plane names
'shape' : img_shape, # image shape x y in pixels
# 'channels' : {1 : {'A' : 1, 'B' : 2}, 2 : {'C' : 3, 'D' : 4}}}
'channels' : chnl_loc_dict} # dictionary of channel locations
except:
warning('Failed get_params for ' + image_filename.split("/")[-1])
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.print_tb(sys.exc_info()[2]))
return {'filepath': os.path.join(params['TIFF_dir'],image_filename), 'analyze_success': False}
# finds metadata in a tiff image which has been exported with Nikon Elements.
def get_tif_metadata_elements(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This is for tiff files as exported by Nikon Elements as a stacked tiff, one for each time point.
tif is an opened tif file (using the package tifffile)
arguments:
tif (tifffile.TiffFile): TIFF file object from which data will be extracted
returns:
dictionary of values:
'jdn' (float)
'x' (float)
'y' (float)
'plane_names' (list of strings)
Called by
mm3.Compile
'''
# image Metadata
idata = { 'fov': -1,
't' : -1,
'jd': -1 * 0.0,
'x': -1 * 0.0,
'y': -1 * 0.0,
'planes': []}
# get the fov and t simply from the file name
idata['fov'] = int(tif.fname.split('xy')[1].split('.tif')[0])
idata['t'] = int(tif.fname.split('xy')[0].split('t')[-1])
# a page is a plane, or stack, in the tiff. The other metadata is hidden down in there.
for page in tif:
for tag in page.tags.values():
#print("Checking tag",tag.name,tag.value)
t = tag.name, tag.value
t_string = u""
time_string = u""
# Interesting tag names: 65330, 65331 (binary data; good stuff), 65332
# we want to work with the tag of the name 65331
# if the tag name is not in the set of tags we find interesting then skip this cycle of the loop
if tag.name not in ('65331', '65332', 'strip_byte_counts', 'image_width', 'orientation', 'compression', 'new_subfile_type', 'fill_order', 'max_sample_value', 'bits_per_sample', '65328', '65333'):
#print("*** " + tag.name)
#print(tag.value)
pass
#if tag.name == '65330':
# return tag.value
if tag.name in ('65331'):
# make infolist a list of the tag values 0 to 65535 by zipping up a paired list of two bytes at two-byte intervals, i.e. (byte 0, byte 1), (byte 2, byte 3), ...
# note that 0X100 is hex for 256
infolist = [a+b*0x100 for a,b in zip(tag.value[0::2], tag.value[1::2])]
# get char values for each element in infolist
for c_entry in range(0, len(infolist)):
# the element corresponds to an ascii char for a letter or bracket (and a few other things)
if infolist[c_entry] < 127 and infolist[c_entry] > 64:
# add the letter to the unicode string t_string
t_string += chr(infolist[c_entry])
#elif infolist[c_entry] == 0:
# continue
else:
t_string += " "
# this block will find the dTimeAbsolute and print the subsequent integers
# index 170 is counting seconds, and rollover of index 170 leads to increment of index 171
# rollover of index 171 leads to increment of index 172
# get the position of the array by finding the index of the t_string at which dTimeAbsolute is listed; note that 2*len("dTimeAbsolute")=26
#print(t_string)
arraypos = t_string.index("dXPos") * 2 + 16
xarr = tag.value[arraypos:arraypos+4]
b = ''.join(chr(i) for i in xarr)
idata['x'] = float(struct.unpack('<f', b)[0])
arraypos = t_string.index("dYPos") * 2 + 16
yarr = tag.value[arraypos:arraypos+4]
b = ''.join(chr(i) for i in yarr)
idata['y'] = float(struct.unpack('<f', b)[0])
arraypos = t_string.index("dTimeAbsolute") * 2 + 26
shortarray = tag.value[arraypos+2:arraypos+10]
b = ''.join(chr(i) for i in shortarray)
idata['jd'] = float(struct.unpack('<d', b)[0])
# extract plane names
il = [a+b*0x100 for a,b in zip(tag.value[0::2], tag.value[1::2])]
li = [a+b*0x100 for a,b in zip(tag.value[1::2], tag.value[2::2])]
strings = list(zip(il, li))
allchars = ""
for c_entry in range(0, len(strings)):
if 31 < strings[c_entry][0] < 127:
allchars += chr(strings[c_entry][0])
elif 31 < strings[c_entry][1] < 127:
allchars += chr(strings[c_entry][1])
else:
allchars += " "
allchars = re.sub(' +',' ', allchars)
words = allchars.split(" ")
planes = []
for idx in [i for i, x in enumerate(words) if x == "sOpticalConfigName"]:
planes.append(words[idx+1])
idata['planes'] = planes
return idata
# finds metadata in a tiff image which has been exported with nd2ToTIFF.py.
def get_tif_metadata_nd2ToTIFF(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This is for tiff files as exported by the mm3 script mm3_nd2ToTIFF.py. All the metadata
is found in that script and saved in json format to the tiff, so it is simply extracted here
Parameters:
tif: TIFF file object from which data will be extracted
Returns:
dictionary of values:
'fov': int,
't' : int,
'jdn' (float)
'x' (float)
'y' (float)
'planes' (list of strings)
Called by
mm3_Compile.get_tif_params
'''
# get the first page of the tiff and pull out image description
# this dictionary should be in the above form
for tag in tif.pages[0].tags:
if tag.name=="ImageDescription":
idata=tag.value
break
#print(idata)
idata = json.loads(idata)
return idata
# Finds metadata from the filename
def get_tif_metadata_filename(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This just gets the tiff metadata from the filename and is a backup option when the format of the metadata is not known.
Parameters:
tif: TIFF file object from which data will be extracted
Returns:
dictionary of values:
'fov': int,
't' : int,
'jdn' (float)
'x' (float)
'y' (float)
Called by
mm3_Compile.get_tif_params
'''
idata = {'fov' : get_fov(tif.filename), # fov id
't' : get_time(tif.filename), # time point
'jd' : -1 * 0.0, # absolute julian time
'x' : -1 * 0.0, # x position on stage [um]
'y' : -1 * 0.0} # y position on stage [um]
return idata
# make a lookup time table for converting nominal time to elapsed time in seconds
def make_time_table(analyzed_imgs):
'''
Loops through the analyzed images and uses the jd time in the metadata to find the elapsed
time in seconds that each picture was taken. This is later used for more accurate elongation
rate calculation.
Parameters
----------
analyzed_imgs : dict
The output of get_tif_params.
params['use_jd'] : boolean
If set to True, 'jd' time will be used from the image metadata to use to create time table. Otherwise the 't' index will be used, and the parameter 'seconds_per_time_index' will be used from the parameters.yaml file to convert to seconds.
Returns
-------
time_table : dict
Look up dictionary with keys for the FOV and then the time point.
'''
information('Making time table...')
# initialize
time_table = {}
first_time = float('inf')
# need to go through the data once to find the first time
for iname, idata in six.iteritems(analyzed_imgs):
if params['use_jd']:
if idata['jd'] < first_time:
first_time = idata['jd']
else:
if idata['t'] < first_time:
first_time = idata['t']
# init dictionary for specific times per FOV
if idata['fov'] not in time_table:
time_table[idata['fov']] = {}
for iname, idata in six.iteritems(analyzed_imgs):
if params['use_jd']:
# convert jd time to elapsed time in seconds
t_in_seconds = np.around((idata['jd'] - first_time) * 24*60*60, decimals=0).astype('uint32')
else:
t_in_seconds = np.around((idata['t'] - first_time) * params['moviemaker']['seconds_per_time_index'], decimals=0).astype('uint32')
time_table[int(idata['fov'])][int(idata['t'])] = int(t_in_seconds)
# save to .pkl. This pkl will be loaded into the params
# with open(os.path.join(params['ana_dir'], 'time_table.pkl'), 'wb') as time_table_file:
# pickle.dump(time_table, time_table_file, protocol=pickle.HIGHEST_PROTOCOL)
# with open(os.path.join(params['ana_dir'], 'time_table.txt'), 'w') as time_table_file:
# pprint(time_table, stream=time_table_file)
with open(os.path.join(params['ana_dir'], 'time_table.yaml'), 'w') as time_table_file:
yaml.dump(data=time_table, stream=time_table_file, default_flow_style=False, tags=None)
information('Time table saved.')
return time_table
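# Worked example of the jd -> seconds conversion above: a jd difference
# of 0.5 days maps to around(0.5 * 24*60*60) = 43200 seconds.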
# saves traps sliced via Unet
def save_tiffs(imgDict, analyzed_imgs, fov_id):
savePath = os.path.join(params['experiment_directory'],
params['analysis_directory'],
params['chnl_dir'])
img_names = [key for key in analyzed_imgs.keys()]
image_params = analyzed_imgs[img_names[0]]
for peak,img in six.iteritems(imgDict):
img = img.astype('uint16')
if not os.path.isdir(savePath):
os.mkdir(savePath)
for planeNumber in image_params['planes']:
channel_filename = os.path.join(savePath, params['experiment_name'] + '_xy{0:0=3}_p{1:0=4}_c{2}.tif'.format(fov_id, peak, planeNumber))
io.imsave(channel_filename, img[:,:,:,int(planeNumber)-1])
# slice_and_write cuts up the image files one at a time and writes them out to tiff stacks
def tiff_stack_slice_and_write(images_to_write, channel_masks, analyzed_imgs):
'''Writes out 4D stacks of TIFF images per channel.
Loads all tiffs from an FOV into memory and then slices all time points at once.
Called by
__main__
'''
# make an array of images and then concatenate them into one big stack
image_fov_stack = []
# go through list of images and get the file path
for n, image in enumerate(images_to_write):
# analyzed_imgs dictionary will be found in main scope. [0] is the key, [1] is jd
image_params = analyzed_imgs[image[0]]
information("Loading %s." % image_params['filepath'].split('/')[-1])
if n == 0:
# declare identification variables for saving using first image
fov_id = image_params['fov']
# load the tif and store it in array
with tiff.TiffFile(image_params['filepath']) as tif:
image_data = tif.asarray()
# channel finding was also done on images after orientation was fixed
image_data = fix_orientation(image_data)
# add additional axis if the image is flat
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
# change axis so it goes Y, X, Plane
image_data = np.rollaxis(image_data, 0, 3)
# add it to list. The images should be in time order
image_fov_stack.append(image_data)
# concatenate the list into one big ass stack
image_fov_stack = np.stack(image_fov_stack, axis=0)
# cut out the channels as per channel masks for this fov
for peak, channel_loc in six.iteritems(channel_masks[fov_id]):
#information('Slicing and saving channel peak %s.' % channel_filename.split('/')[-1])
information('Slicing and saving channel peak %d.' % peak)
# channel masks should only contain ints, but you can use this for hard fix
# for i in range(len(channel_loc)):
# for j in range(len(channel_loc[i])):
# channel_loc[i][j] = int(channel_loc[i][j])
# slice out channel.
# The function should recognize the shape length as 4 and cut all time points
channel_stack = cut_slice(image_fov_stack, channel_loc)
# save a different time stack for all colors
for color_index in range(channel_stack.shape[3]):
# this is the filename for the channel
# # chnl_dir and p will be looked for in the scope above (__main__)
channel_filename = os.path.join(params['chnl_dir'], params['experiment_name'] + '_xy%03d_p%04d_c%1d.tif' % (fov_id, peak, color_index+1))
# save stack
tiff.imsave(channel_filename, channel_stack[:,:,:,color_index], compress=4)
return
# saves traps sliced via Unet to an hdf5 file
def save_hdf5(imgDict, img_names, analyzed_imgs, fov_id, channel_masks):
'''Writes out 4D stacks of images to an HDF5 file.
Called by
mm3_Compile.py
'''
savePath = params['hdf5_dir']
if not os.path.isdir(savePath):
os.mkdir(savePath)
img_times = [analyzed_imgs[key]['t'] for key in img_names]
img_jds = [analyzed_imgs[key]['jd'] for key in img_names]
fov_ids = [analyzed_imgs[key]['fov'] for key in img_names]
# get image_params from first image from current fov
image_params = analyzed_imgs[img_names[0]]
# establish some variables for hdf5 attributes
fov_id = image_params['fov']
x_loc = image_params['x']
y_loc = image_params['y']
image_shape = image_params['shape']
image_planes = image_params['planes']
fov_channel_masks = channel_masks[fov_id]
with h5py.File(os.path.join(savePath,'{}_xy{:0=2}.hdf5'.format(params['experiment_name'],fov_id)), 'w', libver='earliest') as h5f:
# add in metadata for this FOV
# these attributes should be common for all channels
h5f.attrs.create('fov_id', fov_id)
h5f.attrs.create('stage_x_loc', x_loc)
h5f.attrs.create('stage_y_loc', y_loc)
h5f.attrs.create('image_shape', image_shape)
# encoding is because HDF5 has problems with numpy unicode
h5f.attrs.create('planes', [plane.encode('utf8') for plane in image_planes])
h5f.attrs.create('peaks', sorted([key for key in imgDict.keys()]))
# this is for things that change across time, for these create a dataset
img_names = np.asarray(img_names)
img_names = np.expand_dims(img_names, 1)
img_names = img_names.astype('S100')
h5ds = h5f.create_dataset(u'filenames', data=img_names,
chunks=True, maxshape=(None, 1), dtype='S100',
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times', data=np.expand_dims(img_times, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times_jd', data=np.expand_dims(img_jds, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
# cut out the channels as per channel masks for this fov
for peak,channel_stack in six.iteritems(imgDict):
channel_stack = channel_stack.astype('uint16')
# create group for this trap
h5g = h5f.create_group('channel_%04d' % peak)
# add attribute for peak_id, channel location
h5g.attrs.create('peak_id', peak)
channel_loc = fov_channel_masks[peak]
h5g.attrs.create('channel_loc', channel_loc)
# save a different dataset for all colors
for color_index in range(channel_stack.shape[3]):
# create the dataset for the image. Review docs for these options.
h5ds = h5g.create_dataset(u'p%04d_c%1d' % (peak, color_index+1),
data=channel_stack[:,:,:,color_index],
chunks=(1, channel_stack.shape[1], channel_stack.shape[2]),
maxshape=(None, channel_stack.shape[1], channel_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# h5ds.attrs.create('plane', image_planes[color_index].encode('utf8'))
# write the data even though we have more to write (free up memory)
h5f.flush()
return
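# Example readback (a hedged sketch) matching the group/dataset layout
# written above, e.g. for peak 45 and the first color plane:
# with h5py.File(h5_path, 'r') as h5f:
#     stack = h5f['channel_0045/p0045_c1'][:]
# Note that the filename written here
# ('{experiment_name}_xy{fov:02d}.hdf5') differs from the
# 'xy%03d.hdf5' pattern that load_stack() opens; load_stack() matches
# the file written by hdf5_stack_slice_and_write() below.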
# same thing as tiff_stack_slice_and_write but do it for hdf5
def hdf5_stack_slice_and_write(images_to_write, channel_masks, analyzed_imgs):
'''Writes out 4D stacks of TIFF images to an HDF5 file.
Called by
__main__
'''
# make an array of images and then concatenate them into one big stack
image_fov_stack = []
# make arrays for filenames and times
image_filenames = []
image_times = [] # times is still an integer but may be indexed arbitrarily
image_jds = [] # jds = julian dates (times)
# go through list of images, load and fix them, and create arrays of metadata
for n, image in enumerate(images_to_write):
image_name = image[0] # [0] is the key, [1] is jd
# analyzed_imgs dictionary will be found in main scope.
image_params = analyzed_imgs[image_name]
information("Loading %s." % image_params['filepath'].split('/')[-1])
# add information to metadata arrays
image_filenames.append(image_name)
image_times.append(image_params['t'])
image_jds.append(image_params['jd'])
# declare identification variables for saving using first image
if n == 0:
# same across fov
fov_id = image_params['fov']
x_loc = image_params['x']
y_loc = image_params['y']
image_shape = image_params['shape']
image_planes = image_params['planes']
# load the tif and store it in array
with tiff.TiffFile(image_params['filepath']) as tif:
image_data = tif.asarray()
# channel finding was also done on images after orientation was fixed
image_data = fix_orientation(image_data)
# add additional axis if the image is flat
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
# change axis so it goes Y, X, Plane
image_data = np.rollaxis(image_data, 0, 3)
# add it to list. The images should be in time order
image_fov_stack.append(image_data)
# concatenate the list into one big ass stack
image_fov_stack = np.stack(image_fov_stack, axis=0)
# create the HDF5 file for the FOV, first time this is being done.
with h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'w', libver='earliest') as h5f:
# add in metadata for this FOV
# these attributes should be common for all channels
h5f.attrs.create('fov_id', fov_id)
h5f.attrs.create('stage_x_loc', x_loc)
h5f.attrs.create('stage_y_loc', y_loc)
h5f.attrs.create('image_shape', image_shape)
# encoding is because HDF5 has problems with numpy unicode
h5f.attrs.create('planes', [plane.encode('utf8') for plane in image_planes])
h5f.attrs.create('peaks', sorted(channel_masks[fov_id].keys()))
# this is for things that change across time, for these create a dataset
h5ds = h5f.create_dataset(u'filenames', data=np.expand_dims(image_filenames, 1),
chunks=True, maxshape=(None, 1), dtype='S100',
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times', data=np.expand_dims(image_times, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times_jd', data=np.expand_dims(image_jds, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
# cut out the channels as per channel masks for this fov
for peak, channel_loc in six.iteritems(channel_masks[fov_id]):
#information('Slicing and saving channel peak %s.' % channel_filename.split('/')[-1])
information('Slicing and saving channel peak %d.' % peak)
# create group for this channel
h5g = h5f.create_group('channel_%04d' % peak)
# add attribute for peak_id, channel location
h5g.attrs.create('peak_id', peak)
h5g.attrs.create('channel_loc', channel_loc)
# channel masks should only contain ints, but you can use this for a hard fix
# for i in range(len(channel_loc)):
# for j in range(len(channel_loc[i])):
# channel_loc[i][j] = int(channel_loc[i][j])
# slice out channel.
# The function should recognize the shape length as 4 and cut all time points
channel_stack = cut_slice(image_fov_stack, channel_loc)
# save a different dataset for all colors
for color_index in range(channel_stack.shape[3]):
# create the dataset for the image. Review docs for these options.
h5ds = h5g.create_dataset(u'p%04d_c%1d' % (peak, color_index+1),
data=channel_stack[:,:,:,color_index],
chunks=(1, channel_stack.shape[1], channel_stack.shape[2]),
maxshape=(None, channel_stack.shape[1], channel_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# h5ds.attrs.create('plane', image_planes[color_index].encode('utf8'))
# write the data even though we have more to write (free up memory)
h5f.flush()
return
def tileImage(img, subImageNumber):
divisor = int(np.sqrt(subImageNumber))
M = img.shape[0]//divisor
N = img.shape[1]//divisor # tile width comes from the second (column) axis
print(img.shape, M, N, divisor, subImageNumber)
ans = ([img[x:x+M,y:y+N] for x in range(0,img.shape[0],M) for y in range(0,img.shape[1],N)])
tiles=[]
for m in ans:
if m.shape[0]==512 and m.shape[1]==512:
tiles.append(m)
tiles=np.asarray(tiles)
#print(tiles)
return(tiles)
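# Worked example (derived from the code above): a 2048x2048 image with
# subImageNumber=16 gives divisor=4 and M=N=512, i.e. sixteen 512x512
# tiles, all of which pass the shape filter:
# tileImage(np.zeros((2048, 2048)), subImageNumber=16).shape -> (16, 512, 512)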
def get_weights(img, subImageNumber):
divisor = int(np.sqrt(subImageNumber))
M = img.shape[0]//divisor
N = img.shape[1]//divisor # seam positions along columns come from the second axis
weights = np.ones((img.shape[0],img.shape[1]),dtype='uint8')
for i in range(divisor-1):
weights[(M*(i+1))-25:(M*(i+1)+25),:] = 0
weights[:,(N*(i+1))-25:(N*(i+1)+25)] = 0
return(weights)
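# Example: for a 2048x2048 image with subImageNumber=16 (divisor=4), the
# weights are 1 everywhere except 50-pixel-wide zero bands centred on the
# three internal tile seams at 512, 1024 and 1536 in each direction.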
def permute_image(img, trap_align_metadata):
# are there three dimensions?
if len(img.shape) == 3:
if img.shape[0] < 3: # for tifs with fewer than three imaging channels, the first dimension separates channels
# img = np.transpose(img, (1,2,0))
img = img[trap_align_metadata['phase_plane_index'],:,:] # grab just the phase channel
else:
img = img[:,:,trap_align_metadata['phase_plane_index']] # grab just the phase channel
return(img)
def imageConcatenatorFeatures(imgStack, subImageNumber = 64):
rowNumPerImage = int(np.sqrt(subImageNumber)) # here I'm assuming our large images are square, with equal number of crops in each dimension
#print(rowNumPerImage)
imageNum = int(imgStack.shape[0]/subImageNumber) # total number of sub-images divided by the number of sub-images in each original large image
iterNum = int(imageNum*rowNumPerImage)
imageDims = int(np.sqrt(imgStack.shape[1]*imgStack.shape[2]*subImageNumber))
featureNum = int(imgStack.shape[3])
bigImg = np.zeros(shape=(imageNum, imageDims, imageDims, featureNum), dtype='float32') # create array to store reconstructed images
featureRowDicts = []
for j in range(featureNum):
rowDict = {}
for i in range(iterNum):
baseNum = int(i*iterNum/imageNum)
# concatenate columns of 256x256 images to build each 256x2048 row
rowDict[i] = np.column_stack((imgStack[baseNum,:,:,j],imgStack[baseNum+1,:,:,j],
imgStack[baseNum+2,:,:,j], imgStack[baseNum+3,:,:,j]))#,
#imgStack[baseNum+4,:,:,j],imgStack[baseNum+5,:,:,j],
#imgStack[baseNum+6,:,:,j],imgStack[baseNum+7,:,:,j]))
featureRowDicts.append(rowDict)
for j in range(featureNum):
for i in range(imageNum):
baseNum = int(i*rowNumPerImage)
# concatenate appropriate 256x2048 rows to build a 2048x2048 image and place it into bigImg
bigImg[i,:,:,j] = np.row_stack((featureRowDicts[j][baseNum],featureRowDicts[j][baseNum+1],
featureRowDicts[j][baseNum+2],featureRowDicts[j][baseNum+3]))#,
#featureRowDicts[j][baseNum+4],featureRowDicts[j][baseNum+5],
#featureRowDicts[j][baseNum+6],featureRowDicts[j][baseNum+7]))
return(bigImg)
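# Note: with the extra stacking terms commented out above, this function
# assembles 4 crops per row and 4 rows (16 crops per image), which
# matches subImageNumber=16 rather than the default of 64; restoring the
# commented terms handles the 64-crop case.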
def imageConcatenatorFeatures2(imgStack, subImageNumber = 81):
rowNumPerImage = int(np.sqrt(subImageNumber)) # here I'm assuming our large images are square, with equal number of crops in each dimension
imageNum = int(imgStack.shape[0]/subImageNumber) # total number of sub-images divided by the number of sub-images in each original large image
iterNum = int(imageNum*rowNumPerImage)
imageDims = int(np.sqrt(imgStack.shape[1]*imgStack.shape[2]*subImageNumber))
featureNum = int(imgStack.shape[3])
bigImg = np.zeros(shape=(imageNum, imageDims, imageDims, featureNum), dtype='float32') # create array to store reconstructed images
featureRowDicts = []
for j in range(featureNum):
rowDict = {}
for i in range(iterNum):
baseNum = int(i*iterNum/imageNum)
# concatenate columns of 256x256 images to build each 256x2048 row
rowDict[i] = np.column_stack((imgStack[baseNum,:,:,j],imgStack[baseNum+1,:,:,j],
imgStack[baseNum+2,:,:,j], imgStack[baseNum+3,:,:,j],
imgStack[baseNum+4,:,:,j]))#,imgStack[baseNum+5,:,:,j],
#imgStack[baseNum+6,:,:,j],imgStack[baseNum+7,:,:,j],
#imgStack[baseNum+8,:,:,j]))
featureRowDicts.append(rowDict)
for j in range(featureNum):
for i in range(imageNum):
baseNum = int(i*rowNumPerImage)
# concatenate appropriate 256x2048 rows to build a 2048x2048 image and place it into bigImg
bigImg[i,:,:,j] = np.row_stack((featureRowDicts[j][baseNum],featureRowDicts[j][baseNum+1],
featureRowDicts[j][baseNum+2],featureRowDicts[j][baseNum+3],
featureRowDicts[j][baseNum+4]))#,featureRowDicts[j][baseNum+5],
#featureRowDicts[j][baseNum+6],featureRowDicts[j][baseNum+7],
#featureRowDicts[j][baseNum+8]))
return(bigImg)
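# As above: with the extra terms commented out, this variant assembles
# 5 crops per row and 5 rows (25 crops per image), matching
# subImageNumber=25 rather than the default of 81.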
def get_weights_array(arr=np.zeros((2048,2048)), shiftDistance=128, subImageNumber=64, padSubImageNumber=81):
originalImageWeights = get_weights(arr, subImageNumber=subImageNumber)
shiftLeftWeights = np.pad(originalImageWeights, pad_width=((0,0),(0,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))[:,shiftDistance:]
shiftRightWeights = np.pad(originalImageWeights, pad_width=((0,0),(shiftDistance,0)),
mode='constant', constant_values=((0,0),(0,0)))[:,:(-1*shiftDistance)]
shiftUpWeights = np.pad(originalImageWeights, pad_width=((0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[shiftDistance:,:]
shiftDownWeights = np.pad(originalImageWeights, pad_width=((shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[:(-1*shiftDistance),:]
expandedImageWeights = get_weights(np.zeros((arr.shape[0]+2*shiftDistance,arr.shape[1]+2*shiftDistance)), subImageNumber=padSubImageNumber)[shiftDistance:-shiftDistance,shiftDistance:-shiftDistance]
allWeights = np.stack((originalImageWeights, expandedImageWeights, shiftUpWeights, shiftDownWeights, shiftLeftWeights,shiftRightWeights), axis=-1)
stackWeights = np.stack((allWeights,allWeights),axis=0)
stackWeights = np.stack((stackWeights,stackWeights,stackWeights),axis=3)
return(stackWeights)
# predicts locations of channels in an image using deep learning model
def get_frame_predictions(img,model,stackWeights, shiftDistance=256, subImageNumber=16, padSubImageNumber=25, debug=False):
pred = predict_first_image_channels(img, model, shiftDistance=shiftDistance,
subImageNumber=subImageNumber, padSubImageNumber=padSubImageNumber, debug=debug)[0,...]
# print(pred.shape)
if debug:
print(pred.shape)
compositePrediction = np.average(pred, axis=3, weights=stackWeights)
# print(compositePrediction.shape)
padSize = (compositePrediction.shape[0]-img.shape[0])//2
compositePrediction = util.crop(compositePrediction,((padSize,padSize),
(padSize,padSize),
(0,0)))
# print(compositePrediction.shape)
return(compositePrediction)
def apply_median_filter_normalize(imgs):
selem = morphology.disk(3)
for i in range(imgs.shape[0]):
# Store sample
tmpImg = imgs[i,:,:,0]
medImg = median(tmpImg, selem)
tmpImg = medImg/np.max(medImg)
tmpImg = np.expand_dims(tmpImg, axis=-1)
imgs[i,:,:,:] = tmpImg
return(imgs)
def predict_first_image_channels(img, model,
subImageNumber=16, padSubImageNumber=25,
shiftDistance=128, batchSize=1,
debug=False):
imgSize = img.shape[0]
padSize = (2048-imgSize)//2 # how much to pad on each side to get up to 2048x2048?
imgStack = np.pad(img, pad_width=((padSize,padSize),(padSize,padSize)),
mode='constant', constant_values=((0,0),(0,0))) # pad the images to make them 2048x2048
# pad the stack by 128 pixels on each side to get complementary crops that I can run the network on. This
# should help me fill in low-confidence regions where the crop boundaries were for the original image
imgStackExpand = np.pad(imgStack, pad_width=((shiftDistance,shiftDistance),(shiftDistance,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))
imgStackShiftRight = np.pad(imgStack, pad_width=((0,0),(0,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))[:,shiftDistance:]
imgStackShiftLeft = np.pad(imgStack, pad_width=((0,0),(shiftDistance,0)),
mode='constant', constant_values=((0,0),(0,0)))[:,:-shiftDistance]
imgStackShiftDown = np.pad(imgStack, pad_width=((0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[shiftDistance:,:]
imgStackShiftUp = np.pad(imgStack, pad_width=((shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[:-shiftDistance,:]
#print(imgStackShiftUp.shape)
crops = tileImage(imgStack, subImageNumber=subImageNumber)
print("Crops: ", crops.shape)
crops = np.expand_dims(crops, -1)
data_gen_args = {'batch_size':params['compile']['channel_prediction_batch_size'],
'n_channels':1,
'normalize_to_one':True,
'shuffle':False}
predict_gen_args = {'verbose':1,
'use_multiprocessing':True,
'workers':params['num_analyzers']}
img_generator = TrapSegmentationDataGenerator(crops, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
prediction = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
#print(prediction.shape)
cropsExpand = tileImage(imgStackExpand, subImageNumber=padSubImageNumber)
cropsExpand = np.expand_dims(cropsExpand, -1)
img_generator = TrapSegmentationDataGenerator(cropsExpand, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionExpand = imageConcatenatorFeatures2(predictions, subImageNumber=padSubImageNumber)
predictionExpand = util.crop(predictionExpand, ((0,0),(shiftDistance,shiftDistance),(shiftDistance,shiftDistance),(0,0)))
#print(predictionExpand.shape)
cropsShiftLeft = tileImage(imgStackShiftLeft, subImageNumber=subImageNumber)
cropsShiftLeft = np.expand_dims(cropsShiftLeft, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftLeft, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionLeft = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionLeft = np.pad(predictionLeft, pad_width=((0,0),(0,0),(0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:,shiftDistance:,:]
#print(predictionLeft.shape)
cropsShiftRight = tileImage(imgStackShiftRight, subImageNumber=subImageNumber)
cropsShiftRight = np.expand_dims(cropsShiftRight, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftRight, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionRight = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionRight = np.pad(predictionRight, pad_width=((0,0),(0,0),(shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:,:(-1*shiftDistance),:]
#print(predictionRight.shape)
cropsShiftUp = tileImage(imgStackShiftUp, subImageNumber=subImageNumber)
#print(cropsShiftUp.shape)
cropsShiftUp = np.expand_dims(cropsShiftUp, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftUp, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionUp = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionUp = np.pad(predictionUp, pad_width=((0,0),(0,shiftDistance),(0,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,shiftDistance:,:,:]
#print(predictionUp.shape)
cropsShiftDown = tileImage(imgStackShiftDown, subImageNumber=subImageNumber)
cropsShiftDown = np.expand_dims(cropsShiftDown, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftDown, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionDown = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionDown = np.pad(predictionDown, pad_width=((0,0),(shiftDistance,0),(0,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:(-1*shiftDistance),:,:]
#print(predictionDown.shape)
allPredictions = np.stack((prediction, predictionExpand,
predictionUp, predictionDown,
predictionLeft, predictionRight), axis=-1)
return(allPredictions)
# takes initial U-net centroids for trap locations, and creates bounding boxes for each trap at the defined height and width
def get_frame_trap_bounding_boxes(trapLabels, trapProps, trapAreaThreshold=2000, trapWidth=27, trapHeight=256):
badTrapLabels = [reg.label for reg in trapProps if reg.area < trapAreaThreshold] # filter out small "trap" regions
goodTraps = trapLabels.copy()
for label in badTrapLabels:
goodTraps[goodTraps == label] = 0 # re-label bad traps as background (0)
goodTrapProps = measure.regionprops(goodTraps)
trapCentroids = [(int(np.round(reg.centroid[0])),int(np.round(reg.centroid[1]))) for reg in goodTrapProps] # get centroids as integers
trapBboxes = []
for centroid in trapCentroids:
rowIndex = centroid[0]
colIndex = centroid[1]
minRow = rowIndex-trapHeight//2
maxRow = rowIndex+trapHeight//2
minCol = colIndex-trapWidth//2
maxCol = colIndex+trapWidth//2
if trapWidth % 2 != 0:
maxCol += 1
coordArray = np.array([minRow,maxRow,minCol,maxCol])
# remove any traps at edges of image
if np.any(coordArray > goodTraps.shape[0]):
continue
if np.any(coordArray < 0):
continue
trapBboxes.append((minRow,minCol,maxRow,maxCol))
return(trapBboxes)
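# Worked example of the bbox arithmetic above: a centroid at
# (row=300, col=100) with trapHeight=256 and trapWidth=27 yields
# (minRow, minCol, maxRow, maxCol) = (172, 87, 428, 114)
# (maxCol gains one pixel because trapWidth is odd).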
# this function performs image alignment as defined by the shifts passed as an argument
def crop_traps(fileNames, trapProps, labelledTraps, bboxesDict, trap_align_metadata):
frameNum = trap_align_metadata['frame_count']
channelNum = trap_align_metadata['plane_number']
trapImagesDict = {key:np.zeros((frameNum,
trap_align_metadata['trap_height'],
trap_align_metadata['trap_width'],
channelNum)) for key in bboxesDict}
trapClosedEndPxDict = {}
flipImageDict = {}
trapMask = labelledTraps
for frame in range(frameNum):
if (frame+1) % 20 == 0:
print("Cropping trap regions for frame number {} of {}.".format(frame+1, frameNum))
imgPath = os.path.join(params['experiment_directory'],params['image_directory'],fileNames[frame])
fullFrameImg = io.imread(imgPath)
if len(fullFrameImg.shape) == 3:
if fullFrameImg.shape[0] < 3: # for tifs with fewer than three imaging channels, the first dimension separates channels
fullFrameImg = np.transpose(fullFrameImg, (1,2,0))
trapClosedEndPxDict[fileNames[frame]] = {key:{} for key in bboxesDict.keys()}
for key in trapImagesDict.keys():
bbox = bboxesDict[key][frame]
trapImagesDict[key][frame,:,:,:] = fullFrameImg[bbox[0]:bbox[2],bbox[1]:bbox[3],:]
#tmpImg = np.reshape(fullFrameImg[trapMask==key], (trapHeight,trapWidth,channelNum))
if frame == 0:
medianProfile = np.median(trapImagesDict[key][frame,:,:,0],axis=1) # get intensity of middle column of trap
maxIntensityRow = np.argmax(medianProfile)
if maxIntensityRow > trap_align_metadata['trap_height']//2:
flipImageDict[key] = 0
else:
flipImageDict[key] = 1
if flipImageDict[key] == 1:
trapImagesDict[key][frame,:,:,:] = trapImagesDict[key][frame,::-1,:,:]
trapClosedEndPxDict[fileNames[frame]][key]['closed_end_px'] = bbox[0]
trapClosedEndPxDict[fileNames[frame]][key]['open_end_px'] = bbox[2]
else:
trapClosedEndPxDict[fileNames[frame]][key]['closed_end_px'] = bbox[2]
trapClosedEndPxDict[fileNames[frame]][key]['open_end_px'] = bbox[0]
continue
return(trapImagesDict, trapClosedEndPxDict)
# gets shifted bounding boxes to crop traps through time
def shift_bounding_boxes(bboxesDict, shifts, imgSize):
bboxesShiftDict = {}
for key in bboxesDict.keys():
bboxesShiftDict[key] = []
bboxes = bboxesDict[key]
for i in range(shifts.shape[0]):
if i == 0:
bboxesShiftDict[key].append(bboxes)
else:
minRow = bboxes[0]+shifts[i,0]
minCol = bboxes[1]+shifts[i,1]
maxRow = bboxes[2]+shifts[i,0]
maxCol = bboxes[3]+shifts[i,1]
bboxesShiftDict[key].append((minRow,
minCol,
maxRow,
maxCol))
if np.any(np.asarray([minRow,minCol,maxRow,maxCol]) < 0):
print("channel {} removed: out of frame".format(key))
del bboxesShiftDict[key]
break
if np.any(np.asarray([minRow,minCol,maxRow,maxCol]) > imgSize):
print("channel {} removed: out of frame".format(key))
del bboxesShiftDict[key]
break
return(bboxesShiftDict)
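# Example: with a stored bbox of (172, 87, 428, 114) and a shift of
# (+2, -1) at frame i, the shifted box becomes (174, 86, 430, 113);
# any channel whose box leaves the frame is dropped for all time points.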
# finds the location of channels in a tif
def find_channel_locs(image_data):
'''Finds the location of channels from a phase contrast image. The channels are returned in
a dictionary where the key is the x position of the channel in pixels and the value is a
dictionary with the open and closed ends in pixels in y.
Called by
mm3_Compile.get_tif_params
'''
# declare temp variables from yaml parameter dict.
chan_w = params['compile']['channel_width']
chan_sep = params['compile']['channel_separation']
crop_wp = int(params['compile']['channel_width_pad'] + chan_w/2)
chan_snr = params['compile']['channel_detection_snr']
# Detect peaks in the x projection (i.e. find the channels)
projection_x = image_data.sum(axis=0).astype(np.int32)
# find_peaks_cwt is a function which attempts to find the peaks in a 1-D array by
# convolving it with a wave. here the wave is the default Mexican hat wave
# but the minimum signal to noise ratio is specified
# *** The range here should be a parameter or changed to a fraction.
peaks = find_peaks_cwt(projection_x, np.arange(chan_w-5,chan_w+5), min_snr=chan_snr)
# If the left-most peak position is within half of a channel separation,
# discard the channel from the list.
if peaks[0] < (chan_sep / 2):
peaks = peaks[1:]
# If the difference between the right-most peak position and the right edge
# of the image is less than half of a channel separation, discard the channel.
if image_data.shape[1] - peaks[-1] < (chan_sep / 2):
peaks = peaks[:-1]
# Find the average channel ends for the y-projected image
projection_y = image_data.sum(axis=1)
# find derivative, must use int32 because it was unsigned 16b before.
proj_y_d = np.diff(projection_y.astype(np.int32))
# use the top third to look for closed end, is pixel location of highest deriv
onethirdpoint_y = int(projection_y.shape[0]/3.0)
default_closed_end_px = proj_y_d[:onethirdpoint_y].argmax()
# use bottom third to look for open end, pixel location of lowest deriv
twothirdpoint_y = int(projection_y.shape[0]*2.0/3.0)
default_open_end_px = twothirdpoint_y + proj_y_d[twothirdpoint_y:].argmin()
default_length = default_open_end_px - default_closed_end_px # used for checks
# go through peaks and assign information
# dict for channel dimensions
chnl_loc_dict = {}
# key is peak location, value is dict with {'closed_end_px': px, 'open_end_px': px}
for peak in peaks:
# set defaults
chnl_loc_dict[peak] = {'closed_end_px': default_closed_end_px,
'open_end_px': default_open_end_px}
# redo the previous y projection finding with just this channel
channel_slice = image_data[:, peak-crop_wp:peak+crop_wp]
slice_projection_y = channel_slice.sum(axis = 1)
slice_proj_y_d = np.diff(slice_projection_y.astype(np.int32))
slice_closed_end_px = slice_proj_y_d[:onethirdpoint_y].argmax()
slice_open_end_px = twothirdpoint_y + slice_proj_y_d[twothirdpoint_y:].argmin()
slice_length = slice_open_end_px - slice_closed_end_px
# check if these values make sense. If so, use them. If not, use default
# make sure the length is within 15 pixels of the default
# *** This 15 should probably be a parameter or at least changed to a fraction.
if slice_length + 15 < default_length or slice_length - 15 > default_length:
continue
# make sure ends are greater than 15 pixels from image edge
if slice_closed_end_px < 15 or slice_open_end_px > image_data.shape[0] - 15:
continue
# if you made it to this point then update the entry
chnl_loc_dict[peak] = {'closed_end_px' : slice_closed_end_px,
'open_end_px' : slice_open_end_px}
return chnl_loc_dict
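# Example of the returned structure (peak positions and pixel values are
# illustrative only):
# {383: {'closed_end_px': 96, 'open_end_px': 352},
#  412: {'closed_end_px': 96, 'open_end_px': 352}}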
# make masks from initial set of images (same images as clusters)
def make_masks(analyzed_imgs):
'''
Make masks goes through the channel locations in the image metadata and builds a consensus
mask for each image per fov, which it returns as a dictionary named channel_masks.
The keys in this dictionary are fov ids, and the values are another dictionary. That dict's keys are channel locations (peaks) and its values are [2][2] arrays:
[[minrow, maxrow],[mincol, maxcol]] of pixel locations designating the corners of each mask
for each channel on the whole image.
One important consequence of this function is that the channel ids and the size of the
channel slices are decided now. Updates to masks must coordinate with these values.
Parameters
analyzed_imgs : dict
image information created by get_params
Returns
channel_masks : dict
dictionary of consensus channel masks.
Called By
mm3_Compile.py
Calls
'''
information("Determining initial channel masks...")
# declare temp variables from yaml parameter dict.
crop_wp = int(params['compile']['channel_width_pad'] + params['compile']['channel_width']/2)
chan_lp = int(params['compile']['channel_length_pad'])
# initialize dictionary
channel_masks = {}
# get the size of the images (hope they are the same)
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
image_rows = img_v['shape'][0] # rows (y pixels)
image_cols = img_v['shape'][1] # columns (x pixels)
break # just need one image to get the shape
# get the fov ids
fovs = []
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
if img_v['fov'] not in fovs:
fovs.append(img_v['fov'])
# max width and length across all fovs. channels will get expanded by these values
    # this is important for later updates to the masks, which should all end up the same size
max_chnl_mask_len = 0
max_chnl_mask_wid = 0
# for each fov make a channel_mask dictionary from consensus mask
for fov in fovs:
        # initialize the dict and consensus mask
channel_masks_1fov = {} # dict which holds channel masks {peak : [[y1, y2],[x1,x2]],...}
consensus_mask = np.zeros([image_rows, image_cols]) # mask for labeling
# bring up information for each image
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
# skip this one if it is not of the current fov
if img_v['fov'] != fov:
continue
# for each channel in each image make a single mask
img_chnl_mask = np.zeros([image_rows, image_cols])
# and add the channel mask to it
for chnl_peak, peak_ends in six.iteritems(img_v['channels']):
# pull out the peak location and top and bottom location
# and expand by padding (more padding done later for width)
x1 = max(chnl_peak - crop_wp, 0)
x2 = min(chnl_peak + crop_wp, image_cols)
y1 = max(peak_ends['closed_end_px'] - chan_lp, 0)
y2 = min(peak_ends['open_end_px'] + chan_lp, image_rows)
# add it to the mask for this image
img_chnl_mask[y1:y2, x1:x2] = 1
# add it to the consensus mask
consensus_mask += img_chnl_mask
        # Normalize consensus mask between 0 and 1.
consensus_mask = consensus_mask.astype('float32') / float(np.amax(consensus_mask))
        # label the connected regions of the consensus mask (ndi.label marks
        # contiguous nonzero areas); the [0] is the labeled array ([1] would be
        # the number of regions)
consensus_mask = ndi.label(consensus_mask)[0]
# go through each label
for label in np.unique(consensus_mask):
if label == 0: # label zero is the background
continue
binary_core = consensus_mask == label
# clean up the rough edges
poscols = np.any(binary_core, axis = 0) # column positions where true (any)
posrows = np.any(binary_core, axis = 1) # row positions where true (any)
            # channel_id given by horizontal position
# this is important. later updates to the positions will have to check
# if their channels contain this median value to match up
channel_id = int(np.median(np.where(poscols)[0]))
# store the edge locations of the channel mask in the dictionary. Will be ints
min_row = np.min(np.where(posrows)[0])
max_row = np.max(np.where(posrows)[0])
min_col = np.min(np.where(poscols)[0])
max_col = np.max(np.where(poscols)[0])
# if the min/max cols are within the image bounds,
# add the mask, as 4 points, to the dictionary
if min_col > 0 and max_col < image_cols:
channel_masks_1fov[channel_id] = [[min_row, max_row], [min_col, max_col]]
# find the largest channel width and height while you go round
max_chnl_mask_len = int(max(max_chnl_mask_len, max_row - min_row))
max_chnl_mask_wid = int(max(max_chnl_mask_wid, max_col - min_col))
# add channel_mask dictionary to the fov dictionary, use copy to play it safe
channel_masks[fov] = channel_masks_1fov.copy()
# update all channel masks to be the max size
cm_copy = channel_masks.copy()
for fov, peaks in six.iteritems(channel_masks):
# f_id = int(fov)
for peak, chnl_mask in six.iteritems(peaks):
# p_id = int(peak)
            # just add length to the open end (bottom of image, larger row index)
if chnl_mask[0][1] - chnl_mask[0][0] != max_chnl_mask_len:
cm_copy[fov][peak][0][1] = chnl_mask[0][0] + max_chnl_mask_len
# enlarge widths around the middle, but make sure you don't get floats
if chnl_mask[1][1] - chnl_mask[1][0] != max_chnl_mask_wid:
wid_diff = max_chnl_mask_wid - (chnl_mask[1][1] - chnl_mask[1][0])
                if wid_diff % 2 == 0:
                    cm_copy[fov][peak][1][0] = max(chnl_mask[1][0] - wid_diff//2, 0)
                    cm_copy[fov][peak][1][1] = min(chnl_mask[1][1] + wid_diff//2, image_cols - 1)
                else:
                    cm_copy[fov][peak][1][0] = max(chnl_mask[1][0] - (wid_diff-1)//2, 0)
                    cm_copy[fov][peak][1][1] = min(chnl_mask[1][1] + (wid_diff+1)//2, image_cols - 1)
# convert all values to ints
chnl_mask[0][0] = int(chnl_mask[0][0])
chnl_mask[0][1] = int(chnl_mask[0][1])
chnl_mask[1][0] = int(chnl_mask[1][0])
chnl_mask[1][1] = int(chnl_mask[1][1])
# cm_copy[fov][peak] = {'y_top': chnl_mask[0][0],
# 'y_bot': chnl_mask[0][1],
# 'x_left': chnl_mask[1][0],
# 'x_right': chnl_mask[1][1]}
# print(type(cm_copy[fov][peak][1][0]), cm_copy[fov][peak][1][0])
    # save the channel mask dictionary to text and yaml files (the pickle version below is commented out)
# with open(os.path.join(params['ana_dir'], 'channel_masks.pkl'), 'wb') as cmask_file:
# pickle.dump(cm_copy, cmask_file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(params['ana_dir'], 'channel_masks.txt'), 'w') as cmask_file:
pprint(cm_copy, stream=cmask_file)
with open(os.path.join(params['ana_dir'], 'channel_masks.yaml'), 'w') as cmask_file:
yaml.dump(data=cm_copy, stream=cmask_file, default_flow_style=False, tags=None)
information("Channel masks saved.")
return cm_copy
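# --- illustrative sketch, not called by the pipeline ---
# Hedged example of reading the saved mask file back and walking its structure.
# It assumes only the yaml file written by make_masks above; 'ana_dir' here is a
# stand-in for params['ana_dir'].
def _example_load_channel_masks(ana_dir):
    import os
    import yaml
    with open(os.path.join(ana_dir, 'channel_masks.yaml'), 'r') as cmask_file:
        channel_masks = yaml.safe_load(cmask_file)
    # each entry is [[min_row, max_row], [min_col, max_col]]
    for fov, peaks in channel_masks.items():
        for peak, ((y1, y2), (x1, x2)) in peaks.items():
            print(fov, peak, 'rows %d-%d cols %d-%d' % (y1, y2, x1, x2))
    return channel_masks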
# get each fov_id, peak_id, frame's mask bounding box from bounding boxes arrived at by convolutional neural network
def make_channel_masks_CNN(bboxes_dict):
'''
    Builds channel masks from bounding boxes arrived at by a convolutional neural network.
    The keys in the returned dictionary are peak_ids and the value of each is an array of shape (frameNumber,2,2):
    Each frameNumber's 2x2 slice of the array represents the given peak_id's [[minrow, maxrow],[mincol, maxcol]].
    One important consequence of this function is that the channel ids and the size of the
    channel slices are decided now. Updates to the masks must coordinate with these values.
    Parameters
    bboxes_dict : dict
        bounding boxes from the CNN, keyed by peak_id, with one
        (minrow, mincol, maxrow, maxcol) tuple per frame
    Returns
    channel_masks : dict
        dictionary of channel masks, keyed by peak_id.
    Called By
    mm3_Compile.py
    Calls
    '''
# initialize the new channel_masks dict
channel_masks = {}
# reorder elements of tuples in bboxes_dict to match [[minrow, maxrow], [mincol, maxcol]] convention above
peak_ids = [peak_id for peak_id in bboxes_dict.keys()]
peak_ids.sort()
    for peak_id in peak_ids:
        # get each frame's bounding boxes for the given peak_id
        frame_bboxes = bboxes_dict[peak_id]
        # allocate a fresh array per peak so all peaks don't share one array object
        bbox_array = np.zeros((len(frame_bboxes),2,2), dtype='uint16')
for frame_index in range(len(frame_bboxes)):
# replace the values in bbox_array with the proper ones from frame_bboxes
minrow = frame_bboxes[frame_index][0]
maxrow = frame_bboxes[frame_index][2]
mincol = frame_bboxes[frame_index][1]
maxcol = frame_bboxes[frame_index][3]
bbox_array[frame_index,0,0] = minrow
bbox_array[frame_index,0,1] = maxrow
bbox_array[frame_index,1,0] = mincol
bbox_array[frame_index,1,1] = maxcol
channel_masks[peak_id] = bbox_array
return(channel_masks)
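# --- illustrative sketch, not called by the pipeline ---
# Toy demonstration of the (minrow, mincol, maxrow, maxcol) -> [[minrow, maxrow],
# [mincol, maxcol]] reordering performed above; the peak id and box values are
# hypothetical.
def _example_cnn_bbox_reorder():
    import numpy as np
    bboxes_dict = {5: [(10, 100, 250, 140), (12, 101, 252, 141)]}
    masks = make_channel_masks_CNN(bboxes_dict)
    assert masks[5].shape == (2, 2, 2)
    assert (masks[5][0] == np.array([[10, 250], [100, 140]])).all()
    return masks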
### functions about trimming, padding, and manipulating images
# define function for flipping the images on an FOV by FOV basis
def fix_orientation(image_data):
'''
Fix the orientation. The standard direction for channels to open to is down.
called by
process_tif
get_params
'''
# user parameter indicates how things should be flipped
image_orientation = params['compile']['image_orientation']
    # if this is just a phase image, give it an extra layer so the rest of the code is fine
flat = False # flag for if the image is flat or multiple levels
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
flat = True
# setting image_orientation to 'auto' will use autodetection
if image_orientation == "auto":
# use 'phase_plane' to find the phase plane in image_data, assuming c1, c2, c3... naming scheme here.
try:
ph_channel = int(re.search('[0-9]', params['phase_plane']).group(0)) - 1
except:
# Pick the plane to analyze with the highest mean px value (should be phase)
ph_channel = np.argmax([np.mean(image_data[ci]) for ci in range(image_data.shape[0])])
        # flip based on the index of the highest average row value
# this should be closer to the opening
if np.argmax(image_data[ph_channel].mean(axis = 1)) < image_data[ph_channel].shape[0] / 2:
image_data = image_data[:,::-1,:]
else:
pass # no need to do anything
# flip if up is chosen
    elif image_orientation == "up":
        image_data = image_data[:,::-1,:] # assign (don't return) so flat images still get unwrapped below
    # do not flip the images if "down" is the specified image orientation
elif image_orientation == "down":
pass
if flat:
image_data = image_data[0] # just return that first layer
return image_data
# cuts out channels from the image
def cut_slice(image_data, channel_loc):
'''Takes an image and cuts out the channel based on the slice location
slice location is the list with the peak information, in the form
    [[y1, y2],[x1, x2]]. Returns the channel slice as a numpy array.
    The numpy array will be a stack if there are multiple planes.
    if you want to slice all the channels from a picture with the channel_masks
    dictionary use a loop like this:
    for peak_id, channel_loc in six.iteritems(channel_masks[fov_id]): # fov_id is the fov of the image
        channel_slice = cut_slice(image_pixel_data, channel_loc)
        # ... do something with the slice
NOTE: this function will try to determine what the shape of your
image is and slice accordingly. It expects the images are in the order
[t, x, y, c]. It assumes images with three dimensions are [x, y, c] not
[t, x, y].
'''
# case where image is in form [x, y]
if len(image_data.shape) == 2:
# make slice object
channel_slicer = np.s_[channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1]]
# case where image is in form [x, y, c]
elif len(image_data.shape) == 3:
channel_slicer = np.s_[channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1],:]
# case where image in form [t, x , y, c]
elif len(image_data.shape) == 4:
channel_slicer = np.s_[:,channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1],:]
# slice based on appropriate slicer object.
channel_slice = image_data[channel_slicer]
    # pad y of channel if slice happened to be outside of image (this padding path assumes a 4D stack)
y_difference = (channel_loc[0][1] - channel_loc[0][0]) - channel_slice.shape[1]
if y_difference > 0:
paddings = [[0, 0], # t
[0, y_difference], # y
[0, 0], # x
[0, 0]] # c
channel_slice = np.pad(channel_slice, paddings, mode='edge')
return channel_slice
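# --- illustrative sketch, not called by the pipeline ---
# Hedged example of cut_slice on a 4D [t, y, x, c] stack; the shapes and the
# channel location are made up.
def _example_cut_slice():
    import numpy as np
    stack = np.zeros((5, 256, 64, 2), dtype='uint16')   # t, y, x, c
    channel_loc = [[10, 200], [4, 40]]                  # [[y1, y2], [x1, x2]]
    channel_slice = cut_slice(stack, channel_loc)
    assert channel_slice.shape == (5, 190, 36, 2)
    return channel_slice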
# calculate cross correlation between pixels in channel stack
def channel_xcorr(fov_id, peak_id):
'''
Function calculates the cross correlation of images in a
stack to the first image in the stack. The output is an
array that is the length of the stack with the best cross
correlation between that image and the first image.
The very first value should be 1.
'''
pad_size = params['subtract']['alignment_pad']
# Use this number of images to calculate cross correlations
number_of_images = 20
# load the phase contrast images
image_data = load_stack(fov_id, peak_id, color=params['phase_plane'])
# if there are more images than number_of_images, use number_of_images images evenly
# spaced across the range
if image_data.shape[0] > number_of_images:
spacing = int(image_data.shape[0] / number_of_images)
image_data = image_data[::spacing,:,:]
if image_data.shape[0] > number_of_images:
image_data = image_data[:number_of_images,:,:]
# we will compare all images to this one, needs to be padded to account for image drift
first_img = np.pad(image_data[0,:,:], pad_size, mode='reflect')
    xcorr_array = [] # array holds cross correlation values
for img in image_data:
# use match_template to find all cross correlations for the
# current image against the first image.
xcorr_array.append(np.max(match_template(first_img, img)))
return xcorr_array
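# --- illustrative sketch, not called by the pipeline ---
# One hedged way a caller might use the cross-correlation values: flag a channel
# as drifting or otherwise suspect when its average correlation to the first
# frame falls below a cutoff. The 0.97 default is hypothetical, not a pipeline value.
def _example_flag_low_xcorr(xcorr_array, cutoff=0.97):
    import numpy as np
    return bool(np.mean(xcorr_array) < cutoff)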
### functions about subtraction
# average empty channels from stacks, making another TIFF stack
def average_empties_stack(fov_id, specs, color='c1', align=True):
'''Takes the fov file name and the peak names of the designated empties,
averages them and saves the image
Parameters
fov_id : int
FOV number
specs : dict
specifies whether a channel should be analyzed (1), used for making
an average empty (0), or ignored (-1).
color : string
Which plane to use.
align : boolean
Flag that is passed to the worker function average_empties, indicates
        whether images should be aligned before averaging (use False for fluorescent images)
Returns
        True if successful.
Saves empty stack to analysis folder
'''
information("Creating average empty channel for FOV %d." % fov_id)
# get peak ids of empty channels for this fov
empty_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 0: # 0 means it should be used for empty
empty_peak_ids.append(peak_id)
empty_peak_ids = sorted(empty_peak_ids) # sort for repeatability
# depending on how many empties there are choose what to do
# if there is no empty the user is going to have to copy another empty stack
if len(empty_peak_ids) == 0:
information("No empty channel designated for FOV %d." % fov_id)
return False
# if there is just one then you can just copy that channel
elif len(empty_peak_ids) == 1:
peak_id = empty_peak_ids[0]
information("One empty channel (%d) designated for FOV %d." % (peak_id, fov_id))
# load the one phase contrast as the empties
avg_empty_stack = load_stack(fov_id, peak_id, color=color)
# but if there is more than one empty you need to align and average them per timepoint
elif len(empty_peak_ids) > 1:
# load the image stacks into memory
empty_stacks = [] # list which holds phase image stacks of designated empties
for peak_id in empty_peak_ids:
# load data and append to list
image_data = load_stack(fov_id, peak_id, color=color)
empty_stacks.append(image_data)
information("%d empty channels designated for FOV %d." % (len(empty_stacks), fov_id))
# go through time points and create list of averaged empties
        avg_empty_stack = [] # list will be later concatenated into numpy array
time_points = range(image_data.shape[0]) # index is time
for t in time_points:
# get images from one timepoint at a time and send to alignment and averaging
imgs = [stack[t] for stack in empty_stacks]
avg_empty = average_empties(imgs, align=align) # function is in mm3
avg_empty_stack.append(avg_empty)
# concatenate list and then save out to tiff stack
avg_empty_stack = np.stack(avg_empty_stack, axis=0)
# save out data
if params['output'] == 'TIFF':
# make new name and save it
empty_filename = params['experiment_name'] + '_xy%03d_empty_%s.tif' % (fov_id, color)
tiff.imsave(os.path.join(params['empty_dir'],empty_filename), avg_empty_stack, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# delete the dataset if it exists (important for debug)
if 'empty_%s' % color in h5f:
del h5f[u'empty_%s' % color]
        # the empty channel should be its own dataset
h5ds = h5f.create_dataset(u'empty_%s' % color,
data=avg_empty_stack,
chunks=(1, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
maxshape=(None, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# give attribute which says which channels contribute
h5ds.attrs.create('empty_channels', empty_peak_ids)
h5f.close()
information("Saved empty channel for FOV %d." % fov_id)
return True
# averages a list of empty channels
def average_empties(imgs, align=True):
'''
This function averages a set of images (empty channels) and returns a single image
of the same size. It first aligns the images to the first image before averaging.
    Alignment is done by enlarging the first image using reflect padding.
    Subsequent images are then aligned to this image and the offset recorded.
    These images are padded such that they are the same size as the first (padded) image but
    with the image in the correct (aligned) place. Reflect padding is again used.
    The images are then placed in a stack and averaged. This image is trimmed so it is the size
    of the original images.
Called by
average_empties_stack
'''
aligned_imgs = [] # list contains the aligned, padded images
if align:
        # pixel size to use for padding (amount that alignment could be off)
pad_size = params['subtract']['alignment_pad']
for n, img in enumerate(imgs):
# if this is the first image, pad it and add it to the stack
if n == 0:
ref_img = np.pad(img, pad_size, mode='reflect') # padded reference image
aligned_imgs.append(ref_img)
# otherwise align this image to the first padded image
else:
# find correlation between a convolution of img against the padded reference
match_result = match_template(ref_img, img)
# find index of highest correlation (relative to top left corner of img)
y, x = np.unravel_index(np.argmax(match_result), match_result.shape)
# pad img so it aligns and is the same size as reference image
pad_img = np.pad(img, ((y, ref_img.shape[0] - (y + img.shape[0])),
(x, ref_img.shape[1] - (x + img.shape[1]))), mode='reflect')
aligned_imgs.append(pad_img)
else:
# don't align, just link the names to go forward easily
aligned_imgs = imgs
# stack the aligned data along 3rd axis
aligned_imgs = np.dstack(aligned_imgs)
# get a mean image along 3rd axis
avg_empty = np.nanmean(aligned_imgs, axis=2)
    # trim off the padded edges (only if images were aligned, otherwise there was no padding)
if align:
avg_empty = avg_empty[pad_size:-1*pad_size, pad_size:-1*pad_size]
# change type back to unsigned 16 bit not floats
avg_empty = avg_empty.astype(dtype='uint16')
return avg_empty
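# --- illustrative sketch, not called by the pipeline ---
# Minimal check of average_empties on synthetic data. align=False is used so
# the example does not depend on params['subtract']['alignment_pad'].
def _example_average_empties():
    import numpy as np
    imgs = [np.full((50, 20), v, dtype='uint16') for v in (100, 200)]
    avg = average_empties(imgs, align=False)
    assert avg.dtype == np.uint16 and avg[0, 0] == 150
    return avg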
# this function is used when one FOV doesn't have an empty
def copy_empty_stack(from_fov, to_fov, color='c1'):
'''Copy an empty stack from one FOV to another'''
# load empty stack from one FOV
information('Loading empty stack from FOV {} to save for FOV {}.'.format(from_fov, to_fov))
avg_empty_stack = load_stack(from_fov, 0, color='empty_{}'.format(color))
# save out data
if params['output'] == 'TIFF':
# make new name and save it
empty_filename = params['experiment_name'] + '_xy%03d_empty_%s.tif' % (to_fov, color)
tiff.imsave(os.path.join(params['empty_dir'],empty_filename), avg_empty_stack, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % to_fov), 'r+')
# delete the dataset if it exists (important for debug)
if 'empty_%s' % color in h5f:
del h5f[u'empty_%s' % color]
        # the empty channel should be its own dataset
h5ds = h5f.create_dataset(u'empty_%s' % color,
data=avg_empty_stack,
chunks=(1, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
maxshape=(None, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# give attribute which says which channels contribute. Just put 0
h5ds.attrs.create('empty_channels', [0])
h5f.close()
information("Saved empty channel for FOV %d." % to_fov)
# Do subtraction for an fov over many timepoints
def subtract_fov_stack(fov_id, specs, color='c1', method='phase'):
'''
For a given FOV, loads the precomputed empty stack and does subtraction on
all peaks in the FOV designated to be analyzed
Parameters
----------
color : string, 'c1', 'c2', etc.
        This is the channel to subtract. It will be appended to the word 'empty' to load the matching empty stack.
Called by
mm3_Subtract.py
Calls
mm3.subtract_phase
'''
information('Subtracting peaks for FOV %d.' % fov_id)
    # load empty stack; feed dummy peak number to get the empty
avg_empty_stack = load_stack(fov_id, 0, color='empty_{}'.format(color))
# determine which peaks are to be analyzed
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
        if spec == 1: # 1 means analyze; 0 is for empty, -1 is ignore
ana_peak_ids.append(peak_id)
ana_peak_ids = sorted(ana_peak_ids) # sort for repeatability
information("Subtracting %d channels for FOV %d." % (len(ana_peak_ids), fov_id))
    # just return if there are no peaks to analyze
if not ana_peak_ids:
return False
# load images for the peak and get phase images
for peak_id in ana_peak_ids:
information('Subtracting peak %d.' % peak_id)
image_data = load_stack(fov_id, peak_id, color=color)
# make a list for all time points to send to a multiprocessing pool
        # list will be the length of image_data, with tuples of (image, empty)
subtract_pairs = zip(image_data, avg_empty_stack)
# # set up multiprocessing pool to do subtraction. Should wait until finished
# pool = Pool(processes=params['num_analyzers'])
# if method == 'phase':
# subtracted_imgs = pool.map(subtract_phase, subtract_pairs, chunksize=10)
# elif method == 'fluor':
# subtracted_imgs = pool.map(subtract_fluor, subtract_pairs, chunksize=10)
# pool.close() # tells the process nothing more will be added.
# pool.join() # blocks script until everything has been processed and workers exit
# linear loop for debug
subtracted_imgs = [subtract_phase(subtract_pair) for subtract_pair in subtract_pairs]
# stack them up along a time axis
subtracted_stack = np.stack(subtracted_imgs, axis=0)
# save out the subtracted stack
if params['output'] == 'TIFF':
sub_filename = params['experiment_name'] + '_xy%03d_p%04d_sub_%s.tif' % (fov_id, peak_id, color)
tiff.imsave(os.path.join(params['sub_dir'],sub_filename), subtracted_stack, compress=4) # save it
if fov_id==1 and peak_id<50:
napari.current_viewer().add_image(subtracted_stack, name='Subtracted' + '_xy1_p'+str(peak_id)+'_sub_'+str(color)+'.tif', visible=True)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put subtracted channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_sub_%s' % (peak_id, color) in h5g:
del h5g['p%04d_sub_%s' % (peak_id, color)]
h5ds = h5g.create_dataset(u'p%04d_sub_%s' % (peak_id, color),
data=subtracted_stack,
chunks=(1, subtracted_stack.shape[1], subtracted_stack.shape[2]),
maxshape=(None, subtracted_stack.shape[1], subtracted_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
information("Saved subtracted channel %d." % peak_id)
if params['output'] == 'HDF5':
h5f.close()
return True
# subtracts one phase contrast image from another.
def subtract_phase(image_pair):
    '''subtract_phase aligns an empty channel image to a channel image and subtracts it.
Modified from subtract_phase_only by jt on 20160511
The subtracted image returned is the same size as the image given. It may however include
data points around the edge that are meaningless but not marked.
We align the empty channel to the phase channel, then subtract.
Parameters
    image_pair : tuple of length two; (image, empty_mean)
Returns
channel_subtracted : np.array
The subtracted image
Called by
subtract_fov_stack
'''
# get out data and pad
cropped_channel, empty_channel = image_pair # [channel slice, empty slice]
# this is for aligning the empty channel to the cell channel.
### Pad cropped channel.
    pad_size = params['subtract']['alignment_pad'] # pixel size to use for padding (amount that alignment could be off)
padded_chnl = np.pad(cropped_channel, pad_size, mode='reflect')
# ### Align channel to empty using match template.
# use match template to get a correlation array and find the position of maximum overlap
match_result = match_template(padded_chnl, empty_channel)
# get row and colum of max correlation value in correlation array
y, x = np.unravel_index(np.argmax(match_result), match_result.shape)
    # pad the empty channel according to alignment to be overlaid on padded channel.
empty_paddings = [[y, padded_chnl.shape[0] - (y + empty_channel.shape[0])],
[x, padded_chnl.shape[1] - (x + empty_channel.shape[1])]]
aligned_empty = np.pad(empty_channel, empty_paddings, mode='reflect')
# now trim it off so it is the same size as the original channel
aligned_empty = aligned_empty[pad_size:-1*pad_size, pad_size:-1*pad_size]
### Compute the difference between the empty and channel phase contrast images
# subtract cropped cell image from empty channel.
channel_subtracted = aligned_empty.astype('int32') - cropped_channel.astype('int32')
# channel_subtracted = cropped_channel.astype('int32') - aligned_empty.astype('int32')
# just zero out anything less than 0. This is what Sattar does
channel_subtracted[channel_subtracted < 0] = 0
channel_subtracted = channel_subtracted.astype('uint16') # change back to 16bit
return channel_subtracted
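# --- illustrative sketch, not called by the pipeline ---
# The core arithmetic of subtract_phase without the alignment step: subtract in
# int32 so negatives are representable, clip them to zero, return to uint16.
def _example_subtract_and_clip():
    import numpy as np
    empty = np.full((4, 4), 500, dtype='uint16')
    channel = np.full((4, 4), 520, dtype='uint16')          # brighter than empty
    sub = empty.astype('int32') - channel.astype('int32')   # empty - channel, as above
    sub[sub < 0] = 0
    return sub.astype('uint16')                             # all zeros here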
# subtract one fluorescence image from another.
def subtract_fluor(image_pair):
    ''' subtract_fluor does a simple subtraction of one image from another. Unlike subtract_phase,
there is no alignment. Also, the empty channel is subtracted from the full channel.
Parameters
    image_pair : tuple of length two; (image, empty_mean)
Returns
channel_subtracted : np.array
The subtracted image.
Called by
subtract_fov_stack
'''
# get out data and pad
cropped_channel, empty_channel = image_pair # [channel slice, empty slice]
# check frame size of cropped channel and background, always keep crop channel size the same
crop_size = np.shape(cropped_channel)[:2]
empty_size = np.shape(empty_channel)[:2]
if crop_size != empty_size:
if crop_size[0] > empty_size[0] or crop_size[1] > empty_size[1]:
pad_row_length = max(crop_size[0] - empty_size[0], 0) # prevent negatives
pad_column_length = max(crop_size[1] - empty_size[1], 0)
empty_channel = np.pad(empty_channel,
                                   [[int(.5*pad_row_length), pad_row_length-int(.5*pad_row_length)],
                                    [int(.5*pad_column_length), pad_column_length-int(.5*pad_column_length)],
                                    [0,0]], 'edge')
# mm3.information('size adjusted 1')
empty_size = np.shape(empty_channel)[:2]
if crop_size[0] < empty_size[0] or crop_size[1] < empty_size[1]:
empty_channel = empty_channel[:crop_size[0], :crop_size[1],]
### Compute the difference between the empty and channel phase contrast images
# subtract cropped cell image from empty channel.
channel_subtracted = cropped_channel.astype('int32') - empty_channel.astype('int32')
# channel_subtracted = cropped_channel.astype('int32') - aligned_empty.astype('int32')
# just zero out anything less than 0.
channel_subtracted[channel_subtracted < 0] = 0
channel_subtracted = channel_subtracted.astype('uint16') # change back to 16bit
return channel_subtracted
### functions that deal with segmentation and lineages
# Do segmentation for an channel time stack
def segment_chnl_stack(fov_id, peak_id):
'''
For a given fov and peak (channel), do segmentation for all images in the
subtracted .tif stack.
Called by
mm3_Segment.py
Calls
mm3.segment_image
'''
information('Segmenting FOV %d, channel %d.' % (fov_id, peak_id))
# load subtracted images
sub_stack = load_stack(fov_id, peak_id, color='sub_{}'.format(params['phase_plane']))
# set up multiprocessing pool to do segmentation. Will do everything before going on.
#pool = Pool(processes=params['num_analyzers'])
# send the 3d array to multiprocessing
#segmented_imgs = pool.map(segment_image, sub_stack, chunksize=8)
#pool.close() # tells the process nothing more will be added.
#pool.join() # blocks script until everything has been processed and workers exit
# image by image for debug
segmented_imgs = []
for sub_image in sub_stack:
segmented_imgs.append(segment_image(sub_image))
# stack them up along a time axis
segmented_imgs = np.stack(segmented_imgs, axis=0)
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stack
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['seg_dir'],seg_filename),
segmented_imgs, compress=5)
if fov_id==1 and peak_id<50:
napari.current_viewer().add_image(segmented_imgs, name='Segmented' + '_xy1_p'+str(peak_id)+'_sub_'+str(params['seg_img'])+'.tif', visible=True)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
information("Saved segmented channel %d." % peak_id)
return True
# segmentation algorithm
def segment_image(image):
'''Segments a subtracted image and returns a labeled image
Parameters
image : a ndarray which is an image. This should be the subtracted image
Returns
labeled_image : a ndarray which is also an image. Labeled values, which
should correspond to cells, all have the same integer value starting with 1.
Non labeled area should have value zero.
'''
# load in segmentation parameters
OTSU_threshold = params['segment']['otsu']['OTSU_threshold']
first_opening_size = params['segment']['otsu']['first_opening_size']
distance_threshold = params['segment']['otsu']['distance_threshold']
second_opening_size = params['segment']['otsu']['second_opening_size']
min_object_size = params['segment']['otsu']['min_object_size']
# threshold image
try:
        thresh = threshold_otsu(image) # finds optimal OTSU threshold value
except:
return np.zeros_like(image)
threshholded = image > OTSU_threshold*thresh # will create binary image
# if there are no cells, good to clear the border
# because otherwise the OTSU is just for random bullshit, most
# likely on the side of the image
threshholded = segmentation.clear_border(threshholded)
    # Opening = erosion then dilation.
# opening smooths images, breaks isthmuses, and eliminates protrusions.
# "opens" dark gaps between bright features.
morph = morphology.binary_opening(threshholded, morphology.disk(first_opening_size))
# if this image is empty at this point (likely if there were no cells), just return
# zero array
if np.amax(morph) == 0:
return np.zeros_like(image)
### Calculate distance matrix, use as markers for random walker (diffusion watershed)
# Generate the markers based on distance to the background
distance = ndi.distance_transform_edt(morph)
# threshold distance image
distance_thresh = np.zeros_like(distance)
distance_thresh[distance < distance_threshold] = 0
distance_thresh[distance >= distance_threshold] = 1
# do an extra opening on the distance
distance_opened = morphology.binary_opening(distance_thresh,
morphology.disk(second_opening_size))
# remove artifacts connected to image border
cleared = segmentation.clear_border(distance_opened)
# remove small objects. Remove small objects wants a
# labeled image and will fail if there is only one label. Return zero image in that case
# could have used try/except but remove_small_objects loves to issue warnings.
cleared, label_num = morphology.label(cleared, connectivity=1, return_num=True)
if label_num > 1:
cleared = morphology.remove_small_objects(cleared, min_size=min_object_size)
else:
# if there are no labels, then just return the cleared image as it is zero
return np.zeros_like(image)
# relabel now that small objects and labels on edges have been cleared
markers = morphology.label(cleared, connectivity=1)
# just break if there is no label
if np.amax(markers) == 0:
return np.zeros_like(image)
# the binary image for the watershed, which uses the unmodified OTSU threshold
threshholded_watershed = threshholded
threshholded_watershed = segmentation.clear_border(threshholded_watershed)
# label using the random walker (diffusion watershed) algorithm
try:
# set anything outside of OTSU threshold to -1 so it will not be labeled
markers[threshholded_watershed == 0] = -1
# here is the main algorithm
labeled_image = segmentation.random_walker(-1*image, markers)
# put negative values back to zero for proper image
labeled_image[labeled_image == -1] = 0
except:
return np.zeros_like(image)
return labeled_image
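# --- illustrative sketch, not called by the pipeline ---
# Hedged example of the scaled-Otsu step at the top of segment_image: the global
# Otsu value is multiplied by a user factor before binarizing. The image and the
# factor of 1.0 are made up.
def _example_scaled_otsu():
    import numpy as np
    from skimage.filters import threshold_otsu
    rng = np.random.RandomState(0)
    image = rng.randint(0, 100, (64, 64)).astype('uint16')
    image[20:40, 20:40] += 400                   # a bright synthetic "cell"
    thresh = threshold_otsu(image)               # optimal global threshold
    binary = image > 1.0 * thresh                # OTSU_threshold factor of 1.0
    return binary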
# loss functions for model
def dice_coeff(y_true, y_pred):
smooth = 1.
# Flatten
y_true_f = tf.reshape(y_true, [-1])
y_pred_f = tf.reshape(y_pred, [-1])
intersection = tf.reduce_sum(y_true_f * y_pred_f)
score = (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
return score
def dice_loss(y_true, y_pred):
loss = 1 - dice_coeff(y_true, y_pred)
return loss
def bce_dice_loss(y_true, y_pred):
loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
return loss
def tversky_loss(y_true, y_pred):
alpha = 0.5
beta = 0.5
ones = K.ones((512,512,3)) #K.ones(K.shape(y_true))
p0 = y_pred # proba that voxels are class i
p1 = ones-y_pred # proba that voxels are not class i
g0 = y_true
g1 = ones-y_true
num = K.sum(p0*g0, (0,1,2))
den = num + alpha*K.sum(p0*g1,(0,1,2)) + beta*K.sum(p1*g0,(0,1,2))
T = K.sum(num/den) # when summing over classes, T has dynamic range [0 Ncl]
Ncl = K.cast(K.shape(y_true)[-1], 'float32')
return Ncl-T
def cce_tversky_loss(y_true, y_pred):
loss = losses.categorical_crossentropy(y_true, y_pred) + tversky_loss(y_true, y_pred)
return loss
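# --- illustrative sketch, not called by the pipeline ---
# Hedged example of wiring one of the custom losses above into a model compile
# call. The tiny model is hypothetical, and the import path is an assumption:
# match it to the Keras flavor this module already imports so the losses and
# the model share a backend.
def _example_compile_with_dice_loss():
    from tensorflow.keras import layers, models
    model = models.Sequential([
        layers.Conv2D(1, 3, padding='same', activation='sigmoid',
                      input_shape=(None, None, 1)),
    ])
    model.compile(optimizer='adam', loss=bce_dice_loss, metrics=[dice_coeff])
    return model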
def get_pad_distances(unet_shape, img_height, img_width):
'''Finds padding and trimming sizes to make the input image the same as the size expected by the U-net model.
    Padding is split as evenly as possible between the two sides of each dimension. Trimming is only done from the right or bottom.
'''
half_width_pad = (unet_shape[1]-img_width)/2
if half_width_pad > 0:
left_pad = int(np.floor(half_width_pad))
right_pad = int(np.ceil(half_width_pad))
right_trim = 0
else:
left_pad = 0
right_pad = 0
right_trim = img_width - unet_shape[1]
half_height_pad = (unet_shape[0]-img_height)/2
if half_height_pad > 0:
top_pad = int(np.floor(half_height_pad))
bottom_pad = int(np.ceil(half_height_pad))
bottom_trim = 0
else:
top_pad = 0
bottom_pad = 0
bottom_trim = img_height - unet_shape[0]
pad_dict = {'top_pad' : top_pad,
'bottom_pad' : bottom_pad,
'right_pad' : right_pad,
'left_pad' : left_pad,
'bottom_trim' : bottom_trim,
'right_trim' : right_trim}
return pad_dict
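# --- illustrative sketch, not called by the pipeline ---
# Worked example of get_pad_distances: a 100x50 image padded up to a 256x64
# U-net input. Padding splits as evenly as possible and nothing is trimmed here.
def _example_pad_to_unet_shape():
    import numpy as np
    unet_shape = (256, 64)
    pad_dict = get_pad_distances(unet_shape, img_height=100, img_width=50)
    # -> top_pad=78, bottom_pad=78, left_pad=7, right_pad=7, both trims 0
    img = np.zeros((100, 50), dtype='uint16')
    padded = np.pad(img, ((pad_dict['top_pad'], pad_dict['bottom_pad']),
                          (pad_dict['left_pad'], pad_dict['right_pad'])),
                    mode='constant')
    assert padded.shape == unet_shape
    return pad_dict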
#@profile
def segment_cells_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model):
batch_size = params['segment']['batch_size']
cellClassThreshold = params['segment']['cell_class_threshold']
if cellClassThreshold == 'None': # yaml imports None as a string
cellClassThreshold = False
min_object_size = params['segment']['min_object_size']
# arguments to data generator
# data_gen_args = {'batch_size':batch_size,
# 'n_channels':1,
# 'normalize_to_one':False,
# 'shuffle':False}
# arguments to predict_generator
predict_args = dict(use_multiprocessing=True,
workers=params['num_analyzers'],
verbose=1)
for peak_id in ana_peak_ids:
information('Segmenting peak {}.'.format(peak_id))
img_stack = load_stack(fov_id, peak_id, color=params['phase_plane'])
if params['segment']['normalize_to_one']:
med_stack = np.zeros(img_stack.shape)
selem = morphology.disk(1)
for frame_idx in range(img_stack.shape[0]):
tmpImg = img_stack[frame_idx,...]
med_stack[frame_idx,...] = median(tmpImg, selem)
# robust normalization of peak's image stack to 1
max_val = np.max(med_stack)
img_stack = img_stack/max_val
img_stack[img_stack > 1] = 1
# trim and pad image to correct size
img_stack = img_stack[:, :unet_shape[0], :unet_shape[1]]
img_stack = np.pad(img_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])),
mode='constant')
img_stack = np.expand_dims(img_stack, -1) # TF expects images to be 4D
# set up image generator
# image_generator = CellSegmentationDataGenerator(img_stack, **data_gen_args)
image_datagen = ImageDataGenerator()
image_generator = image_datagen.flow(x=img_stack,
batch_size=batch_size,
shuffle=False) # keep same order
# predict cell locations. This has multiprocessing built in but I need to mess with the parameters to see how to best utilize it. ***
predictions = model.predict_generator(image_generator, **predict_args)
# post processing
# remove padding including the added last dimension
predictions = predictions[:, pad_dict['top_pad']:unet_shape[0]-pad_dict['bottom_pad'],
pad_dict['left_pad']:unet_shape[1]-pad_dict['right_pad'], 0]
        # pad back in case the image had been trimmed
predictions = np.pad(predictions,
((0,0),
(0,pad_dict['bottom_trim']),
(0,pad_dict['right_trim'])),
mode='constant')
if params['segment']['save_predictions']:
pred_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['pred_img'])
if not os.path.isdir(params['pred_dir']):
os.makedirs(params['pred_dir'])
int_preds = (predictions * 255).astype('uint8')
tiff.imsave(os.path.join(params['pred_dir'], pred_filename),
int_preds, compress=4)
        # binarize and label (if there is a threshold value; otherwise, save a grayscale for debug)
if cellClassThreshold:
predictions[predictions >= cellClassThreshold] = 1
predictions[predictions < cellClassThreshold] = 0
predictions = predictions.astype('uint8')
segmented_imgs = np.zeros(predictions.shape, dtype='uint8')
# process and label each frame of the channel
for frame in range(segmented_imgs.shape[0]):
# get rid of small holes
predictions[frame,:,:] = morphology.remove_small_holes(predictions[frame,:,:], min_object_size)
# get rid of small objects.
predictions[frame,:,:] = morphology.remove_small_objects(morphology.label(predictions[frame,:,:], connectivity=1), min_size=min_object_size)
                # remove labels which touch the border
predictions[frame,:,:] = segmentation.clear_border(predictions[frame,:,:])
# relabel now
segmented_imgs[frame,:,:] = morphology.label(predictions[frame,:,:], connectivity=1)
        else: # no threshold given; scale the 0-to-1 float predictions into a grayscale image (x100) for debug
information('Converting predictions to grayscale.')
segmented_imgs = np.around(predictions * 100)
        # both binary and grayscale should be 8bit. This may be ensured above and is unnecessary
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stacks
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['seg_dir'], seg_filename),
segmented_imgs, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
#@profile
def segment_fov_unet(fov_id, specs, model, color=None):
'''
Segments the channels from one fov using the U-net CNN model.
Parameters
----------
fov_id : int
specs : dict
model : TensorFlow model
'''
information('Segmenting FOV {} with U-net.'.format(fov_id))
if color is None:
color = params['phase_plane']
# load segmentation parameters
unet_shape = (params['segment']['trained_model_image_height'],
params['segment']['trained_model_image_width'])
### determine stitching of images.
# need channel shape, specifically the width. load first for example
    # this assumes that all channels are the same size for this FOV, which they should be
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
break # just break out with the current peak_id
img_stack = load_stack(fov_id, peak_id, color=color)
img_height = img_stack.shape[1]
img_width = img_stack.shape[2]
pad_dict = get_pad_distances(unet_shape, img_height, img_width)
    # determine how many channels we have to analyze for this FOV
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
ana_peak_ids.append(peak_id)
ana_peak_ids.sort() # sort for repeatability
#ana_peak_ids = ana_peak_ids[:2]
segment_cells_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model)
information("Finished segmentation for FOV {}.".format(fov_id))
return
def segment_foci_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model):
# batch_size = params['foci']['batch_size']
focusClassThreshold = params['foci']['focus_threshold']
if focusClassThreshold == 'None': # yaml imports None as a string
focusClassThreshold = False
# arguments to data generator
data_gen_args = {'batch_size':params['foci']['batch_size'],
'n_channels':1,
'normalize_to_one':False,
'shuffle':False}
# arguments to predict_generator
predict_args = dict(use_multiprocessing=False,
# workers=params['num_analyzers'],
verbose=1)
for peak_id in ana_peak_ids:
information('Segmenting foci in peak {}.'.format(peak_id))
# print(peak_id) # debugging a shape error at some traps
img_stack = load_stack(fov_id, peak_id, color=params['foci']['foci_plane'])
# pad image to correct size
img_stack = np.pad(img_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])),
mode='constant')
img_stack = np.expand_dims(img_stack, -1)
# set up image generator
image_generator = FocusSegmentationDataGenerator(img_stack, **data_gen_args)
# predict foci locations.
predictions = model.predict_generator(image_generator, **predict_args)
# post processing
# remove padding including the added last dimension
predictions = predictions[:, pad_dict['top_pad']:unet_shape[0]-pad_dict['bottom_pad'],
pad_dict['left_pad']:unet_shape[1]-pad_dict['right_pad'], 0]
if params['foci']['save_predictions']:
pred_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['pred_img'])
if not os.path.isdir(params['foci_pred_dir']):
os.makedirs(params['foci_pred_dir'])
int_preds = (predictions * 255).astype('uint8')
tiff.imsave(os.path.join(params['foci_pred_dir'], pred_filename),
int_preds, compress=4)
        # binarize and label (if there is a threshold value; otherwise, save a grayscale for debug)
if focusClassThreshold:
predictions[predictions >= focusClassThreshold] = 1
predictions[predictions < focusClassThreshold] = 0
predictions = predictions.astype('uint8')
segmented_imgs = np.zeros(predictions.shape, dtype='uint8')
# process and label each frame of the channel
for frame in range(segmented_imgs.shape[0]):
# get rid of small holes
# predictions[frame,:,:] = morphology.remove_small_holes(predictions[frame,:,:], min_object_size)
# get rid of small objects.
# predictions[frame,:,:] = morphology.remove_small_objects(morphology.label(predictions[frame,:,:], connectivity=1), min_size=min_object_size)
                # remove labels which touch the border
predictions[frame,:,:] = segmentation.clear_border(predictions[frame,:,:])
# relabel now
segmented_imgs[frame,:,:] = morphology.label(predictions[frame,:,:], connectivity=2)
        else: # no threshold given; scale the 0-to-1 float predictions into a grayscale image (x100) for debug
information('Converting predictions to grayscale.')
segmented_imgs = np.around(predictions * 100)
        # both binary and grayscale should be 8bit. This may be ensured above and is unnecessary
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stacks
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['foci_seg_dir'], seg_filename),
segmented_imgs, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
def segment_fov_foci_unet(fov_id, specs, model, color=None):
'''
Segments the channels from one fov using the U-net CNN model.
Parameters
----------
fov_id : int
specs : dict
model : TensorFlow model
'''
information('Segmenting FOV {} with U-net.'.format(fov_id))
if color is None:
color = params['phase_plane']
# load segmentation parameters
unet_shape = (params['segment']['trained_model_image_height'],
params['segment']['trained_model_image_width'])
### determine stitching of images.
# need channel shape, specifically the width. load first for example
    # this assumes that all channels are the same size for this FOV, which they should be
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
break # just break out with the current peak_id
img_stack = load_stack(fov_id, peak_id, color=color)
img_height = img_stack.shape[1]
img_width = img_stack.shape[2]
# find padding and trimming distances
pad_dict = get_pad_distances(unet_shape, img_height, img_width)
# timepoints = img_stack.shape[0]
    # determine how many channels we have to analyze for this FOV
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
ana_peak_ids.append(peak_id)
ana_peak_ids.sort() # sort for repeatability
k = segment_foci_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model)
information("Finished segmentation for FOV {}.".format(fov_id))
return(k)
# class for image generation for predicting cell locations in phase-contrast images
class CellSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
img_array,
batch_size=32,
n_channels=1,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.batch_size = batch_size
self.img_array = img_array
self.img_number = img_array.shape[0]
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
return(int(np.ceil(self.img_number / self.batch_size)))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
except IndexError:
X = X[:i,...]
break
# ensure image is uint8
if tmpImg.dtype=="uint16":
tmpImg = tmpImg / 2**16 * 2**8
tmpImg = tmpImg.astype('uint8')
if self.normalize_to_one:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
medImg = median(tmpImg, self.selem)
tmpImg = tmpImg/np.max(medImg)
tmpImg[tmpImg > 1] = 1
X[i,:,:,0] = tmpImg
return (X)
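# --- illustrative sketch, not called by the pipeline ---
# Hedged example of driving CellSegmentationDataGenerator with a random 4D
# stack laid out the same way segment_cells_unet builds it: (t, y, x, 1).
def _example_cell_seg_generator():
    import numpy as np
    img_stack = np.random.randint(0, 255, (10, 256, 32, 1)).astype('uint8')
    gen = CellSegmentationDataGenerator(img_stack, batch_size=4, shuffle=False)
    assert len(gen) == 3                      # ceil(10 / 4) batches
    first_batch = gen[0]
    assert first_batch.shape == (4, 256, 32, 1)
    return first_batch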
class TemporalCellDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
fileName,
batch_size=32,
dim=(32,32,32),
n_channels=1,
n_classes=10,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = dim
self.batch_size = batch_size
self.fileName = fileName
self.n_channels = n_channels
self.n_classes = n_classes
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
        return int(np.ceil(self.batch_size / self.batch_size)) # always evaluates to 1: the full stack is handled as a single batch
def __getitem__(self, index):
'Generate one batch of data'
# Generate data
X = self.__data_generation()
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
pass
def __data_generation(self):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.dim[2], self.n_channels))
full_stack = io.imread(self.fileName)
if full_stack.dtype=="uint16":
full_stack = full_stack / 2**16 * 2**8
full_stack = full_stack.astype('uint8')
img_height = full_stack.shape[1]
img_width = full_stack.shape[2]
pad_dict = get_pad_distances(self.dim, img_height, img_width)
full_stack = np.pad(full_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])
),
mode='constant')
full_stack = full_stack.transpose(1,2,0)
# Generate data
for i in range(self.batch_size):
if i == 0:
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,0,0] = full_stack[:,:,0]
for j in range(1,self.dim[2]):
tmpImg[:,:,j,0] = full_stack[:,:,j]
elif i == (self.batch_size - 1):
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,-1,0] = full_stack[:,:,-1]
for j in range(self.dim[2]-1):
tmpImg[:,:,j,0] = full_stack[:,:,j]
else:
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,:,0] = full_stack[:,:,(i-1):(i+2)]
X[i,:,:,:,:] = tmpImg
return X
# class for image generation for predicting cell locations in phase-contrast images
class FocusSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
img_array,
batch_size=32,
n_channels=1,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.batch_size = batch_size
self.img_array = img_array
self.img_number = img_array.shape[0]
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
return(int(np.ceil(self.img_number / self.batch_size)))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels), 'uint16')
if self.normalize_to_one:
max_pixels = []
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
if self.normalize_to_one:
# tmpMedian = filters.median(tmpImg, self.selem)
tmpMax = np.max(tmpImg)
max_pixels.append(tmpMax)
except IndexError:
X = X[:i,...]
break
# ensure image is uint8
# if tmpImg.dtype=="uint16":
# tmpImg = tmpImg / 2**16 * 2**8
# tmpImg = tmpImg.astype('uint8')
# if self.normalize_to_one:
# with warnings.catch_warnings():
# warnings.simplefilter('ignore')
# medImg = median(tmpImg, self.selem)
# tmpImg = tmpImg/np.max(medImg)
# tmpImg[tmpImg > 1] = 1
X[i,:,:,0] = tmpImg
if self.normalize_to_one:
channel_max = np.max(max_pixels) / (2**8 - 1)
# print("Channel max: {}".format(channel_max))
# print("Array max: {}".format(np.max(X)))
X = X/channel_max
# print("Normalized array max: {}".format(np.max(X)))
X[X > 1] = 1
return (X)
# class for image generation for predicting trap locations in phase-contrast images
class TrapSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self, img_array, batch_size=32,
n_channels=1, normalize_to_one=False, shuffle=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.img_number = img_array.shape[0]
self.img_array = img_array
self.batch_size = batch_size
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(3)
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(self.img_number / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
except IndexError:
X = X[:i,...]
break
if self.normalize_to_one:
medImg = median(tmpImg, self.selem)
tmpImg = medImg/np.max(medImg)
X[i,:,:,0] = tmpImg
return (X)
# class for image generation for classifying traps as good, empty, out-of-focus, or defective
class TrapKymographPredictionDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self, list_fileNames, batch_size=32, dim=(32,32,32), n_channels=1,
n_classes=10, shuffle=False):
'Initialization'
self.dim = dim
self.batch_size = batch_size
self.list_fileNames = list_fileNames
self.n_channels = n_channels
self.n_classes = n_classes
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(len(self.list_fileNames) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
list_fileNames_temp = [self.list_fileNames[k] for k in indexes]
# Generate data
X = self.__data_generation(list_fileNames_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_fileNames))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, list_fileNames_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i, fName in enumerate(list_fileNames_temp):
# Store sample
tmpImg = io.imread(fName)
tmpImgShape = tmpImg.shape
if tmpImgShape[0] < self.dim[0]:
t_end = tmpImgShape[0]
else:
t_end = self.dim[0]
X[i,:t_end,:,:] = np.expand_dims(tmpImg[:t_end,:,tmpImg.shape[-1]//2], axis=-1)
return X
def absolute_diff(y_true, y_pred):
y_true_sum = K.sum(y_true)
y_pred_sum = K.sum(y_pred)
diff = K.abs(y_pred_sum - y_true_sum)/tf.to_float(tf.size(y_true))
return diff
def all_loss(y_true, y_pred):
loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred) + absolute_diff(y_true, y_pred)
return loss
def absolute_dice_loss(y_true, y_pred):
loss = dice_loss(y_true, y_pred) + absolute_diff(y_true, y_pred)
return loss
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
def f2_m(y_true, y_pred, beta=2):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
numer = (1+beta**2)*recall*precision
denom = recall + (beta**2)*precision + K.epsilon()
return numer/denom
def f_precision_m(y_true, y_pred, beta=0.5):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
numer = (1+beta**2)*recall*precision
denom = recall + (beta**2)*precision + K.epsilon()
return numer/denom
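# --- illustrative sketch, not called by the pipeline ---
# The same precision/recall/F1 arithmetic as the Keras metrics above, written
# with numpy so it can be sanity-checked outside a TF session.
def _example_f1_numpy(y_true, y_pred, eps=1e-7):
    import numpy as np
    y_true = np.round(np.clip(y_true, 0, 1))
    y_pred = np.round(np.clip(y_pred, 0, 1))
    true_positives = np.sum(y_true * y_pred)
    precision = true_positives / (np.sum(y_pred) + eps)
    recall = true_positives / (np.sum(y_true) + eps)
    return 2 * precision * recall / (precision + recall + eps)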
# finds lineages for all peaks in a fov
def make_lineages_fov(fov_id, specs):
'''
For a given fov, create the lineages from the segmented images.
Called by
mm3_Segment.py
Calls
mm3.make_lineage_chnl_stack
'''
ana_peak_ids = [] # channels to be analyzed
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1: # 1 means analyze
ana_peak_ids.append(peak_id)
ana_peak_ids = sorted(ana_peak_ids) # sort for repeatability
information('Creating lineage for FOV %d with %d channels.' % (fov_id, len(ana_peak_ids)))
    # just return if there are no peaks to analyze
if not ana_peak_ids:
# returning empty dictionary will add nothing to current cells dictionary
return {}
# This is a list of tuples (fov_id, peak_id) to send to the Pool command
fov_and_peak_ids_list = [(fov_id, peak_id) for peak_id in ana_peak_ids]
# set up multiprocessing pool. will complete pool before going on
#pool = Pool(processes=params['num_analyzers'])
# create the lineages for each peak individually
# the output is a list of dictionaries
#lineages = pool.map(make_lineage_chnl_stack, fov_and_peak_ids_list, chunksize=8)
#pool.close() # tells the process nothing more will be added.
#pool.join() # blocks script until everything has been processed and workers exit
# This is the non-parallelized version (useful for debug)
lineages = []
for fov_and_peak_ids in fov_and_peak_ids_list:
lineages.append(make_lineage_chnl_stack(fov_and_peak_ids))
# combine all dictionaries into one dictionary
Cells = {} # create dictionary to hold all information
for cell_dict in lineages: # for all the other dictionaries in the list
Cells.update(cell_dict) # updates Cells with the entries in cell_dict
return Cells
# get number of cells in each frame and total number of pairwise interactions
def get_cell_counts(regionprops_list):
cell_count_list = [len(time_regions) for time_regions in regionprops_list]
interaction_count_list = []
for i,cell_count in enumerate(cell_count_list):
if i+1 == len(cell_count_list):
break
interaction_count_list.append(cell_count*cell_count_list[i+1])
total_cells = np.sum(cell_count_list)
total_interactions = np.sum(interaction_count_list)
return(total_cells, total_interactions, cell_count_list, interaction_count_list)
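# Quick worked example of the bookkeeping above (stand-in regions; only len() is
# used per frame): per-frame counts [2, 3, 2] give 2*3 + 3*2 pairwise interactions.
#
#   regionprops_list = [[object()]*2, [object()]*3, [object()]*2]
#   get_cell_counts(regionprops_list)   # -> (7, 12, [2, 3, 2], [6, 6])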
# get cells' information for track prediction
def gather_interactions_and_events(regionprops_list):
total_cells, total_interactions, cell_count_list, interaction_count_list = get_cell_counts(regionprops_list)
    # instantiate an array with a 2x5 feature block for each pair of cells:
    # min_y, max_y, centroid_y, area, and orientation
    # in reality it would be much, much more efficient to
    # look this information up in the data generator at run time
    # for now, this will work
    pairwise_cell_data = np.zeros((total_interactions,2,5,1))
    # make a dictionary, the keys of which will be row indices so that we
    # can quickly look up which timepoints/cells correspond to which
    # rows of our model's output
pairwise_cell_lookup = {}
# populate arrays
interaction_count = 0
cell_count = 0
for frame, frame_regions in enumerate(regionprops_list):
        for region in frame_regions:
            cell_label = region.label
            y,x = region.centroid
            bbox = region.bbox
            orientation = region.orientation
            min_y = bbox[0]
            max_y = bbox[2]
            area = region.area
            cell_info = (min_y, max_y, y, area, orientation)
cell_count += 1
try:
frame_plus_one_regions = regionprops_list[frame+1]
except IndexError as e:
# print(e)
break
            for region_plus_one in frame_plus_one_regions:
                paired_cell_label = region_plus_one.label
                y,x = region_plus_one.centroid
                bbox = region_plus_one.bbox
                # orientation of the paired region itself
                orientation = region_plus_one.orientation
                min_y = bbox[0]
                max_y = bbox[2]
                area = region_plus_one.area
                pairwise_cell_data[interaction_count,0,:,0] = cell_info
                pairwise_cell_data[interaction_count,1,:,0] = (min_y, max_y, y, area, orientation)
pairwise_cell_lookup[interaction_count] = {'frame':frame, 'cell_label':cell_label, 'paired_cell_label':paired_cell_label}
interaction_count += 1
return(pairwise_cell_data, pairwise_cell_lookup)
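# Shape note (illustrative): for per-frame counts [2, 3], pairwise_cell_data has
# shape (6, 2, 5, 1) -- one row per (cell at t, cell at t+1) pair, holding two
# stacked feature vectors of (min_y, max_y, centroid_y, area, orientation).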
# look up which cells are interacting according to the track model
def cell_interaction_lookup(predictions, lookup_table):
    '''
    Accepts a prediction matrix and a row-index lookup table, and returns a
    DataFrame of cell-pair interactions whose predicted probability exceeds 0.95.
    '''
frame = []
cell_label = []
paired_cell_label = []
interaction_type = []
# loop over rows of predictions
for row_index in range(predictions.shape[0]):
row_predictions = predictions[row_index]
row_relationship = np.where(row_predictions > 0.95)[0]
if row_relationship.size == 0:
continue
elif row_relationship[0] == 3:
continue
elif row_relationship[0] == 0:
interaction_type.append('migration')
elif row_relationship[0] == 1:
interaction_type.append('child')
elif row_relationship[0] == 2:
interaction_type.append('false_join')
frame.append(lookup_table[row_index]['frame'])
cell_label.append(lookup_table[row_index]['cell_label'])
paired_cell_label.append(lookup_table[row_index]['paired_cell_label'])
track_df = pd.DataFrame(data={'frame':frame,
'cell_label':cell_label,
'paired_cell_label':paired_cell_label,
'interaction_type':interaction_type})
return(track_df)
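# Illustrative sketch of the thresholding above (names hypothetical): a row is kept
# only when some class probability exceeds 0.95, and class index 3 is treated as
# "no interaction".
#
#   predictions = np.array([[0.99, 0.00, 0.00, 0.01],   # -> 'migration'
#                           [0.10, 0.20, 0.30, 0.40]])  # -> dropped, nothing > 0.95
#   lookup = {0: {'frame': 0, 'cell_label': 1, 'paired_cell_label': 1},
#             1: {'frame': 0, 'cell_label': 1, 'paired_cell_label': 2}}
#   track_df = cell_interaction_lookup(predictions, lookup)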
def get_tracking_model_dict():
model_dict = {}
    if 'migrate_model' not in model_dict:
        model_dict['migrate_model'] = models.load_model(params['tracking']['migrate_model'],
                                                        custom_objects={'all_loss':all_loss,
                                                                        'f2_m':f2_m})
    if 'child_model' not in model_dict:
        model_dict['child_model'] = models.load_model(params['tracking']['child_model'],
                                                      custom_objects={'bce_dice_loss':bce_dice_loss,
                                                                      'f2_m':f2_m})
    if 'appear_model' not in model_dict:
        model_dict['appear_model'] = models.load_model(params['tracking']['appear_model'],
                                                       custom_objects={'all_loss':all_loss,
                                                                       'f2_m':f2_m})
    if 'die_model' not in model_dict:
        model_dict['die_model'] = models.load_model(params['tracking']['die_model'],
                                                    custom_objects={'all_loss':all_loss,
                                                                    'f2_m':f2_m})
    if 'disappear_model' not in model_dict:
        model_dict['disappear_model'] = models.load_model(params['tracking']['disappear_model'],
                                                          custom_objects={'all_loss':all_loss,
                                                                          'f2_m':f2_m})
    if 'born_model' not in model_dict:
        model_dict['born_model'] = models.load_model(params['tracking']['born_model'],
                                                     custom_objects={'all_loss':all_loss,
                                                                     'f2_m':f2_m})
# if not 'zero_cell_model' in model_dict:
# model_dict['zero_cell_model'] = models.load_model(params['tracking']['zero_cell_model'],
# custom_objects={'absolute_dice_loss':absolute_dice_loss,
# 'f2_m':f2_m})
# if not 'one_cell_model' in model_dict:
# model_dict['one_cell_model'] = models.load_model(params['tracking']['one_cell_model'],
# custom_objects={'bce_dice_loss':bce_dice_loss,
# 'f2_m':f2_m})
# if not 'two_cell_model' in model_dict:
# model_dict['two_cell_model'] = models.load_model(params['tracking']['two_cell_model'],
# custom_objects={'all_loss':all_loss,
# 'f2_m':f2_m})
# if not 'geq_three_cell_model' in model_dict:
# model_dict['geq_three_cell_model'] = models.load_model(params['tracking']['geq_three_cell_model'],
# custom_objects={'bce_dice_loss':bce_dice_loss,
# 'f2_m':f2_m})
return(model_dict)
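# Illustrative usage (assumes params['tracking'] paths point at trained Keras model
# files; `pairwise_cell_data` would come from gather_interactions_and_events above):
#
#   model_dict = get_tracking_model_dict()
#   migrate_scores = model_dict['migrate_model'].predict(pairwise_cell_data)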
# Creates lineage for a single channel
def make_lineage_chnl_stack(fov_and_peak_id):
'''
    Create the lineage for a set of segmented images for one channel.
    Start by making the regions in the first time point potential cells.
    Go forward in time and map regions in each timepoint to the potential
    cells in previous time points, building the life of a cell. Uses basic
    checks, such as that the regions should overlap and should grow a little
    rather than shrink too much. If regions do not link back in time, discard
    them. If two regions map to one previous region, check whether it is a
    sensible division event.
Parameters
----------
    fov_and_peak_id : tuple
        (fov_id, peak_id)
Returns
-------
Cells : dict
A dictionary of all the cells from this lineage, divided and undivided
'''
# load in parameters
# if leaf regions see no action for longer than this, drop them
lost_cell_time = params['track']['lost_cell_time']
    # only cells with y positions below this value will receive the honor of becoming new
# cells, unless they are daughters of current cells
new_cell_y_cutoff = params['track']['new_cell_y_cutoff']
# only regions with labels less than or equal to this value will be considered to start cells
new_cell_region_cutoff = params['track']['new_cell_region_cutoff']
# get the specific ids from the tuple
fov_id, peak_id = fov_and_peak_id
# start time is the first time point for this series of TIFFs.
start_time_index = min(params['time_table'][fov_id].keys())
information('Creating lineage for FOV %d, channel %d.' % (fov_id, peak_id))
# load segmented data
image_data_seg = load_stack(fov_id, peak_id, color=params['track']['seg_img'])
# image_data_seg = load_stack(fov_id, peak_id, color='seg')
# Calculate all data for all time points.
# this list will be length of the number of time points
regions_by_time = [regionprops(label_image=timepoint) for timepoint in image_data_seg] # removed coordinates='xy'
# Set up data structures.
Cells = {} # Dict that holds all the cell objects, divided and undivided
cell_leaves = [] # cell ids of the current leaves of the growing lineage tree
# go through regions by timepoint and build lineages
# timepoints start with the index of the first image
for t, regions in enumerate(regions_by_time, start=start_time_index):
# if there are cell leaves who are still waiting to be linked, but
# too much time has passed, remove them.
        # iterate over a copy, since removing items from a list while iterating over it skips elements
        for leaf_id in cell_leaves[:]:
            if t - Cells[leaf_id].times[-1] > lost_cell_time:
                cell_leaves.remove(leaf_id)
# make all the regions leaves if there are no current leaves
if not cell_leaves:
for region in regions:
if region.centroid[0] < new_cell_y_cutoff and region.label <= new_cell_region_cutoff:
# Create cell and put in cell dictionary
cell_id = create_cell_id(region, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region, t, parent_id=None)
                    # add this id to the list of current leaves
cell_leaves.append(cell_id)
# Determine if the regions are children of current leaves
else:
### create mapping between regions and leaves
            leaf_region_map = {leaf_id : [] for leaf_id in cell_leaves}
# get the last y position of current leaves and create tuple with the id
current_leaf_positions = [(leaf_id, Cells[leaf_id].centroids[-1][0]) for leaf_id in cell_leaves]
# go through regions, they will come off in Y position order
for r, region in enumerate(regions):
# create tuple which is cell_id of closest leaf, distance
current_closest = (None, float('inf'))
# check this region against all positions of all current leaf regions,
# find the closest one in y.
for leaf in current_leaf_positions:
# calculate distance between region and leaf
y_dist_region_to_leaf = abs(region.centroid[0] - leaf[1])
# if the distance is closer than before, update
if y_dist_region_to_leaf < current_closest[1]:
current_closest = (leaf[0], y_dist_region_to_leaf)
# update map with the closest region
leaf_region_map[current_closest[0]].append((r, y_dist_region_to_leaf))
            # go through the current leaf regions.
            # limit to the closest two regions if more than two are linked to the leaf
for leaf_id, region_links in six.iteritems(leaf_region_map):
if len(region_links) > 2:
closest_two_regions = sorted(region_links, key=lambda x: x[1])[:2]
# but sort by region order so top region is first
closest_two_regions = sorted(closest_two_regions, key=lambda x: x[0])
# replace value in dictionary
leaf_region_map[leaf_id] = closest_two_regions
# for the discarded regions, put them as new leaves
# if they are near the closed end of the channel
discarded_regions = sorted(region_links, key=lambda x: x[1])[2:]
for discarded_region in discarded_regions:
region = regions[discarded_region[0]]
if region.centroid[0] < new_cell_y_cutoff and region.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
else:
# since the regions are ordered, none of the remaining will pass
break
### iterate over the leaves, looking to see what regions connect to them.
for leaf_id, region_links in six.iteritems(leaf_region_map):
# if there is just one suggested descendant,
# see if it checks out and append the data
if len(region_links) == 1:
region = regions[region_links[0][0]] # grab the region from the list
# check if the pairing makes sense based on size and position
# this function returns true if things are okay
if check_growth_by_region(Cells[leaf_id], region):
# grow the cell by the region in this case
Cells[leaf_id].grow(region, t)
# there may be two daughters, or maybe there is just one child and a new cell
elif len(region_links) == 2:
# grab these two daughters
region1 = regions[region_links[0][0]]
region2 = regions[region_links[1][0]]
# check_division returns 3 if cell divided,
# 1 if first region is just the cell growing and the second is trash
# 2 if the second region is the cell, and the first is trash
# or 0 if it cannot be determined.
check_division_result = check_division(Cells[leaf_id], region1, region2)
if check_division_result == 3:
# create two new cells and divide the mother
daughter1_id = create_cell_id(region1, t, peak_id, fov_id)
daughter2_id = create_cell_id(region2, t, peak_id, fov_id)
Cells[daughter1_id] = Cell(daughter1_id, region1, t,
parent_id=leaf_id)
Cells[daughter2_id] = Cell(daughter2_id, region2, t,
parent_id=leaf_id)
Cells[leaf_id].divide(Cells[daughter1_id], Cells[daughter2_id], t)
# remove mother from current leaves
cell_leaves.remove(leaf_id)
# add the daughter ids to list of current leaves if they pass cutoffs
if region1.centroid[0] < new_cell_y_cutoff and region1.label <= new_cell_region_cutoff:
cell_leaves.append(daughter1_id)
if region2.centroid[0] < new_cell_y_cutoff and region2.label <= new_cell_region_cutoff:
cell_leaves.append(daughter2_id)
# 1 means that daughter 1 is just a continuation of the mother
                    # The other region should become a leaf if it passes the requirements
elif check_division_result == 1:
Cells[leaf_id].grow(region1, t)
if region2.centroid[0] < new_cell_y_cutoff and region2.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region2, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region2, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
# ditto for 2
elif check_division_result == 2:
Cells[leaf_id].grow(region2, t)
if region1.centroid[0] < new_cell_y_cutoff and region1.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region1, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region1, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
# return the dictionary with all the cells
return Cells
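# Illustrative usage (assumes the mm3 `params` globals and a `specs` dict from the
# channel-picking step; the fov number is hypothetical):
#
#   Cells = make_lineages_fov(1, specs)
#   for cell_id, cell in six.iteritems(Cells):
#       cell.print_info()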
### Cell class and related functions
# this is the object that holds all information for a detection
class Detection():
'''
The Detection is a single detection in a single frame.
'''
# initialize (birth) the cell
def __init__(self, detection_id, region, t):
'''The detection must be given a unique detection_id and passed the region
information from the segmentation
Parameters
__________
detection_id : str
detection_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point
r is region label for that segmentation
Use the function create_detection_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
'''
# create all the attributes
# id
self.id = detection_id
# identification convenience
self.fov = int(detection_id.split('f')[1].split('p')[0])
self.peak = int(detection_id.split('p')[1].split('t')[0])
self.t = t
self.cell_count = 1
# self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
if region is not None:
self.label = region.label
self.bbox = region.bbox
self.area = region.area
            # calculate cell length and width using the Feret diameter. These values are in pixels
            length_tmp, width_tmp = feretdiameter(region)
            if length_tmp is None:
                warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.length = length_tmp
self.width = width_tmp
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volume = (length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 + (4/3) * np.pi * (width_tmp/2)**3
            # angle of the fitted ellipse and centroid location
self.orientation = region.orientation
self.centroid = region.centroid
else:
self.label = None
self.bbox = None
self.area = None
            # no region was passed, so length and width are undefined
            length_tmp, width_tmp = (None, None)
self.length = None
self.width = None
            # no region, so volume, orientation, and centroid are likewise undefined
            self.volume = None
            self.orientation = None
            self.centroid = None
# this is the object that holds all information for a cell
class Cell():
'''
    The Cell class is one cell that has been born. It is not necessarily a cell that
    has divided.
'''
# initialize (birth) the cell
def __init__(self, cell_id, region, t, parent_id=None):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
__________
cell_id : str
cell_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point at time of birth
r is region label for that segmentation
        Use the function create_cell_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
parent_id : str
id of the parent if there is one.
'''
# create all the attributes
# id
self.id = cell_id
# identification convenience
self.fov = int(cell_id.split('f')[1].split('p')[0])
self.peak = int(cell_id.split('p')[1].split('t')[0])
self.birth_label = int(cell_id.split('r')[1])
# parent id may be none
self.parent = parent_id
# daughters is updated when cell divides
# if this is none then the cell did not divide
self.daughters = None
# birth and division time
self.birth_time = t
self.division_time = None # filled out if cell divides
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
        # calculate cell length and width using the Feret diameter. These values are in pixels
        length_tmp, width_tmp = feretdiameter(region)
        if length_tmp is None:
            warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
        # angle of the fitted ellipse and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
        # these are special datatypes, as they include information from the daughters for division
        # computed upon division
self.times_w_div = None
self.lengths_w_div = None
self.widths_w_div = None
# this information is the "production" information that
# we want to extract at the end. Some of this is for convenience.
# This is only filled out if a cell divides.
self.sb = None # in um
self.sd = None # this should be combined lengths of daughters, in um
self.delta = None
self.tau = None
self.elong_rate = None
self.septum_position = None
self.width = None
self.death = None
def grow(self, region, t):
'''Append data from a region to this cell.
use cell.times[-1] to get most current value'''
self.times.append(t)
self.abs_times.append(params['time_table'][self.fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
        # calculate cell length and width using the Feret diameter
        length_tmp, width_tmp = feretdiameter(region)
        if length_tmp is None:
            warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
def die(self, region, t):
'''
Annotate cell as dying from current t to next t.
'''
self.death = t
def divide(self, daughter1, daughter2, t):
'''Divide the cell and update stats.
        daughter1 and daughter2 are instances of the Cell class.
        daughter1 is the daughter closer to the closed end.'''
        # put the daughter ids into the cell
self.daughters = [daughter1.id, daughter2.id]
# give this guy a division time
self.division_time = daughter1.birth_time
# update times
self.times_w_div = self.times + [self.division_time]
self.abs_times.append(params['time_table'][self.fov][self.division_time])
# flesh out the stats for this cell
# size at birth
self.sb = self.lengths[0] * params['pxl2um']
# force the division length to be the combined lengths of the daughters
self.sd = (daughter1.lengths[0] + daughter2.lengths[0]) * params['pxl2um']
# delta is here for convenience
self.delta = self.sd - self.sb
# generation time. Use more accurate times and convert to minutes
self.tau = np.float64((self.abs_times[-1] - self.abs_times[0]) / 60.0)
# include the data points from the daughters
self.lengths_w_div = [l * params['pxl2um'] for l in self.lengths] + [self.sd]
self.widths_w_div = [w * params['pxl2um'] for w in self.widths] + [((daughter1.widths[0] + daughter2.widths[0])/2) * params['pxl2um']]
# volumes for all timepoints, in um^3
self.volumes_w_div = []
for i in range(len(self.lengths_w_div)):
self.volumes_w_div.append((self.lengths_w_div[i] - self.widths_w_div[i]) *
np.pi * (self.widths_w_div[i]/2)**2 +
(4/3) * np.pi * (self.widths_w_div[i]/2)**3)
# calculate elongation rate.
try:
times = np.float64((np.array(self.abs_times) - self.abs_times[0]) / 60.0)
log_lengths = np.float64(np.log(self.lengths_w_div))
p = np.polyfit(times, log_lengths, 1) # this wants float64
self.elong_rate = p[0] * 60.0 # convert to hours
        except Exception:
            self.elong_rate = np.float64('NaN')
            warning('Elongation rate calculation failed for {}.'.format(self.id))
# calculate the septum position as a number between 0 and 1
# which indicates the size of daughter closer to the closed end
# compared to the total size
self.septum_position = daughter1.lengths[0] / (daughter1.lengths[0] + daughter2.lengths[0])
# calculate single width over cell's life
self.width = np.mean(self.widths_w_div)
# convert data to smaller floats. No need for float64
# see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
convert_to = 'float16' # numpy datatype to convert to
self.sb = self.sb.astype(convert_to)
self.sd = self.sd.astype(convert_to)
self.delta = self.delta.astype(convert_to)
self.elong_rate = self.elong_rate.astype(convert_to)
self.tau = self.tau.astype(convert_to)
self.septum_position = self.septum_position.astype(convert_to)
self.width = self.width.astype(convert_to)
self.lengths = [length.astype(convert_to) for length in self.lengths]
self.lengths_w_div = [length.astype(convert_to) for length in self.lengths_w_div]
self.widths = [width.astype(convert_to) for width in self.widths]
self.widths_w_div = [width.astype(convert_to) for width in self.widths_w_div]
self.volumes = [vol.astype(convert_to) for vol in self.volumes]
self.volumes_w_div = [vol.astype(convert_to) for vol in self.volumes_w_div]
# note the float16 is hardcoded here
self.orientations = [np.float16(orientation) for orientation in self.orientations]
self.centroids = [(y.astype(convert_to), x.astype(convert_to)) for y, x in self.centroids]
def print_info(self):
'''prints information about the cell'''
print('id = %s' % self.id)
print('times = {}'.format(', '.join('{}'.format(t) for t in self.times)))
print('lengths = {}'.format(', '.join('{:.2f}'.format(l) for l in self.lengths)))
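# Illustrative lifecycle sketch for the Cell class (all names hypothetical; r0, r1,
# r2a, r2b would be skimage.measure.regionprops regions, and params must be set up):
#
#   mother = Cell('f001p0001t0001r01', r0, 1, parent_id=None)
#   mother.grow(r1, 2)                      # cell tracked into the next frame
#   d1 = Cell('f001p0001t0003r01', r2a, 3, parent_id=mother.id)
#   d2 = Cell('f001p0001t0003r02', r2b, 3, parent_id=mother.id)
#   mother.divide(d1, d2, 3)                # fills sb, sd, delta, tau, elong_rate, ...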
class CellTree():
def __init__(self):
self.cells = {}
self.scores = [] # probably needs to be different
self.score = 0
self.cell_id_list = []
def add_cell(self, cell):
self.cells[cell.id] = cell
self.cell_id_list.append(cell.id)
self.cell_id_list.sort()
def update_score(self):
pass
def get_cell(self, cell_id):
return(self.cells[cell_id])
def get_top_from_cell(self, cell_id):
pass
# this is the object that holds all information for a cell
class CellFromGraph():
'''
The CellFromGraph class is one cell that has been born.
    It is not necessarily a cell that has divided.
'''
# initialize (birth) the cell
def __init__(self, cell_id, region, t, parent=None):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
__________
cell_id : str
cell_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point at time of birth
r is region label for that segmentation
        Use the function create_cell_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
        parent : CellFromGraph
            the parent cell object, if there is one.
'''
# create all the attributes
# id
self.id = cell_id
# identification convenience
self.fov = int(cell_id.split('f')[1].split('p')[0])
self.peak = int(cell_id.split('p')[1].split('t')[0])
self.birth_label = int(region.label)
self.regions = [region]
# parent is a CellFromGraph object, can be None
self.parent = parent
# daughters is updated when cell divides
# if this is none then the cell did not divide
self.daughters = None
# birth and division time
self.birth_time = t
self.division_time = None # filled out if cell divides
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
        # calculate cell length and width using the Feret diameter. These values are in pixels
        length_tmp, width_tmp = feretdiameter(region)
        if length_tmp is None:
            warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
        # angle of the fitted ellipse and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
        # these are special datatypes, as they include information from the daughters for division
        # computed upon division
self.times_w_div = None
self.lengths_w_div = None
self.widths_w_div = None
# this information is the "production" information that
# we want to extract at the end. Some of this is for convenience.
# This is only filled out if a cell divides.
self.sb = None # in um
self.sd = None # this should be combined lengths of daughters, in um
self.delta = None
self.tau = None
self.elong_rate = None
self.septum_position = None
self.width = None
self.death = None
self.disappear = None
self.area_mean_fluorescence = {}
self.volume_mean_fluorescence = {}
self.total_fluorescence = {}
self.foci = {}
def __len__(self):
return(len(self.times))
def add_parent(self, parent):
self.parent = parent
def grow(self, region, t):
'''Append data from a region to this cell.
use cell.times[-1] to get most current value'''
self.times.append(t)
self.abs_times.append(params['time_table'][self.fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
self.regions.append(region)
        # calculate cell length and width using the Feret diameter
        length_tmp, width_tmp = feretdiameter(region)
        if length_tmp is None:
            warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
def die(self, region, t):
'''
Annotate cell as dying from current t to next t.
'''
self.death = t
def disappears(self, region, t):
'''
Annotate cell as disappearing from current t to next t.
'''
self.disappear = t
def add_daughter(self, daughter, t):
if self.daughters is None:
self.daughters = [daughter]
else:
self.daughters.append(daughter)
assert len(self.daughters) < 3, "Too many daughter cells in cell {}".format(self.id)
# sort daughters by y position, with smaller y-value first.
# this will cause the daughter closer to the closed end of the trap to be listed first.
self.daughters.sort(key=lambda cell: cell.centroids[0][0])
self.divide(t)
def divide(self, t):
'''Divide the cell and update stats.
        daughter1 is the daughter closer to the closed end.'''
        # put the daughter ids into the cell
# self.daughters = [daughter1.id, daughter2.id]
# give this guy a division time
self.division_time = self.daughters[0].birth_time
# update times
self.times_w_div = self.times + [self.division_time]
self.abs_times.append(params['time_table'][self.fov][self.division_time])
# flesh out the stats for this cell
# size at birth
self.sb = self.lengths[0] * params['pxl2um']
# force the division length to be the combined lengths of the daughters
self.sd = (self.daughters[0].lengths[0] + self.daughters[1].lengths[0]) * params['pxl2um']
# delta is here for convenience
self.delta = self.sd - self.sb
# generation time. Use more accurate times and convert to minutes
self.tau = np.float64((self.abs_times[-1] - self.abs_times[0]) / 60.0)
# include the data points from the daughters
self.lengths_w_div = [l * params['pxl2um'] for l in self.lengths] + [self.sd]
self.widths_w_div = [w * params['pxl2um'] for w in self.widths] + [((self.daughters[0].widths[0] + self.daughters[1].widths[0])/2) * params['pxl2um']]
# volumes for all timepoints, in um^3
self.volumes_w_div = []
for i in range(len(self.lengths_w_div)):
self.volumes_w_div.append((self.lengths_w_div[i] - self.widths_w_div[i]) *
np.pi * (self.widths_w_div[i]/2)**2 +
(4/3) * np.pi * (self.widths_w_div[i]/2)**3)
# calculate elongation rate.
try:
times = np.float64((np.array(self.abs_times) - self.abs_times[0]) / 60.0) # convert times to minutes
log_lengths = np.float64(np.log(self.lengths_w_div))
p = np.polyfit(times, log_lengths, 1) # this wants float64
self.elong_rate = p[0] * 60.0 # convert to hours
        except Exception:
            self.elong_rate = np.float64('NaN')
            warning('Elongation rate calculation failed for {}.'.format(self.id))
# calculate the septum position as a number between 0 and 1
# which indicates the size of daughter closer to the closed end
# compared to the total size
self.septum_position = self.daughters[0].lengths[0] / (self.daughters[0].lengths[0] + self.daughters[1].lengths[0])
# calculate single width over cell's life
self.width = np.mean(self.widths_w_div)
# convert data to smaller floats. No need for float64
# see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
convert_to = 'float16' # numpy datatype to convert to
self.sb = self.sb.astype(convert_to)
self.sd = self.sd.astype(convert_to)
self.delta = self.delta.astype(convert_to)
self.elong_rate = self.elong_rate.astype(convert_to)
self.tau = self.tau.astype(convert_to)
self.septum_position = self.septum_position.astype(convert_to)
self.width = self.width.astype(convert_to)
self.lengths = [length.astype(convert_to) for length in self.lengths]
self.lengths_w_div = [length.astype(convert_to) for length in self.lengths_w_div]
self.widths = [width.astype(convert_to) for width in self.widths]
self.widths_w_div = [width.astype(convert_to) for width in self.widths_w_div]
self.volumes = [vol.astype(convert_to) for vol in self.volumes]
self.volumes_w_div = [vol.astype(convert_to) for vol in self.volumes_w_div]
# note the float16 is hardcoded here
self.orientations = [np.float16(orientation) for orientation in self.orientations]
self.centroids = [(y.astype(convert_to), x.astype(convert_to)) for y, x in self.centroids]
def add_focus(self, focus, t):
'''Adds a focus to the cell. See function foci_info_unet'''
self.foci[focus.id] = focus
def print_info(self):
'''prints information about the cell'''
print('id = %s' % self.id)
print('times = {}'.format(', '.join('{}'.format(t) for t in self.times)))
print('lengths = {}'.format(', '.join('{:.2f}'.format(l) for l in self.lengths)))
if self.daughters is not None:
print('daughters = {}'.format(', '.join('{}'.format(daughter.id) for daughter in self.daughters)))
if self.parent is not None:
print('parent = {}'.format(self.parent.id))
def make_wide_df(self):
data = {}
data['id'] = self.id
data['fov'] = self.fov
data['trap'] = self.peak
data['parent'] = self.parent
data['child1'] = None
data['child2'] = None
data['division_time'] = self.division_time
data['birth_label'] = self.birth_label
data['birth_time'] = self.birth_time
data['sb'] = self.sb
data['sd'] = self.sd
data['delta'] = self.delta
data['tau'] = self.tau
data['elong_rate'] = self.elong_rate
data['septum_position'] = self.septum_position
data['death'] = self.death
data['disappear'] = self.disappear
if self.daughters is not None:
data['child1'] = self.daughters[0]
if len(self.daughters) == 2:
data['child2'] = self.daughters[1]
df = pd.DataFrame(data, index=[self.id])
return(df)
def make_long_df(self):
data = {}
data['id'] = [self.id]*len(self.times)
data['times'] = self.times
data['length'] = self.lengths
data['volume'] = self.volumes
data['area'] = self.areas
# if a cell divides then there is one extra value in abs_times
if self.division_time is None:
data['seconds'] = self.abs_times
else:
data['seconds'] = self.abs_times[:-1]
# if there is fluorescence data, place it into the dataframe
if len(self.area_mean_fluorescence.keys()) != 0:
for fluorescence_channel in self.area_mean_fluorescence.keys():
data['{}_area_mean_fluorescence'.format(fluorescence_channel)] = self.area_mean_fluorescence[fluorescence_channel]
data['{}_volume_mean_fluorescence'.format(fluorescence_channel)] = self.volume_mean_fluorescence[fluorescence_channel]
data['{}_total_fluorescence'.format(fluorescence_channel)] = self.total_fluorescence[fluorescence_channel]
df = pd.DataFrame(data, index=data['id'])
return(df)
# this is the object that holds all information for a fluorescent focus
# this class can eventually be used in focus tracking, much like the Cell class
# is used for cell tracking
class Focus():
'''
The Focus class holds information on fluorescent foci.
A single focus can be present in multiple different cells.
'''
# initialize the focus
def __init__(self,
cell,
region,
seg_img,
intensity_image,
t):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
__________
cell : a Cell object
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
seg_img : 2D numpy array
Labelled image of cell segmentations
intensity_image : 2D numpy array
Fluorescence image with foci
'''
# create all the attributes
# id
focus_id = create_focus_id(region,
t,
cell.peak,
cell.fov,
experiment_name=params['experiment_name'])
self.id = focus_id
# identification convenience
self.appear_label = int(region.label)
self.regions = [region]
self.fov = cell.fov
self.peak = cell.peak
# cell is a CellFromGraph object
# cells are added later using the .add_cell method
self.cells = [cell]
# daughters is updated when focus splits
# if this is none then the focus did not split
self.parent = None
self.daughters = None
self.merger_partner = None
# appearance and split time
self.appear_time = t
self.split_time = None # filled out if focus splits
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][cell.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
        # calculating focus length and width from the axes of the fitted ellipse.
        # These values are in pixels
        # NOTE: in the future, update to straighten a focus and get straightened length/width
# print(region)
length_tmp = region.major_axis_length
width_tmp = region.minor_axis_length
# length_tmp, width_tmp = feretdiameter(region)
# if length_tmp == None:
# warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate focus volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
        # angle of the fitted ellipse and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
        # special information for foci
self.elong_rate = None
self.disappear = None
self.area_mean_fluorescence = []
self.volume_mean_fluorescence = []
self.total_fluorescence = []
self.median_fluorescence = []
self.sd_fluorescence = []
self.disp_l = []
self.disp_w = []
self.calculate_fluorescence(seg_img, intensity_image, region)
def __len__(self):
return(len(self.times))
def __str__(self):
return(self.print_info())
def add_cell(self, cell):
self.cells.append(cell)
def add_parent_focus(self, parent):
self.parent = parent
def merge(self, partner):
self.merger_partner = partner
def grow(self,
region,
t,
seg_img,
intensity_image,
current_cell):
'''Append data from a region to this focus.
use self.times[-1] to get most current value.'''
if current_cell is not self.cells[-1]:
self.add_cell(current_cell)
self.times.append(t)
self.abs_times.append(params['time_table'][self.cells[-1].fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
self.regions.append(region)
        # calculating focus length and width from the axes of the fitted ellipse
length_tmp = region.major_axis_length
width_tmp = region.minor_axis_length
# length_tmp, width_tmp = feretdiameter(region)
# if length_tmp == None:
# warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
self.calculate_fluorescence(seg_img, intensity_image, region)
def calculate_fluorescence(self,
seg_img,
intensity_image,
region):
total_fluor = np.sum(intensity_image[seg_img == region.label])
self.total_fluorescence.append(total_fluor)
self.area_mean_fluorescence.append(total_fluor/self.areas[-1])
self.volume_mean_fluorescence.append(total_fluor/self.volumes[-1])
self.median_fluorescence.append(np.median(intensity_image[seg_img == region.label]))
self.sd_fluorescence.append(np.std(intensity_image[seg_img == region.label]))
# get the focus' displacement from center of cell
# find x and y position relative to the whole image (convert from small box)
# calculate distance of foci from middle of cell (scikit image)
orientation = region.orientation
if orientation < 0:
orientation = np.pi+orientation
cell_idx = self.cells[-1].times.index(self.times[-1]) # final time in self.times is current time
cell_centroid = self.cells[-1].centroids[cell_idx]
focus_centroid = region.centroid
disp_y = (focus_centroid[0]-cell_centroid[0])*np.sin(orientation) - (focus_centroid[1]-cell_centroid[1])*np.cos(orientation)
disp_x = (focus_centroid[0]-cell_centroid[0])*np.cos(orientation) + (focus_centroid[1]-cell_centroid[1])*np.sin(orientation)
# append foci information to the list
self.disp_l = np.append(self.disp_l, disp_y)
self.disp_w = np.append(self.disp_w, disp_x)
def disappears(self, region, t):
'''
Annotate focus as disappearing from current t to next t.
'''
self.disappear = t
def add_daughter(self, daughter, t):
if self.daughters is None:
self.daughters = [daughter]
else:
self.daughters.append(daughter)
# sort daughters by y position, with smaller y-value first.
# this will cause the daughter closer to the closed end of the trap to be listed first.
self.daughters.sort(key=lambda focus: focus.centroids[0][0])
self.divide(t)
def divide(self, t):
        '''Split the focus and update stats.
        daughter1 is the daughter closer to the closed end.'''
        # put the daughter ids into the focus
# self.daughters = [daughter1.id, daughter2.id]
# give this guy a division time
self.split_time = self.daughters[0].appear_time
# convert data to smaller floats. No need for float64
# see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
convert_to = 'float16' # numpy datatype to convert to
self.lengths = [length.astype(convert_to) for length in self.lengths]
self.widths = [width.astype(convert_to) for width in self.widths]
self.volumes = [vol.astype(convert_to) for vol in self.volumes]
# note the float16 is hardcoded here
self.orientations = [np.float16(orientation) for orientation in self.orientations]
self.centroids = [(y.astype(convert_to), x.astype(convert_to)) for y, x in self.centroids]
def print_info(self):
'''prints information about the focus'''
print('id = %s' % self.id)
print('times = {}'.format(', '.join('{}'.format(t) for t in self.times)))
print('lengths = {}'.format(', '.join('{:.2f}'.format(l) for l in self.lengths)))
if self.daughters is not None:
print('daughters = {}'.format(', '.join('{}'.format(daughter.id) for daughter in self.daughters)))
if self.cells is not None:
print('cells = {}'.format([cell.id for cell in self.cells]))
def make_wide_df(self):
data = {}
data['id'] = self.id
data['cells'] = self.cells
data['parent'] = self.parent
data['child1'] = None
data['child2'] = None
# data['division_time'] = self.division_time
data['appear_label'] = self.appear_label
data['appear_time'] = self.appear_time
data['disappear'] = self.disappear
if self.daughters is not None:
data['child1'] = self.daughters[0]
if len(self.daughters) == 2:
data['child2'] = self.daughters[1]
df = pd.DataFrame(data, index=[self.id])
return(df)
def make_long_df(self):
data = {}
data['id'] = [self.id]*len(self.times)
data['time'] = self.times
# data['cell'] = self.cells
data['length'] = self.lengths
data['volume'] = self.volumes
data['area'] = self.areas
data['seconds'] = self.abs_times
data['area_mean_fluorescence'] = self.area_mean_fluorescence
data['volume_mean_fluorescence'] = self.volume_mean_fluorescence
data['total_fluorescence'] = self.total_fluorescence
data['median_fluorescence'] = self.median_fluorescence
data['sd_fluorescence'] = self.sd_fluorescence
data['disp_l'] = self.disp_l
data['disp_w'] = self.disp_w
# print(data['id'])
df = pd.DataFrame(data, index=data['id'])
return(df)
class PredictTrackDataGenerator(utils.Sequence):
    '''Generates data for running tracking class predictions.
    Input is a stack of labeled images.'''
def __init__(self,
data,
batch_size=32,
dim=(4,5,9)):
'Initialization'
self.batch_size = batch_size
self.data = data
self.dim = dim
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
        return int(np.ceil(len(self.data) / float(self.batch_size)))
def __getitem__(self, index):
'Generate one batch of data'
# Generate keys of the batch
batch_indices = self.indices[index*self.batch_size:(index+1)*self.batch_size]
# Generate data
X = self.__data_generation(batch_indices)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indices = np.arange(len(self.data))
def __data_generation(self, batch_indices):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
# shape is (batch_size, max_cell_num, frame_num, cell_feature_num, 1)
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.dim[2], 1))
# Generate data
        # batch_pos indexes within the batch; idx indexes the full dataset
        for batch_pos, idx in enumerate(batch_indices):
start_idx = idx-2
end_idx = idx+3
# print(start_idx, end_idx)
if start_idx < 0:
batch_frame_list = []
for empty_idx in range(abs(start_idx)):
batch_frame_list.append([])
batch_frame_list.extend(self.data[0:end_idx])
            elif end_idx > len(self.data):
                batch_frame_list = list(self.data[start_idx:len(self.data)])
                for empty_idx in range(abs(end_idx - len(self.data))):
                    batch_frame_list.append([])
else:
batch_frame_list = self.data[start_idx:end_idx]
for i,frame_region_list in enumerate(batch_frame_list):
# shape is (max_cell_num, frame_num, cell_feature_num)
# tmp_x = np.zeros((self.dim[0], self.dim[1], self.dim[2]))
if not frame_region_list:
continue
for region_idx, region, in enumerate(frame_region_list):
y,x = region.centroid
bbox = region.bbox
orientation = region.orientation
min_y = bbox[0]
max_y = bbox[2]
min_x = bbox[1]
max_x = bbox[3]
area = region.area
length = region.major_axis_length
cell_label = region.label
cell_index = cell_label - 1
cell_info = (min_x, max_x, x, min_y, max_y, y, orientation, area, length)
                    # skip cells whose label would not fit in the array's first dimension
                    if cell_index + 1 > self.dim[0]:
                        continue
# supplement tmp_x at (region_idx, )
# tmp_x[region_idx, i, :] = cell_info
                    X[batch_pos, cell_index, i, :, 0] = cell_info # tmp_x
return X
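# Illustrative usage sketch (names hypothetical): `regions_by_time` is a list of
# per-frame regionprops lists, and each generated sample is a 5-frame window of up
# to 4 cells with 9 features each, matching dim=(4,5,9).
#
#   track_gen = PredictTrackDataGenerator(regions_by_time, batch_size=32, dim=(4,5,9))
#   scores = some_track_model.predict_generator(track_gen)  # some_track_model: hypothetical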
def get_greatest_score_info(first_node, second_node, graph):
    '''Return the name and score of the highest-scoring edge between two nodes.
    Useful for track linking.
    '''
score_names = [k for k in graph.get_edge_data(first_node, second_node).keys()]
pred_scores = [val['score'] for k,val in graph.get_edge_data(first_node, second_node).items()]
max_score_index = np.argmax(pred_scores)
max_name = score_names[max_score_index]
max_score = pred_scores[max_score_index]
return(max_name, max_score)
def get_score_by_type(first_node, second_node, graph, score_type='child'):
    '''Return the score of the edge of the given type between two nodes.
    Useful for track linking.
    '''
pred_score = graph.get_edge_data(first_node, second_node)[score_type]['score']
return(pred_score)
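# These helpers appear to assume a networkx MultiDiGraph whose parallel edges are
# keyed by event type and carry a 'score' attribute. Minimal sketch (hypothetical):
#
#   import networkx as nx
#   g = nx.MultiDiGraph()
#   g.add_edge('a', 'b', key='migrate', score=np.log(0.9))
#   g.add_edge('a', 'b', key='child', score=np.log(0.05))
#   get_greatest_score_info('a', 'b', g)                # -> ('migrate', np.log(0.9))
#   get_score_by_type('a', 'b', g, score_type='child')  # -> np.log(0.05)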
def count_unvisited(G, experiment_name):
count = 0
for node_id in G.nodes:
if node_id.startswith(experiment_name):
if not G.nodes[node_id]['visited']:
count += 1
return(count)
def create_lineages_from_graph(graph,
graph_df,
fov_id,
peak_id,
):
'''
This function iterates through nodes in a graph of detections
to link the nodes as "CellFromGraph" objects, eventually
leading to the ultimate goal of returning
a CellTree object with each cell's information for the experiment.
For now it ignores the number of cells in a detection and simply
assumes a 1:1 relationship between detections and cell number.
'''
# iterate through all nodes in graph
# graph_score = 0
# track_dict = {}
# tracks = CellTree()
tracks = {}
for node_id in graph.nodes:
graph.nodes[node_id]['visited'] = False
graph_df['visited'] = False
    num_unvisited = count_unvisited(graph, params['experiment_name'])
    # track how many consecutive iterations fail to visit any new node
    same_iter_num = 0
    while num_unvisited > 0:
# which detection nodes are not yet visited
unvisited_detection_nodes = graph_df[(~(graph_df.visited) & graph_df.node_id.str.startswith(params['experiment_name']))]
# grab the first unvisited node_id from the dataframe
prior_node_id = unvisited_detection_nodes.iloc[0,1]
prior_node_time = graph.nodes[prior_node_id]['time']
prior_node_region = graph.nodes[prior_node_id]['region']
cell_id = create_cell_id(prior_node_region,
prior_node_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
current_cell = CellFromGraph(cell_id,
prior_node_region,
prior_node_time,
parent=None)
        if cell_id not in tracks:
tracks[cell_id] = current_cell
else:
current_cell = tracks[cell_id]
# for use later in establishing predecessors
current_node_id = prior_node_id
# set this detection's "visited" status to True in the graph and in the dataframe
graph.nodes[prior_node_id]['visited'] = True
graph_df.iloc[np.where(graph_df.node_id==prior_node_id)[0][0],3] = True
# build current_track list to this detection's node
current_track = collections.deque()
current_track.append(current_node_id)
predecessors_list = [k for k in graph.predecessors(prior_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
while len(unvisited_predecessors_list) != 0:
# initialize a scores array to select highest score from the available options
predecessor_scores = np.zeros(len(unvisited_predecessors_list))
# populate array with scores
for i in range(len(unvisited_predecessors_list)):
predecessor_node_id = unvisited_predecessors_list[i]
edge_type, edge_score = get_greatest_score_info(predecessor_node_id, current_node_id, graph)
predecessor_scores[i] = edge_score
# find highest score
max_index = np.argmax(predecessor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
current_node_id = unvisited_predecessors_list[max_index]
current_track.appendleft(current_node_id)
predecessors_list = [k for k in graph.predecessors(current_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
        while prior_node_id != 'B':
# which nodes succeed our current node?
successor_node_ids = [node_id for node_id in graph.successors(prior_node_id)]
# keep only the potential successor detections that have not yet been visited
unvisited_node_ids = []
for i,successor_node_id in enumerate(successor_node_ids):
# if it starts with params['experiment_name'], it is a detection node, and not born, appear, etc.
if successor_node_id.startswith(params['experiment_name']):
# if it has been used in the cell track graph, i.e., if 'visited' is True,
# move on. Otherwise, append to our list
if graph.nodes[successor_node_id]['visited']:
continue
else:
unvisited_node_ids.append(successor_node_id)
# if it doesn't start with params['experiment_name'], it is a born, appear, etc., and should always be appended
else:
unvisited_node_ids.append(successor_node_id)
# initialize a scores array to select highest score from the available options
successor_scores = np.zeros(len(unvisited_node_ids))
successor_edge_types = []
# populate array with scores
for i in range(len(unvisited_node_ids)):
successor_node_id = unvisited_node_ids[i]
edge_type, edge_score = get_greatest_score_info(prior_node_id, successor_node_id, graph)
successor_scores[i] = edge_score
successor_edge_types.append(edge_type)
# find highest score
max_score = np.max(successor_scores)
max_index = np.argmax(successor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
next_node_id = unvisited_node_ids[max_index]
max_edge_type = successor_edge_types[max_index]
# if the max_score in successor_scores isn't greater than log(0.1), just make the cell disappear for now.
if max_score < np.log(0.1):
max_edge_type = 'disappear'
next_node_id = [n_id for n_id in unvisited_node_ids if n_id.startswith('disappear')][0]
# if this is a division event, add child node as a new cell,
# add the new cell as a daughter to current_cell,
# add current_cell as a parent to new cell.
# Then, search for the second child cell, add it to current_cell, etc.
if max_edge_type == 'child':
new_cell_time = graph.nodes[next_node_id]['time']
new_cell_region = graph.nodes[next_node_id]['region']
new_cell_id = create_cell_id(new_cell_region,
new_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
new_cell = CellFromGraph(new_cell_id,
new_cell_region,
new_cell_time,
parent=current_cell)
tracks[new_cell_id] = new_cell
current_cell.add_daughter(new_cell, new_cell_time)
# initialize a scores array to select highest score from the available options
unvisited_detection_nodes = [unvisited_node_id for unvisited_node_id in unvisited_node_ids if unvisited_node_id.startswith(params['experiment_name'])]
child_scores = np.zeros(len(unvisited_detection_nodes))
# populate array with scores
for i in range(len(unvisited_detection_nodes)):
successor_node_id = unvisited_detection_nodes[i]
if successor_node_id == next_node_id:
child_scores[i] = -np.inf
continue
child_score = get_score_by_type(prior_node_id, successor_node_id, graph, score_type='child')
child_scores[i] = child_score
try:
second_daughter_score = np.max(child_scores)
# sometimes a second daughter doesn't exist: perhaps parent is at mouth of a trap and one
# daughter is lost to the central channel at division time. In this case, do the following:
if second_daughter_score < np.log(0.5):
current_cell = new_cell
else:
second_daughter_index = np.argmax(child_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
other_daughter_node_id = unvisited_detection_nodes[second_daughter_index]
other_daughter_cell_time = graph.nodes[other_daughter_node_id]['time']
other_daughter_cell_region = graph.nodes[other_daughter_node_id]['region']
other_daughter_cell_id = create_cell_id(other_daughter_cell_region,
other_daughter_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
other_daughter_cell = CellFromGraph(other_daughter_cell_id,
other_daughter_cell_region,
other_daughter_cell_time,
parent=current_cell)
tracks[other_daughter_cell_id] = other_daughter_cell
current_cell.add_daughter(other_daughter_cell, new_cell_time)
# now we remove current_cell, since it's done, and move on to one of the daughters
current_cell = new_cell
# sometimes a second daughter doesn't exist: perhaps parent is at mouth of a trap and one
# daughter is lost to the central channel at division time. In this case, do the following:
                except (IndexError, ValueError):
current_cell = new_cell
# if this is a migration, grow the current_cell.
elif max_edge_type == 'migrate':
cell_time = graph.nodes[next_node_id]['time']
cell_region = graph.nodes[next_node_id]['region']
current_cell.grow(cell_region, cell_time)
# if the event represents death, kill the cell
elif max_edge_type == 'die':
if prior_node_id.startswith(params['experiment_name']):
death_time = graph.nodes[prior_node_id]['time']
death_region = graph.nodes[prior_node_id]['region']
current_cell.die(death_region, death_time)
# if the event represents disappearance, end the cell
elif max_edge_type == 'disappear':
if prior_node_id.startswith(params['experiment_name']):
disappear_time = graph.nodes[prior_node_id]['time']
disappear_region = graph.nodes[prior_node_id]['region']
current_cell.disappears(disappear_region, disappear_time)
# set the next node to 'visited'
graph.nodes[next_node_id]['visited'] = True
if next_node_id != 'B':
graph_df.iloc[np.where(graph_df.node_id==next_node_id)[0][0],3] = True
# reset prior_node_id to iterate to next frame and append node_id to current track
prior_node_id = next_node_id
if num_unvisited != count_unvisited(graph, params['experiment_name']):
same_iter_num = 0
else:
same_iter_num += 1
num_unvisited = count_unvisited(graph, params['experiment_name'])
print("{} detections remain unvisited.".format(num_unvisited))
if same_iter_num > 10:
print("WARNING: Ten iterations surpassed without decreasing the number of visited nodes.\n \
Breaking tracking loop now. You should probably not trust these results.")
break
return tracks
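# Illustrative usage (graph and graph_df are built elsewhere in the tracking step;
# fov/peak numbers hypothetical):
#
#   tracks = create_lineages_from_graph(graph, graph_df, fov_id=1, peak_id=11)
#   wide_df = pd.concat([cell.make_wide_df() for cell in tracks.values()])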
def viterbi_create_lineages_from_graph(graph,
graph_df,
fov_id,
peak_id,
):
'''
This function iterates through nodes in a graph of detections
to link the nodes as "CellFromGraph" objects, eventually
leading to the ultimate goal of returning
a maximally-scoring CellTree object with each cell's information for the experiment.
For now it ignores the number of cells in a detection and simply
assumes a 1:1 relationship between detections and cell number.
'''
# iterate through all nodes in G
graph_score = 0
# track_dict = {}
tracks = CellTree()
    max_time = np.max([node.time for node in graph.nodes])
print(max_time)
for node_id in graph.nodes:
graph.nodes[node_id]['visited'] = False
graph_df['visited'] = False
num_unvisited = count_unvisited(graph, params['experiment_name'])
for t in range(1,max_time+1):
if t > 1:
prior_time_nodes = time_nodes
        if t == 1:
            time_nodes = [node for node in graph.nodes if node.time == t]
        else:
            time_nodes = next_time_nodes
        if t != max_time:
            next_time_nodes = [node for node in graph.nodes if node.time == t+1]
for node in time_nodes:
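            # placeholder: per-timepoint Viterbi scoring is not yet implemented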
pass
while num_unvisited > 0:
# which detection nodes are not yet visited
unvisited_detection_nodes = graph_df[(~(graph_df.visited) & graph_df.node_id.str.startswith(params['experiment_name']))]
# grab the first unvisited node_id from the dataframe
prior_node_id = unvisited_detection_nodes.iloc[0,1]
prior_node_time = graph.nodes[prior_node_id]['time']
prior_node_region = graph.nodes[prior_node_id]['region']
cell_id = create_cell_id(prior_node_region,
prior_node_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
current_cell = CellFromGraph(cell_id,
prior_node_region,
prior_node_time,
parent=None)
        if cell_id not in tracks.cell_id_list:
tracks.add_cell(current_cell)
else:
current_cell = tracks.get_cell(cell_id)
# track_dict_key = prior_node_id
# for use later in establishing predecessors
current_node_id = prior_node_id
# set this detection's "visited" status to True in the graph and in the dataframe
graph.nodes[prior_node_id]['visited'] = True
graph_df.iloc[np.where(graph_df.node_id==prior_node_id)[0][0],3] = True
# build current_track list to this detection's node
current_track = collections.deque()
current_track.append(current_node_id)
predecessors_list = [k for k in graph.predecessors(prior_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
while len(unvisited_predecessors_list) != 0:
# initialize a scores array to select highest score from the available options
predecessor_scores = np.zeros(len(unvisited_predecessors_list))
# populate array with scores
for i in range(len(unvisited_predecessors_list)):
predecessor_node_id = unvisited_predecessors_list[i]
edge_type, edge_score = get_greatest_score_info(predecessor_node_id, current_node_id, graph)
predecessor_scores[i] = edge_score
# find highest score
max_index = np.argmax(predecessor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
current_node_id = unvisited_predecessors_list[max_index]
current_track.appendleft(current_node_id)
predecessors_list = [k for k in graph.predecessors(current_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
        while prior_node_id != 'B':
# which nodes succeed our current node?
successor_node_ids = [node_id for node_id in graph.successors(prior_node_id)]
# keep only the potential successor detections that have not yet been visited
unvisited_node_ids = []
for i,successor_node_id in enumerate(successor_node_ids):
# if it starts with params['experiment_name'], it is a detection node, and not born, appear, etc.
if successor_node_id.startswith(params['experiment_name']):
# if it has been used in the cell track graph, i.e., if 'visited' is True,
# move on. Otherwise, append to our list
if graph.nodes[successor_node_id]['visited']:
continue
else:
unvisited_node_ids.append(successor_node_id)
# if it doesn't start with params['experiment_name'], it is a born, appear, etc., and should always be appended
else:
unvisited_node_ids.append(successor_node_id)
# initialize a scores array to select highest score from the available options
successor_scores = np.zeros(len(unvisited_node_ids))
successor_edge_types = []
# populate array with scores
for i in range(len(unvisited_node_ids)):
successor_node_id = unvisited_node_ids[i]
edge_type, edge_score = get_greatest_score_info(prior_node_id, successor_node_id, graph)
successor_scores[i] = edge_score
successor_edge_types.append(edge_type)
# find highest score
max_index = np.argmax(successor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
next_node_id = unvisited_node_ids[max_index]
max_edge_type = successor_edge_types[max_index]
# if this is a division event, add child node as a new cell,
# add the new cell as a daughter to current_cell,
# add current_cell as a parent to new cell.
# Then, search for the second child cell, add it to current_cell, etc.
if max_edge_type == 'child':
new_cell_time = graph.nodes[next_node_id]['time']
new_cell_region = graph.nodes[next_node_id]['region']
new_cell_id = create_cell_id(new_cell_region,
new_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
new_cell = CellFromGraph(new_cell_id,
new_cell_region,
new_cell_time,
parent=current_cell)
tracks.add_cell(new_cell)
current_cell.add_daughter(new_cell, new_cell_time)
# print("First daughter", current_cell.id, new_cell.id)
# initialize a scores array to select highest score from the available options
unvisited_detection_nodes = [unvisited_node_id for unvisited_node_id in unvisited_node_ids if unvisited_node_id.startswith(params['experiment_name'])]
child_scores = np.zeros(len(unvisited_detection_nodes))
# populate array with scores
for i in range(len(unvisited_detection_nodes)):
successor_node_id = unvisited_detection_nodes[i]
if successor_node_id == next_node_id:
child_scores[i] = -np.inf
continue
child_score = get_score_by_type(prior_node_id, successor_node_id, graph, score_type='child')
child_scores[i] = child_score
# print(child_scores)
try:
second_daughter_index = np.argmax(child_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
other_daughter_node_id = unvisited_detection_nodes[second_daughter_index]
other_daughter_cell_time = graph.nodes[other_daughter_node_id]['time']
other_daughter_cell_region = graph.nodes[other_daughter_node_id]['region']
other_daughter_cell_id = create_cell_id(other_daughter_cell_region,
other_daughter_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
other_daughter_cell = CellFromGraph(other_daughter_cell_id,
other_daughter_cell_region,
other_daughter_cell_time,
parent=current_cell)
tracks.add_cell(other_daughter_cell)
current_cell.add_daughter(other_daughter_cell, new_cell_time)
# now we remove current_cell, since it's done, and move on to one of the daughters
current_cell = new_cell
# print("Second daughter", current_cell.parent.id, other_daughter_cell.id)
# sometimes a second daughter doesn't exist: perhaps parent is at mouth of a trap and one
# daughter is lost to the central channel at division time. In this case, do the following:
                except (IndexError, ValueError):
current_cell = new_cell
# if this is a migration, grow the current_cell.
elif max_edge_type == 'migrate':
cell_time = graph.nodes[next_node_id]['time']
cell_region = graph.nodes[next_node_id]['region']
current_cell.grow(cell_region, cell_time)
# if the event represents death, kill the cell
elif max_edge_type == 'die':
if prior_node_id.startswith(params['experiment_name']):
death_time = graph.nodes[prior_node_id]['time']
death_region = graph.nodes[prior_node_id]['region']
current_cell.die(death_region, death_time)
# if the event represents disappearance, end the cell
elif max_edge_type == 'disappear':
if prior_node_id.startswith(params['experiment_name']):
disappear_time = graph.nodes[prior_node_id]['time']
disappear_region = graph.nodes[prior_node_id]['region']
current_cell.disappears(disappear_region, disappear_time)
# set the next node to 'visited'
graph.nodes[next_node_id]['visited'] = True
if next_node_id != 'B':
graph_df.iloc[np.where(graph_df.node_id==next_node_id)[0][0],3] = True
# reset prior_node_id to iterate to next frame and append node_id to current track
# current_track.append(next_node_id)
prior_node_id = next_node_id
# print(current_cell.id, current_cell.parent.id)
# track_dict[track_dict_key][:] = current_track
if num_unvisited != count_unvisited(graph, params['experiment_name']):
same_iter_num = 0
else:
same_iter_num += 1
num_unvisited = count_unvisited(graph, params['experiment_name'])
print("{} detections remain unvisited.".format(num_unvisited))
if same_iter_num > 10:
break
return(tracks)
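# A minimal sketch of the greedy edge choice used in the traversal above,
# assuming a networkx MultiDiGraph whose parallel edges are keyed by event type
# ('migrate', 'child', ...) and carry a log-probability 'score' attribute, as
# built by initialize_track_graph below. This is a hypothetical stand-in for
# get_greatest_score_info, shown only to illustrate the selection logic.
def _best_edge_sketch(graph, u, v):
    '''Return (edge_type, score) of the highest-scoring edge from u to v.'''
    edge_data = graph.get_edge_data(u, v)  # {edge_key: attribute_dict} or None
    if not edge_data:
        return None, -np.inf
    best_type = max(edge_data, key=lambda k: edge_data[k].get('score', -np.inf))
    return best_type, edge_data[best_type].get('score', -np.inf)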
def create_lineages_from_graph_2(graph,
graph_df,
fov_id,
peak_id,
):
'''
This function iterates through nodes in a graph of detections
    to link the nodes into "CellFromGraph" objects, with the eventual goal of
    returning a CellTree object containing each cell's information for the
    experiment.
For now it ignores the number of cells in a detection and simply
assumes a 1:1 relationship between detections and cell number.
'''
# iterate through all nodes in G
# graph_score = 0
# track_dict = {}
tracks = CellTree()
for node_id in graph.nodes:
graph.nodes[node_id]['visited'] = False
graph_df['visited'] = False
    num_unvisited = count_unvisited(graph, params['experiment_name'])
    # guard against infinite looping if no progress is made below
    same_iter_num = 0
while num_unvisited > 0:
# which detection nodes are not yet visited
unvisited_detection_nodes = graph_df[(~(graph_df.visited) & graph_df.node_id.str.startswith(params['experiment_name']))]
# grab the first unvisited node_id from the dataframe
prior_node_id = unvisited_detection_nodes.iloc[0,1]
prior_node_time = graph.nodes[prior_node_id]['time']
prior_node_region = graph.nodes[prior_node_id]['region']
cell_id = create_cell_id(prior_node_region,
prior_node_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
current_cell = CellFromGraph(cell_id,
prior_node_region,
prior_node_time,
parent=None)
if not cell_id in tracks.cell_id_list:
tracks.add_cell(current_cell)
else:
current_cell = tracks.get_cell(cell_id)
# track_dict_key = prior_node_id
# for use later in establishing predecessors
current_node_id = prior_node_id
# set this detection's "visited" status to True in the graph and in the dataframe
graph.nodes[prior_node_id]['visited'] = True
graph_df.iloc[np.where(graph_df.node_id==prior_node_id)[0][0],3] = True
# build current_track list to this detection's node
current_track = collections.deque()
current_track.append(current_node_id)
predecessors_list = [k for k in graph.predecessors(prior_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
while len(unvisited_predecessors_list) != 0:
# initialize a scores array to select highest score from the available options
predecessor_scores = np.zeros(len(unvisited_predecessors_list))
# populate array with scores
for i in range(len(unvisited_predecessors_list)):
predecessor_node_id = unvisited_predecessors_list[i]
edge_type, edge_score = get_greatest_score_info(predecessor_node_id, current_node_id, graph)
predecessor_scores[i] = edge_score
# find highest score
max_index = np.argmax(predecessor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
current_node_id = unvisited_predecessors_list[max_index]
current_track.appendleft(current_node_id)
predecessors_list = [k for k in graph.predecessors(current_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
        while prior_node_id != 'B':
# which nodes succeed our current node?
successor_node_ids = [node_id for node_id in graph.successors(prior_node_id)]
# keep only the potential successor detections that have not yet been visited
unvisited_node_ids = []
for i,successor_node_id in enumerate(successor_node_ids):
# if it starts with params['experiment_name'], it is a detection node, and not born, appear, etc.
if successor_node_id.startswith(params['experiment_name']):
# if it has been used in the cell track graph, i.e., if 'visited' is True,
# move on. Otherwise, append to our list
if graph.nodes[successor_node_id]['visited']:
continue
else:
unvisited_node_ids.append(successor_node_id)
# if it doesn't start with params['experiment_name'], it is a born, appear, etc., and should always be appended
else:
unvisited_node_ids.append(successor_node_id)
# initialize a scores array to select highest score from the available options
successor_scores = np.zeros(len(unvisited_node_ids))
successor_edge_types = []
# populate array with scores
for i in range(len(unvisited_node_ids)):
successor_node_id = unvisited_node_ids[i]
edge_type, edge_score = get_greatest_score_info(prior_node_id, successor_node_id, graph)
successor_scores[i] = edge_score
successor_edge_types.append(edge_type)
# find highest score
max_index = np.argmax(successor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
next_node_id = unvisited_node_ids[max_index]
max_edge_type = successor_edge_types[max_index]
# if this is a division event, add child node as a new cell,
# add the new cell as a daughter to current_cell,
# add current_cell as a parent to new cell.
# Then, search for the second child cell, add it to current_cell, etc.
if max_edge_type == 'child':
new_cell_time = graph.nodes[next_node_id]['time']
new_cell_region = graph.nodes[next_node_id]['region']
new_cell_id = create_cell_id(new_cell_region,
new_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
new_cell = CellFromGraph(new_cell_id,
new_cell_region,
new_cell_time,
parent=current_cell)
tracks.add_cell(new_cell)
current_cell.add_daughter(new_cell, new_cell_time)
# print("First daughter", current_cell.id, new_cell.id)
# initialize a scores array to select highest score from the available options
unvisited_detection_nodes = [unvisited_node_id for unvisited_node_id in unvisited_node_ids if unvisited_node_id.startswith(params['experiment_name'])]
child_scores = np.zeros(len(unvisited_detection_nodes))
# populate array with scores
for i in range(len(unvisited_detection_nodes)):
successor_node_id = unvisited_detection_nodes[i]
if successor_node_id == next_node_id:
child_scores[i] = -np.inf
continue
child_score = get_score_by_type(prior_node_id, successor_node_id, graph, score_type='child')
child_scores[i] = child_score
# print(child_scores)
try:
second_daughter_index = np.argmax(child_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
other_daughter_node_id = unvisited_detection_nodes[second_daughter_index]
other_daughter_cell_time = graph.nodes[other_daughter_node_id]['time']
other_daughter_cell_region = graph.nodes[other_daughter_node_id]['region']
other_daughter_cell_id = create_cell_id(other_daughter_cell_region,
other_daughter_cell_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
other_daughter_cell = CellFromGraph(other_daughter_cell_id,
other_daughter_cell_region,
other_daughter_cell_time,
parent=current_cell)
tracks.add_cell(other_daughter_cell)
current_cell.add_daughter(other_daughter_cell, new_cell_time)
# now we remove current_cell, since it's done, and move on to one of the daughters
current_cell = new_cell
# print("Second daughter", current_cell.parent.id, other_daughter_cell.id)
# sometimes a second daughter doesn't exist: perhaps parent is at mouth of a trap and one
# daughter is lost to the central channel at division time. In this case, do the following:
                except (IndexError, ValueError):
current_cell = new_cell
# if this is a migration, grow the current_cell.
elif max_edge_type == 'migrate':
cell_time = graph.nodes[next_node_id]['time']
cell_region = graph.nodes[next_node_id]['region']
current_cell.grow(cell_region, cell_time)
# if the event represents death, kill the cell
elif max_edge_type == 'die':
if prior_node_id.startswith(params['experiment_name']):
death_time = graph.nodes[prior_node_id]['time']
death_region = graph.nodes[prior_node_id]['region']
current_cell.die(death_region, death_time)
# if the event represents disappearance, end the cell
elif max_edge_type == 'disappear':
if prior_node_id.startswith(params['experiment_name']):
disappear_time = graph.nodes[prior_node_id]['time']
disappear_region = graph.nodes[prior_node_id]['region']
current_cell.disappears(disappear_region, disappear_time)
# set the next node to 'visited'
graph.nodes[next_node_id]['visited'] = True
if next_node_id != 'B':
graph_df.iloc[np.where(graph_df.node_id==next_node_id)[0][0],3] = True
# reset prior_node_id to iterate to next frame and append node_id to current track
# current_track.append(next_node_id)
prior_node_id = next_node_id
# print(current_cell.id, current_cell.parent.id)
# track_dict[track_dict_key][:] = current_track
if num_unvisited != count_unvisited(graph, params['experiment_name']):
same_iter_num = 0
else:
same_iter_num += 1
num_unvisited = count_unvisited(graph, params['experiment_name'])
print("{} detections remain unvisited.".format(num_unvisited))
if same_iter_num > 10:
break
return(tracks)
# obtains cell length and width of the cell using the feret diameter
def feretdiameter(region):
'''
feretdiameter calculates the length and width of the binary region shape. The cell orientation
from the ellipsoid is used to find the major and minor axis of the cell.
See https://en.wikipedia.org/wiki/Feret_diameter.
'''
# y: along vertical axis of the image; x: along horizontal axis of the image;
# calculate the relative centroid in the bounding box (non-rotated)
# print(region.centroid)
y0, x0 = region.centroid
y0 = y0 - np.int16(region.bbox[0]) + 1
x0 = x0 - np.int16(region.bbox[1]) + 1
cosorient = np.cos(region.orientation)
sinorient = np.sin(region.orientation)
# print(cosorient, sinorient)
amp_param = 1.2 #amplifying number to make sure the axis is longer than actual cell length
# coordinates relative to bounding box
# r_coords = region.coords - [np.int16(region.bbox[0]), np.int16(region.bbox[1])]
# limit to perimeter coords. pixels are relative to bounding box
region_binimg = np.pad(region.image, 1, 'constant') # pad region binary image by 1 to avoid boundary non-zero pixels
distance_image = ndi.distance_transform_edt(region_binimg)
r_coords = np.where(distance_image == 1)
r_coords = list(zip(r_coords[0], r_coords[1]))
    # coordinates are already sorted by y; partition into top and bottom to search faster later
# if orientation > 0, L1 is closer to top of image (lower Y coord)
if region.orientation > 0:
L1_coords = r_coords[:int(np.round(len(r_coords)/4))]
L2_coords = r_coords[int(np.round(len(r_coords)/4)):]
else:
L1_coords = r_coords[int(np.round(len(r_coords)/4)):]
L2_coords = r_coords[:int(np.round(len(r_coords)/4))]
#####################
    # calculate cell length
L1_pt = np.zeros((2,1))
L2_pt = np.zeros((2,1))
    # define the two end points of the long axis line
# one pole.
L1_pt[1] = x0 + cosorient * 0.5 * region.major_axis_length*amp_param
L1_pt[0] = y0 - sinorient * 0.5 * region.major_axis_length*amp_param
# the other pole.
L2_pt[1] = x0 - cosorient * 0.5 * region.major_axis_length*amp_param
L2_pt[0] = y0 + sinorient * 0.5 * region.major_axis_length*amp_param
    # find the perimeter coordinate closest to each of the two pole guide points,
    # i.e., minimize the distance between the region boundary and each end of the long-axis line
# pt_L1 = r_coords[np.argmin([np.sqrt(np.power(Pt[0]-L1_pt[0],2) + np.power(Pt[1]-L1_pt[1],2)) for Pt in r_coords])]
# pt_L2 = r_coords[np.argmin([np.sqrt(np.power(Pt[0]-L2_pt[0],2) + np.power(Pt[1]-L2_pt[1],2)) for Pt in r_coords])]
try:
pt_L1 = L1_coords[np.argmin([np.sqrt(np.power(Pt[0]-L1_pt[0],2) + np.power(Pt[1]-L1_pt[1],2)) for Pt in L1_coords])]
pt_L2 = L2_coords[np.argmin([np.sqrt(np.power(Pt[0]-L2_pt[0],2) + np.power(Pt[1]-L2_pt[1],2)) for Pt in L2_coords])]
length = np.sqrt(np.power(pt_L1[0]-pt_L2[0],2) + np.power(pt_L1[1]-pt_L2[1],2))
    except (ValueError, IndexError):
        length = None
    #####################
    # calculate cell width
    # the width calculation needs a valid length; bail out if the pole fit failed
    if length is None:
        return length, None
    # draw 2 parallel lines perpendicular to the long axis, offset from the centroid by
    # 0.2*length (0.8 of a quarter-length) along the long axis, to avoid measuring at midcell
    # limit to points in each half
W_coords = []
if region.orientation > 0:
W_coords.append(r_coords[:int(np.round(len(r_coords)/2))]) # note the /2 here instead of /4
W_coords.append(r_coords[int(np.round(len(r_coords)/2)):])
else:
W_coords.append(r_coords[int(np.round(len(r_coords)/2)):])
W_coords.append(r_coords[:int(np.round(len(r_coords)/2))])
# starting points
x1 = x0 + cosorient * 0.5 * length*0.4
y1 = y0 - sinorient * 0.5 * length*0.4
x2 = x0 - cosorient * 0.5 * length*0.4
y2 = y0 + sinorient * 0.5 * length*0.4
W1_pts = np.zeros((2,2))
W2_pts = np.zeros((2,2))
# now find the ends of the lines
# one side
W1_pts[0,1] = x1 - sinorient * 0.5 * region.minor_axis_length*amp_param
W1_pts[0,0] = y1 - cosorient * 0.5 * region.minor_axis_length*amp_param
W1_pts[1,1] = x2 - sinorient * 0.5 * region.minor_axis_length*amp_param
W1_pts[1,0] = y2 - cosorient * 0.5 * region.minor_axis_length*amp_param
# the other side
W2_pts[0,1] = x1 + sinorient * 0.5 * region.minor_axis_length*amp_param
W2_pts[0,0] = y1 + cosorient * 0.5 * region.minor_axis_length*amp_param
W2_pts[1,1] = x2 + sinorient * 0.5 * region.minor_axis_length*amp_param
W2_pts[1,0] = y2 + cosorient * 0.5 * region.minor_axis_length*amp_param
# calculate the minimal distance between the points at both ends of 3 lines
pt_W1 = np.zeros((2,2))
pt_W2 = np.zeros((2,2))
d_W = np.zeros((2,1))
i = 0
for W1_pt, W2_pt in zip(W1_pts, W2_pts):
# # find the points closest to the guide points
# pt_W1[i,0], pt_W1[i,1] = r_coords[np.argmin([np.sqrt(np.power(Pt[0]-W1_pt[0],2) + np.power(Pt[1]-W1_pt[1],2)) for Pt in r_coords])]
# pt_W2[i,0], pt_W2[i,1] = r_coords[np.argmin([np.sqrt(np.power(Pt[0]-W2_pt[0],2) + np.power(Pt[1]-W2_pt[1],2)) for Pt in r_coords])]
# find the points closest to the guide points
pt_W1[i,0], pt_W1[i,1] = W_coords[i][np.argmin([np.sqrt(np.power(Pt[0]-W1_pt[0],2) + np.power(Pt[1]-W1_pt[1],2)) for Pt in W_coords[i]])]
pt_W2[i,0], pt_W2[i,1] = W_coords[i][np.argmin([np.sqrt(np.power(Pt[0]-W2_pt[0],2) + np.power(Pt[1]-W2_pt[1],2)) for Pt in W_coords[i]])]
# calculate the actual width
d_W[i] = np.sqrt(np.power(pt_W1[i,0]-pt_W2[i,0],2) + np.power(pt_W1[i,1]-pt_W2[i,1],2))
i += 1
# take the average of the two at quarter positions
width = np.mean([d_W[0],d_W[1]])
return length, width
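# A minimal usage sketch for feretdiameter, assuming scikit-image is available
# (the synthetic rod-shaped mask below is illustrative only):
def _feretdiameter_example():
    from skimage import measure
    mask = np.zeros((40, 20), dtype=np.uint8)
    mask[5:35, 6:14] = 1  # a rod-shaped "cell", 30 px tall and 8 px wide
    region = measure.regionprops(measure.label(mask))[0]
    # values depend on the skimage orientation convention assumed by feretdiameter
    length, width = feretdiameter(region)
    return length, width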
# take info and make string for focus id
def create_focus_id(region, t, peak, fov, experiment_name=None):
'''Make a unique focus id string for a new focus'''
if experiment_name is None:
focus_id = 'f{:0=2}p{:0=4}t{:0=4}r{:0=2}'.format(fov, peak, t, region.label)
else:
focus_id = '{}f{:0=2}p{:0=4}t{:0=4}r{:0=2}'.format(experiment_name, fov, peak, t, region.label)
return focus_id
# take info and make string for cell id
def create_cell_id(region, t, peak, fov, experiment_name=None):
'''Make a unique cell id string for a new cell'''
# cell_id = ['f', str(fov), 'p', str(peak), 't', str(t), 'r', str(region.label)]
if experiment_name is None:
cell_id = ['f', '%02d' % fov, 'p', '%04d' % peak, 't', '%04d' % t, 'r', '%02d' % region.label]
cell_id = ''.join(cell_id)
else:
cell_id = '{}f{:0=2}p{:0=4}t{:0=4}r{:0=2}'.format(experiment_name, fov, peak, t, region.label)
return cell_id
def create_detection_id(t, peak, fov, region_label, experiment_name=None, max_cell_number=6):
    '''Make a unique detection id string for a new detection'''
# cell_id = ['f', str(fov), 'p', str(peak), 't', str(t), 'r', str(region.label)]
if experiment_name is None:
det_id = ['f', '%02d' % fov, 'p', '%04d' % peak, 't', '%04d' % t, 'r', '%02d' % region_label]
det_id = ''.join(det_id)
else:
det_id = '{}f{:0=2}p{:0=4}t{:0=4}r{:0=2}'.format(experiment_name, fov, peak, t, region_label)
return det_id
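# For example, with experiment_name='exp01', fov=1, peak=23, t=5 and region
# label 2, both create_cell_id and create_detection_id produce the id string
# 'exp01f01p0023t0005r02'.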
def initialize_track_graph(peak_id,
fov_id,
experiment_name,
predictions_dict,
regions_by_time,
max_cell_number=6,
born_threshold=0.75,
appear_threshold=0.75):
detection_dict = {}
frame_num = predictions_dict['migrate_model_predictions'].shape[0]
ebunch = []
G = nx.MultiDiGraph()
# create common start point
G.add_node('A')
# create common end point
G.add_node('B')
last_frame = False
node_id_list = []
timepoint_list = []
region_label_list = []
for frame_idx in range(frame_num):
timepoint = frame_idx + 1
paired_detection_time = timepoint+1
# get detections for this frame
frame_regions_list = regions_by_time[frame_idx]
# if we're at the end of the imaging, make all cells migrate to node 'B'
if timepoint == frame_num:
last_frame = True
else:
paired_frame_regions_list = regions_by_time[frame_idx+1]
# get state change probabilities (class predictions) for this frame
frame_prediction_dict = {key:val[frame_idx,...] for key,val in predictions_dict.items() if key != 'general_model_predictions'}
# for i in range(len(predictions_dict['general_model_predictions'])):
# frame_general_prediction = predictions_dict['general_model_predictions'][]
# create the "will be born" and "will appear" nodes for this frame
prior_born_state = 'born_{:0=4}'.format(timepoint-1)
born_state = 'born_{:0=4}'.format(timepoint)
G.add_node(born_state, visited=False, time=timepoint)
prior_appear_state = 'appear_{:0=4}'.format(timepoint-1)
appear_state = 'appear_{:0=4}'.format(timepoint)
G.add_node(appear_state, visited=False, time=timepoint)
if frame_idx == 0:
ebunch.append(('A', appear_state, 'start', {'weight':appear_threshold, 'score':1*np.log(appear_threshold)}))
ebunch.append(('A', born_state, 'start', {'weight':born_threshold, 'score':1*np.log(born_threshold)}))
# create the "Dies" and "Disappeared" nodes to link from prior frame
prior_dies_state = 'dies_{:0=4}'.format(timepoint-1)
dies_state = 'dies_{:0=4}'.format(timepoint)
next_dies_state = 'dies_{:0=4}'.format(timepoint+1)
G.add_node(dies_state, visited=False, time=timepoint)
prior_disappear_state = 'disappear_{:0=4}'.format(timepoint-1)
disappear_state = 'disappear_{:0=4}'.format(timepoint)
next_disappear_state = 'disappear_{:0=4}'.format(timepoint+1)
G.add_node(disappear_state, visited=False, time=timepoint)
node_id_list.extend([born_state, dies_state, appear_state, disappear_state])
timepoint_list.extend([timepoint, timepoint, timepoint, timepoint])
region_label_list.extend([0,0,0,0])
if frame_idx > 0:
ebunch.append((prior_dies_state, dies_state, 'die', {'weight':1.1, 'score':1*np.log(1.1)})) # impossible to move out of dies track
ebunch.append((prior_disappear_state, disappear_state, 'disappear', {'weight':1.1, 'score':1*np.log(1.1)})) # impossible to move out of disappear track
ebunch.append((prior_born_state, born_state, 'born', {'weight':born_threshold, 'score':1*np.log(born_threshold)}))
ebunch.append((prior_appear_state, appear_state, 'appear', {'weight':appear_threshold, 'score':1*np.log(appear_threshold)}))
if last_frame:
ebunch.append((appear_state, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
ebunch.append((disappear_state, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
ebunch.append((born_state, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
ebunch.append((dies_state, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
for region_idx in range(max_cell_number):
# the tracking models assume there are 6 detections in each frame, regardless of how many
# are actually there. Therefore, this try/except logic will catch cases where there
# were fewer than 6 detections in a frame.
try:
region = frame_regions_list[region_idx]
region_label = region.label
except IndexError:
region = None
region_label = region_idx + 1
# create the name for this detection
detection_id = create_detection_id(timepoint,
peak_id,
fov_id,
region_label,
experiment_name=experiment_name)
det = Detection(detection_id, region, timepoint)
detection_dict[det.id] = det
if det.area is not None:
# if the detection represents a segmentation from our imaging, add its ID,
# which is also its key in detection_dict, as a node in G
G.add_node(det.id, visited=False, cell_count=1, region=region, time=timepoint)
timepoint_list.append(timepoint)
node_id_list.append(detection_id)
region_label_list.append(region.label)
# also set up all edges for this detection's node in our ebunch
# loop through prediction types and add each to the ebunch
for key,val in frame_prediction_dict.items():
if frame_idx == 0:
ebunch.append(('A', detection_id, 'start', {'weight':1, 'score':1*np.log(1)}))
if last_frame:
ebunch.append((detection_id, 'B', 'end', {'weight':1, 'score':1*np.log(1)}))
if val.shape[0] == max_cell_number ** 2:
continue
else:
frame_predictions = val
detection_prediction = frame_predictions[region_idx]
                        elem = None
                        if key == 'appear_model_predictions':
                            if frame_idx == 0:
                                continue
                            elem = (prior_appear_state, detection_id, 'appear', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
                        elif 'born' in key:
                            if frame_idx == 0:
                                continue
                            elem = (prior_born_state, detection_id, 'born', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
                        elif 'zero_cell' in key:
                            G.nodes[det.id]['zero_cell_weight'] = detection_prediction
                            G.nodes[det.id]['zero_cell_score'] = 1*np.log(detection_prediction)
                        elif 'one_cell' in key:
                            G.nodes[det.id]['one_cell_weight'] = detection_prediction
                            G.nodes[det.id]['one_cell_score'] = 1*np.log(detection_prediction)
                        elif 'two_cell' in key:
                            G.nodes[det.id]['two_cell_weight'] = detection_prediction
                            G.nodes[det.id]['two_cell_score'] = 1*np.log(detection_prediction)
                        # only append when this key produced an edge (cell-count keys set node attributes)
                        if elem is not None:
                            ebunch.append(elem)
else:
# if the array is cell_number^2, reshape it to cell_number x cell_number
# Then slice our detection's row and iterate over paired_cells
if val.shape[0] == max_cell_number**2:
frame_predictions = val.reshape((max_cell_number,max_cell_number))
detection_predictions = frame_predictions[region_idx,:]
# loop through paired detection predictions, test whether paired detection exists
# then append the edge to our ebunch
for paired_cell_idx in range(detection_predictions.size):
# attempt to grab the paired detection. If we get an IndexError, it doesn't exist.
try:
paired_detection = paired_frame_regions_list[paired_cell_idx]
except IndexError:
continue
# create the paired detection's id for use in our ebunch
paired_detection_id = create_detection_id(paired_detection_time,
peak_id,
fov_id,
paired_detection.label,
experiment_name=experiment_name)
paired_prediction = detection_predictions[paired_cell_idx]
if 'child_' in key:
child_weight = paired_prediction
elem = (detection_id, paired_detection_id, 'child', {'child_weight':child_weight, 'score':1*np.log(child_weight)})
ebunch.append(elem)
if 'migrate_' in key:
migrate_weight = paired_prediction
elem = (detection_id, paired_detection_id, 'migrate', {'migrate_weight':migrate_weight, 'score':1*np.log(migrate_weight)})
ebunch.append(elem)
# if 'interaction_' in key:
# interaction_weight = paired_prediction
# elem = (detection_id, paired_detection_id, 'interaction', {'weight':interaction_weight, 'score':1*np.log(interaction_weight)})
# ebunch.append(elem)
# if the array is cell_number long, do similar stuff as above.
elif val.shape[0] == max_cell_number:
                        frame_predictions = val
                        detection_prediction = frame_predictions[region_idx]
                        elem = None
                        if key == 'appear_model_predictions':
                            if frame_idx == 0:
                                continue
                            # print("Linking {} to {}.".format(prior_appear_state, detection_id))
                            elem = (prior_appear_state, detection_id, 'appear', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
                        elif 'disappear_' in key:
                            if last_frame:
                                continue
                            # print("Linking {} to {}.".format(detection_id, next_disappear_state))
                            elem = (detection_id, next_disappear_state, 'disappear', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
                        elif 'born_' in key:
                            if frame_idx == 0:
                                continue
                            # print("Linking {} to {}.".format(prior_born_state, detection_id))
                            elem = (prior_born_state, detection_id, 'born', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
                        elif 'die_model' in key:
                            if last_frame:
                                continue
                            # print("Linking {} to {}.".format(detection_id, next_dies_state))
                            elem = (detection_id, next_dies_state, 'die', {'weight':detection_prediction, 'score':1*np.log(detection_prediction)})
                        # the following classes aren't yet implemented as edges; store node-level weights
                        elif 'zero_cell' in key:
                            G.nodes[det.id]['zero_cell_weight'] = detection_prediction
                            G.nodes[det.id]['zero_cell_score'] = 1*np.log(detection_prediction)
                        elif 'one_cell' in key:
                            G.nodes[det.id]['one_cell_weight'] = detection_prediction
                            G.nodes[det.id]['one_cell_score'] = 1*np.log(detection_prediction)
                        elif 'two_cell' in key:
                            G.nodes[det.id]['two_cell_weight'] = detection_prediction
                            G.nodes[det.id]['two_cell_score'] = 1*np.log(detection_prediction)
                        if elem is not None:
                            ebunch.append(elem)
G.add_edges_from(ebunch)
graph_df = pd.DataFrame(data={'timepoint':timepoint_list,
'node_id':node_id_list,
'region_label':region_label_list})
return(G, graph_df)
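# The pairwise models (migrate/child) emit max_cell_number**2 predictions per
# frame. Reshaping to (max_cell_number, max_cell_number), as done above, makes
# row i the predicted links from detection i in frame t to each detection in
# frame t+1. A standalone sketch (fake values, max_cell_number = 6):
def _pairwise_reshape_sketch():
    val = np.linspace(0.01, 0.99, 36)        # stand-in for a model's output
    frame_predictions = val.reshape((6, 6))
    links_from_detection_2 = frame_predictions[2, :]  # one score per paired detection
    return links_from_detection_2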
# function for a growing cell, used to calculate growth rate
def cell_growth_func(t, sb, elong_rate):
'''
    Assumes you have taken the log of the data.
    It also allows the size at birth to be a free parameter, rather than fixed
    at the measured size at birth (though that value is still used as the initial guess).
    Assumes natural log, not base 2 (though I think that makes less sense).
old form: sb*2**(alpha*t)
'''
return sb+elong_rate*t
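# A minimal sketch of fitting cell_growth_func to log-transformed lengths with
# SciPy (synthetic data; the initial guess mirrors the docstring's suggestion
# of using the measured size at birth):
def _elong_rate_fit_sketch():
    from scipy.optimize import curve_fit
    t = np.array([0., 5., 10., 15., 20.])          # minutes
    log_lengths = np.log(2.0 * np.exp(0.03 * t))   # synthetic exponential growth
    p0 = (log_lengths[0], 0.01)                    # (log size-at-birth, elong_rate)
    (sb_fit, elong_rate_fit), _ = curve_fit(cell_growth_func, t, log_lengths, p0=p0)
    return sb_fit, elong_rate_fit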
# functions for checking if a cell has divided or not
# this function should also take the variable t to
# weight the allowed changes by the difference in time as well
def check_growth_by_region(cell, region):
'''Checks to see if it makes sense
to grow a cell by a particular region'''
# load parameters for checking
max_growth_length = params['track']['max_growth_length']
min_growth_length = params['track']['min_growth_length']
max_growth_area = params['track']['max_growth_area']
min_growth_area = params['track']['min_growth_area']
# check if length is not too much longer
if cell.lengths[-1]*max_growth_length < region.major_axis_length:
return False
# check if it is not too short (cell should not shrink really)
if cell.lengths[-1]*min_growth_length > region.major_axis_length:
return False
# check if area is not too great
if cell.areas[-1]*max_growth_area < region.area:
return False
# check if area is not too small
    if cell.areas[-1]*min_growth_area > region.area:
return False
# # check if y position of region is within
# # the quarter positions of the bounding box
# lower_quarter = cell.bboxes[-1][0] + (region.major_axis_length / 4)
# upper_quarter = cell.bboxes[-1][2] - (region.major_axis_length / 4)
# if lower_quarter > region.centroid[0] or upper_quarter < region.centroid[0]:
# return False
# check if y position of region is within the bounding box of previous region
lower_bound = cell.bboxes[-1][0]
upper_bound = cell.bboxes[-1][2]
if lower_bound > region.centroid[0] or upper_bound < region.centroid[0]:
return False
# return true if you get this far
return True
# see if a cell has reasonably divided
def check_division(cell, region1, region2):
'''Checks to see if it makes sense to divide a
cell into two new cells based on two regions.
    Return 0 if nothing should happen and the regions should be ignored
Return 1 if cell should grow by region 1
Return 2 if cell should grow by region 2
Return 3 if cell should divide into the regions.'''
# load in parameters
max_growth_length = params['track']['max_growth_length']
min_growth_length = params['track']['min_growth_length']
    # see if either region alone could just be continued growth;
    # if so, return that case. These checks shouldn't pass if the cell
    # actually divided, since each daughter would be too small.
if check_growth_by_region(cell, region1):
return 1
if check_growth_by_region(cell, region2):
return 2
# make sure combined size of daughters is not too big
combined_size = region1.major_axis_length + region2.major_axis_length
# check if length is not too much longer
if cell.lengths[-1]*max_growth_length < combined_size:
return 0
# and not too small
if cell.lengths[-1]*min_growth_length > combined_size:
return 0
    # centroids of the two regions should fall in the upper and lower halves
    # of the mother's bounding box, respectively
# top region within top half of mother bounding box
if cell.bboxes[-1][0] > region1.centroid[0] or cell.centroids[-1][0] < region1.centroid[0]:
return 0
# bottom region with bottom half of mother bounding box
if cell.centroids[-1][0] > region2.centroid[0] or cell.bboxes[-1][2] < region2.centroid[0]:
return 0
# if you got this far then divide the mother
return 3
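# Typical dispatch on check_division's return code (sketch; 't' and the division
# bookkeeping are hypothetical placeholders for the caller's logic):
# code = check_division(cell, region1, region2)
# if code == 1: cell.grow(region1, t)
# elif code == 2: cell.grow(region2, t)
# elif code == 3: create two daughter cells from region1/region2 and divide
# (0 means neither region is a plausible continuation, so both are ignored)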
### functions for pruning a dictionary of cells
# find cells with both a mother and two daughters
def find_complete_cells(Cells):
'''Go through a dictionary of cells and return another dictionary
that contains just those with a parent and daughters'''
Complete_Cells = {}
for cell_id in Cells:
if Cells[cell_id].daughters and Cells[cell_id].parent:
Complete_Cells[cell_id] = Cells[cell_id]
return Complete_Cells
# finds cells whose birth label is 1
def find_mother_cells(Cells):
'''Return only cells whose starting region label is 1.'''
Mother_Cells = {}
for cell_id in Cells:
if Cells[cell_id].birth_label == 1:
Mother_Cells[cell_id] = Cells[cell_id]
return Mother_Cells
def filter_foci(Foci, label, t, debug=False):
Filtered_Foci = {}
for focus_id, focus in Foci.items():
# copy the times list so as not to update it in-place
        times = list(focus.times)
if debug:
print(times)
match_inds = [i for i,time in enumerate(times) if time == t]
labels = [focus.labels[idx] for idx in match_inds]
if label in labels:
Filtered_Foci[focus_id] = focus
return Filtered_Foci
def filter_cells(Cells, attr, val, idx=None, debug=False):
'''Return only cells whose designated attribute equals "val".'''
Filtered_Cells = {}
for cell_id, cell in Cells.items():
at_val = getattr(cell, attr)
if debug:
print(at_val)
print("Times: ", cell.times)
if idx is not None:
at_val = at_val[idx]
if at_val == val:
Filtered_Cells[cell_id] = cell
return Filtered_Cells
def filter_cells_containing_val_in_attr(Cells, attr, val):
'''Return only cells that have val in list attribute, attr.'''
Filtered_Cells = {}
for cell_id, cell in Cells.items():
at_list = getattr(cell, attr)
if val in at_list:
Filtered_Cells[cell_id] = cell
return Filtered_Cells
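# Example usage of the filters above (sketch):
# mothers_in_fov1 = filter_cells(find_mother_cells(Cells), attr='fov', val=1)
# alive_at_t10 = filter_cells_containing_val_in_attr(Cells, attr='times', val=10)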
### functions for additional cell centric analysis
def compile_cell_info_df(Cells):
    '''Compile two DataFrames from a dictionary of Cell objects: a long one with
    one row per cell per timepoint, and a wide one with one row per cell.'''
    # count the number of rows that will be in the long dataframe
quant_fluor = False
long_df_row_number = 0
for cell in Cells.values():
# first time through, evaluate whether we quantified cells' fluorescence
if long_df_row_number == 0:
if len(cell.area_mean_fluorescence.keys()) != 0:
quant_fluor = True
fluorescence_channels = [k for k in cell.area_mean_fluorescence.keys()]
long_df_row_number += len(cell.times)
# initialize some arrays for filling with data
data = {
# ids can be up to 100 characters long
'id': np.chararray(long_df_row_number, itemsize=100),
'times': np.zeros(long_df_row_number, dtype='uint16'),
'lengths': np.zeros(long_df_row_number),
'volumes': np.zeros(long_df_row_number),
'areas': np.zeros(long_df_row_number),
'abs_times': np.zeros(long_df_row_number, dtype='uint32')
}
if quant_fluor:
for fluorescence_channel in fluorescence_channels:
data['{}_area_mean_fluorescence'.format(fluorescence_channel)] = np.zeros(long_df_row_number)
data['{}_volume_mean_fluorescence'.format(fluorescence_channel)] = np.zeros(long_df_row_number)
data['{}_total_fluorescence'.format(fluorescence_channel)] = np.zeros(long_df_row_number)
data = populate_focus_arrays(Cells, data, cell_quants=True)
long_df = pd.DataFrame(data=data)
wide_df_row_number = len(Cells)
data = {
# ids can be up to 100 characters long
'id': np.chararray(wide_df_row_number, itemsize=100),
'fov': np.zeros(wide_df_row_number, dtype='uint8'),
'peak': np.zeros(wide_df_row_number, dtype='uint16'),
'parent_id': np.chararray(wide_df_row_number, itemsize=100),
'child1_id': np.chararray(wide_df_row_number, itemsize=100),
'child2_id': np.chararray(wide_df_row_number, itemsize=100),
'division_time': np.zeros(wide_df_row_number),
'birth_label': np.zeros(wide_df_row_number, dtype='uint8'),
'birth_time': np.zeros(wide_df_row_number, dtype='uint16'),
'sb': np.zeros(wide_df_row_number),
'sd': np.zeros(wide_df_row_number),
'delta': np.zeros(wide_df_row_number),
'tau': np.zeros(wide_df_row_number),
'elong_rate': np.zeros(wide_df_row_number),
'septum_position': np.zeros(wide_df_row_number),
'death': np.zeros(wide_df_row_number),
'disappear': np.zeros(wide_df_row_number)
}
data = populate_focus_arrays(Cells, data, cell_quants=True, wide=True)
# data['parent_id'] = data['parent_id'].decode()
# data['child1_id'] = data['child1_id'].decode()
# data['child2_id'] = data['child2_id'].decode()
wide_df = pd.DataFrame(data=data)
return(wide_df,long_df)
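# Example usage (sketch): the wide frame has one row per cell, the long frame
# one row per (cell, timepoint):
# wide_df, long_df = compile_cell_info_df(find_complete_cells(Cells))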
def populate_focus_arrays(Foci, data_dict, cell_quants=False, wide=False):
focus_counter = 0
focus_count = len(Foci)
end_idx = 0
for i,focus in enumerate(Foci.values()):
if wide:
start_idx = i
end_idx = i + 1
else:
start_idx = end_idx
end_idx = len(focus) + start_idx
if focus_counter % 100 == 0:
print("Generating focus information for focus {} out of {}.".format(focus_counter+1, focus_count))
# loop over keys in data dictionary, and set
# values in appropriate array, at appropriate indices
# to those we find in the focus.
for key in data_dict.keys():
if '_id' in key:
if key == 'parent_id':
if focus.parent is None:
data_dict[key][start_idx:end_idx] = ''
else:
data_dict[key][start_idx:end_idx] = focus.parent.id
if focus.daughters is None:
if key == 'child1_id' or key == 'child2_id':
data_dict[key][start_idx:end_idx] = ''
elif len(focus.daughters) == 1:
if key == 'child2_id':
data_dict[key][start_idx:end_idx] = ''
elif key == 'child1_id':
data_dict[key][start_idx:end_idx] = focus.daughters[0].id
elif key == 'child2_id':
data_dict[key][start_idx:end_idx] = focus.daughters[1].id
else:
attr_vals = getattr(focus, key)
if (cell_quants and key=='abs_times'):
if len(attr_vals) == end_idx-start_idx:
data_dict[key][start_idx:end_idx] = attr_vals
else:
data_dict[key][start_idx:end_idx] = attr_vals[:-1]
else:
# print(key)
# print(attr_vals)
data_dict[key][start_idx:end_idx] = attr_vals
focus_counter += 1
data_dict['id'] = data_dict['id'].decode()
return(data_dict)
def compile_foci_info_long_df(Foci):
'''
Parameters
----------------
Foci : dictionary, keys of which are focus_ids,
values of which are objects of class Focus
Returns
----------------------
A long DataFrame with
detailed information about each timepoint for each focus.
'''
# count the number of rows that will be in the long dataframe
long_df_row_number = 0
for focus in Foci.values():
long_df_row_number += len(focus)
# initialize some arrays for filling with data
data = {
# ids can be up to 100 characters long
'id': np.chararray(long_df_row_number, itemsize=100),
'times': np.zeros(long_df_row_number, dtype='uint16'),
'lengths': np.zeros(long_df_row_number),
'volumes': np.zeros(long_df_row_number),
'areas': np.zeros(long_df_row_number),
'abs_times': np.zeros(long_df_row_number, dtype='uint32'),
'area_mean_fluorescence': np.zeros(long_df_row_number),
'volume_mean_fluorescence': np.zeros(long_df_row_number),
'total_fluorescence': np.zeros(long_df_row_number),
'median_fluorescence': np.zeros(long_df_row_number),
'sd_fluorescence': np.zeros(long_df_row_number),
'disp_l': np.zeros(long_df_row_number),
'disp_w': np.zeros(long_df_row_number)
}
data = populate_focus_arrays(Foci, data)
long_df = pd.DataFrame(data=data)
return(long_df)
def find_all_cell_intensities(Cells,
specs, time_table, channel_name='sub_c2',
apply_background_correction=True):
'''
    Finds fluorescence information for cells across every fov/peak combination
    marked for analysis in specs.
'''
# iterate over each fov in specs
for fov_id,fov_peaks in specs.items():
# iterate over each peak in fov
for peak_id,peak_value in fov_peaks.items():
# if peak_id's value is not 1, go to next peak
if peak_value != 1:
continue
print("Quantifying channel {} fluorescence in cells in fov {}, peak {}.".format(channel_name, fov_id, peak_id))
# Load fluorescent images and segmented images for this channel
fl_stack = load_stack(fov_id, peak_id, color=channel_name)
corrected_stack = np.zeros(fl_stack.shape)
for frame in range(fl_stack.shape[0]):
# median filter will be applied to every image
with warnings.catch_warnings():
warnings.simplefilter("ignore")
median_filtered = median(fl_stack[frame,...], selem=morphology.disk(1))
# subtract the gaussian-filtered image from true image to correct
# uneven background fluorescence
if apply_background_correction:
blurred = filters.gaussian(median_filtered, sigma=10, preserve_range=True)
corrected_stack[frame,:,:] = median_filtered-blurred
else:
corrected_stack[frame,:,:] = median_filtered
seg_stack = load_stack(fov_id, peak_id, color='seg_unet')
# evaluate whether each cell is in this fov/peak combination
for cell_id,cell in Cells.items():
cell_fov = cell.fov
if cell_fov != fov_id:
continue
cell_peak = cell.peak
if cell_peak != peak_id:
continue
cell_times = cell.times
cell_labels = cell.labels
cell.area_mean_fluorescence[channel_name] = []
cell.volume_mean_fluorescence[channel_name] = []
cell.total_fluorescence[channel_name] = []
# loop through cell's times
for i,t in enumerate(cell_times):
frame = t-1
cell_label = cell_labels[i]
total_fluor = np.sum(corrected_stack[frame, seg_stack[frame, :,:] == cell_label])
cell.area_mean_fluorescence[channel_name].append(total_fluor/cell.areas[i])
cell.volume_mean_fluorescence[channel_name].append(total_fluor/cell.volumes[i])
cell.total_fluorescence[channel_name].append(total_fluor)
# The cell objects in the original dictionary will be updated,
# no need to return anything specifically.
return
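# The per-frame background correction above is: median-filter, then subtract a
# heavily Gaussian-blurred copy (sigma=10) to flatten slowly varying background.
# A standalone sketch on a single frame (assumes scikit-image, as above):
def _background_correct_sketch(frame_img):
    from skimage import filters, morphology
    from skimage.filters import median
    med = median(frame_img, morphology.disk(1))
    blurred = filters.gaussian(med, sigma=10, preserve_range=True)
    return med.astype(np.float64) - blurred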
def find_cell_intensities_worker(fov_id, peak_id, Cells, midline=True, channel='sub_c3'):
'''
    Finds fluorescence information for cells. All the cells in Cells
should be from one fov/peak. See the function
organize_cells_by_channel()
    This version is the same as find_cell_intensities but returns the Cells object for collection by the pool.
The original find_cell_intensities is kept for compatibility.
'''
information('Processing peak {} in FOV {}'.format(peak_id, fov_id))
# Load fluorescent images and segmented images for this channel
fl_stack = load_stack(fov_id, peak_id, color=channel)
seg_stack = load_stack(fov_id, peak_id, color='seg_otsu')
# determine absolute time index
time_table = params['time_table']
times_all = []
for fov in params['time_table']:
times_all = np.append(times_all, [int(x) for x in time_table[fov].keys()])
times_all = np.unique(times_all)
times_all = np.sort(times_all)
times_all = np.array(times_all,np.int_)
t0 = times_all[0] # first time index
# Loop through cells
for Cell in Cells.values():
# give this cell two lists to hold new information
Cell.fl_tots = [] # total fluorescence per time point
Cell.fl_area_avgs = [] # avg fluorescence per unit area by timepoint
Cell.fl_vol_avgs = [] # avg fluorescence per unit volume by timepoint
if midline:
Cell.mid_fl = [] # avg fluorescence of midline
# and the time points that make up this cell's life
for n, t in enumerate(Cell.times):
# create fluorescent image only for this cell and timepoint.
fl_image_masked = np.copy(fl_stack[t-t0])
fl_image_masked[seg_stack[t-t0] != Cell.labels[n]] = 0
            # append the total fluorescence for this timepoint
Cell.fl_tots.append(np.sum(fl_image_masked))
# and the average fluorescence
Cell.fl_area_avgs.append(np.sum(fl_image_masked) / Cell.areas[n])
Cell.fl_vol_avgs.append(np.sum(fl_image_masked) / Cell.volumes[n])
if midline:
# add the midline average by first applying morphology transform
bin_mask = np.copy(seg_stack[t-t0])
bin_mask[bin_mask != Cell.labels[n]] = 0
med_mask, _ = morphology.medial_axis(bin_mask, return_distance=True)
# med_mask[med_dist < np.floor(cap_radius/2)] = 0
# print(img_fluo[med_mask])
if (np.shape(fl_image_masked[med_mask])[0] > 0):
Cell.mid_fl.append(np.nanmean(fl_image_masked[med_mask]))
else:
Cell.mid_fl.append(0)
# return the cell object to the pool initiated by mm3_Colors.
return Cells
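# Sketch of collecting the worker's results with a pool (hypothetical driver;
# 'num_analyzers' mirrors the parameter used elsewhere in this module):
# from multiprocessing import Pool
# pool = Pool(processes=params['num_analyzers'])
# result = pool.apply_async(find_cell_intensities_worker, args=(fov_id, peak_id, Cells))
# Cells.update(result.get())
# pool.close(); pool.join()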
def find_cell_intensities(fov_id, peak_id, Cells, midline=False, channel_name='sub_c2'):
'''
    Finds fluorescence information for cells. All the cells in Cells
should be from one fov/peak. See the function
organize_cells_by_channel()
'''
# Load fluorescent images and segmented images for this channel
fl_stack = load_stack(fov_id, peak_id, color=channel_name)
seg_stack = load_stack(fov_id, peak_id, color='seg_unet')
# determine absolute time index
times_all = []
for fov in params['time_table']:
        times_all = np.append(times_all, [int(x) for x in params['time_table'][fov].keys()])
times_all = np.unique(times_all)
times_all = np.sort(times_all)
times_all = np.array(times_all,np.int_)
t0 = times_all[0] # first time index
# Loop through cells
for Cell in Cells.values():
# give this cell two lists to hold new information
Cell.fl_tots = [] # total fluorescence per time point
Cell.fl_area_avgs = [] # avg fluorescence per unit area by timepoint
Cell.fl_vol_avgs = [] # avg fluorescence per unit volume by timepoint
if midline:
Cell.mid_fl = [] # avg fluorescence of midline
# and the time points that make up this cell's life
for n, t in enumerate(Cell.times):
# create fluorescent image only for this cell and timepoint.
fl_image_masked = np.copy(fl_stack[t-t0])
fl_image_masked[seg_stack[t-t0] != Cell.labels[n]] = 0
            # append the total fluorescence for this timepoint
Cell.fl_tots.append(np.sum(fl_image_masked))
# and the average fluorescence
Cell.fl_area_avgs.append(np.sum(fl_image_masked) / Cell.areas[n])
Cell.fl_vol_avgs.append(np.sum(fl_image_masked) / Cell.volumes[n])
if midline:
# add the midline average by first applying morphology transform
bin_mask = np.copy(seg_stack[t-t0])
bin_mask[bin_mask != Cell.labels[n]] = 0
med_mask, _ = morphology.medial_axis(bin_mask, return_distance=True)
# med_mask[med_dist < np.floor(cap_radius/2)] = 0
# print(img_fluo[med_mask])
if (np.shape(fl_image_masked[med_mask])[0] > 0):
Cell.mid_fl.append(np.nanmean(fl_image_masked[med_mask]))
else:
Cell.mid_fl.append(0)
# The cell objects in the original dictionary will be updated,
# no need to return anything specifically.
return
# find foci using a difference of gaussians method
def foci_analysis(fov_id, peak_id, Cells):
'''Find foci in cells using a fluorescent image channel.
This function works on a single peak and all the cells therein.'''
# make directory for foci debug
# foci_dir = os.path.join(params['ana_dir'], 'overlay/')
# if not os.path.exists(foci_dir):
# os.makedirs(foci_dir)
    # Import segmented and fluorescent images
try:
image_data_seg = load_stack(fov_id, peak_id, color='seg_unet')
except IOError:
image_data_seg = load_stack(fov_id, peak_id, color='seg_otsu')
image_data_FL = load_stack(fov_id, peak_id,
color='sub_{}'.format(params['foci']['foci_plane']))
# determine absolute time index
times_all = []
for fov, times in params['time_table'].items():
times_all = np.append(times_all, list(times.keys()))
times_all = np.unique(times_all)
times_all = np.sort(times_all)
times_all = np.array(times_all, np.int_)
t0 = times_all[0] # first time index
for cell_id, cell in six.iteritems(Cells):
information('Extracting foci information for %s.' % (cell_id))
# declare lists holding information about foci.
disp_l = []
disp_w = []
foci_h = []
# foci_stack = np.zeros((np.size(cell.times),
# image_data_seg[0,:,:].shape[0], image_data_seg[0,:,:].shape[1]))
# Go through each time point of this cell
for t in cell.times:
# retrieve this timepoint and images.
image_data_temp = image_data_FL[t-t0,:,:]
image_data_temp_seg = image_data_seg[t-t0,:,:]
# find foci as long as there is information in the fluorescent image
if np.sum(image_data_temp) != 0:
disp_l_tmp, disp_w_tmp, foci_h_tmp = foci_lap(image_data_temp_seg,
image_data_temp, cell, t)
disp_l.append(disp_l_tmp)
disp_w.append(disp_w_tmp)
foci_h.append(foci_h_tmp)
# if there is no information, append an empty list.
# Should this be NaN?
else:
disp_l.append([])
disp_w.append([])
foci_h.append([])
# foci_stack[i] = image_data_temp_seg
# add information to the cell (will replace old data)
cell.disp_l = disp_l
cell.disp_w = disp_w
cell.foci_h = foci_h
# Create a stack of the segmented images with marked foci
    # This should potentially be changed to the fluorescent images with marked foci
# foci_stack = np.uint16(foci_stack)
# foci_stack = np.stack(foci_stack, axis=0)
# # Export overlaid images
# foci_filename = params['experiment_name'] + 't%04d_xy%03d_p%04d_r%02d_overlay.tif' % (Cells[cell_id].birth_time, Cells[cell_id].fov, Cells[cell_id].peak, Cells[cell_id].birth_label)
# foci_filepath = foci_dir + foci_filename
#
# tiff.imsave(foci_filepath, foci_stack, compress=3) # save it
# test
# sys.exit()
return
# foci pool (for parallel analysis)
def foci_analysis_pool(fov_id, peak_id, Cells):
'''Find foci in cells using a fluorescent image channel.
This function works on a single peak and all the cells therein.'''
# make directory for foci debug
# foci_dir = os.path.join(params['ana_dir'], 'overlay/')
# if not os.path.exists(foci_dir):
# os.makedirs(foci_dir)
    # Import segmented and fluorescent images
image_data_seg = load_stack(fov_id, peak_id, color='seg_unet')
image_data_FL = load_stack(fov_id, peak_id,
color='sub_{}'.format(params['foci']['foci_plane']))
# Load time table to determine first image index.
    times_all = np.array(sorted(int(x) for x in params['time_table'][fov_id].keys()), np.int_)
t0 = times_all[0] # first time index
tN = times_all[-1] # last time index
# call foci_cell for each cell object
pool = Pool(processes=params['num_analyzers'])
    [pool.apply_async(foci_cell, args=(cell_id, cell, t0, image_data_seg, image_data_FL)) for cell_id, cell in six.iteritems(Cells)]
pool.close()
pool.join()
# parallel function for each cell
def foci_cell(cell_id, cell, t0, image_data_seg, image_data_FL):
'''find foci in a cell, single instance to be called by the foci_analysis_pool for parallel processing.
'''
disp_l = []
disp_w = []
foci_h = []
# foci_stack = np.zeros((np.size(cell.times),
# image_data_seg[0,:,:].shape[0], image_data_seg[0,:,:].shape[1]))
# Go through each time point of this cell
for t in cell.times:
# retrieve this timepoint and images.
image_data_temp = image_data_FL[t-t0,:,:]
image_data_temp_seg = image_data_seg[t-t0,:,:]
# find foci as long as there is information in the fluorescent image
if np.sum(image_data_temp) != 0:
disp_l_tmp, disp_w_tmp, foci_h_tmp = foci_lap(image_data_temp_seg,
image_data_temp, cell, t)
disp_l.append(disp_l_tmp)
disp_w.append(disp_w_tmp)
foci_h.append(foci_h_tmp)
# if there is no information, append an empty list.
# Should this be NaN?
else:
disp_l.append(np.nan)
disp_w.append(np.nan)
foci_h.append(np.nan)
# foci_stack[i] = image_data_temp_seg
# add information to the cell (will replace old data)
cell.disp_l = disp_l
cell.disp_w = disp_w
cell.foci_h = foci_h
# actual worker function for foci detection
def foci_lap(img, img_foci, cell, t):
'''foci_lap finds foci using a laplacian convolution then fits a 2D
Gaussian.
The returned information are the parameters of this Gaussian.
    All the information is returned as np.arrays with one entry per focus
    found in this cell at this time point.
Parameters
----------
img : 2D np.array
phase contrast or bright field image. Only used for debug
img_foci : 2D np.array
fluorescent image with foci.
cell : cell object
t : int
time point to which the images correspond
Returns
-------
    disp_l : 1D np.array
        displacement along the long axis, in px, of each focus from the center of the cell
    disp_w : 1D np.array
        displacement along the short axis, in px, of each focus from the center of the cell
    foci_h : 1D np.array
        Focus "height": sum of the intensity over the gaussian fitting area.
'''
# pull out useful information for just this time point
i = cell.times.index(t) # find position of the time point in lists (time points may be missing)
bbox = cell.bboxes[i]
orientation = cell.orientations[i]
centroid = cell.centroids[i]
region = cell.labels[i]
# declare arrays which will hold foci data
disp_l = [] # displacement in length of foci from cell center
disp_w = [] # displacement in width of foci from cell center
foci_h = [] # foci total amount (from raw image)
# define parameters for foci finding
minsig = params['foci']['foci_log_minsig']
maxsig = params['foci']['foci_log_maxsig']
thresh = params['foci']['foci_log_thresh']
peak_med_ratio = params['foci']['foci_log_peak_med_ratio']
debug_foci = params['foci']['debug_foci']
# test
#print ("minsig={:d} maxsig={:d} thres={:.4g} peak_med_ratio={:.2g}".format(minsig,maxsig,thresh,peak_med_ratio))
# test
# calculate median cell intensity. Used to filter foci
    img_foci_masked = np.copy(img_foci).astype(np.float64)
img_foci_masked[img != region] = np.nan
cell_fl_median = np.nanmedian(img_foci_masked)
cell_fl_mean = np.nanmean(img_foci_masked)
img_foci_masked[img != region] = 0
# subtract this value from the cell
if False:
img_foci = img_foci.astype('int32') - cell_fl_median.astype('int32')
img_foci[img_foci < 0] = 0
img_foci = img_foci.astype('uint16')
# int_mask = np.zeros(img_foci.shape, np.uint8)
# avg_int = cv2.mean(img_foci, mask=int_mask)
# avg_int = avg_int[0]
# print('median', cell_fl_median)
# find blobs using difference of gaussian
over_lap = .95 # if two blobs overlap by more than this fraction, smaller blob is cut
    numsig = (maxsig - minsig + 1) # number of sigma values to consider between min and max sig
blobs = blob_log(img_foci_masked, min_sigma=minsig, max_sigma=maxsig,
overlap=over_lap, num_sigma=numsig, threshold=thresh)
# these will hold information about foci position temporarily
x_blob, y_blob, r_blob = [], [], []
x_gaus, y_gaus, w_gaus = [], [], []
# loop through each potential foci
for blob in blobs:
        yloc, xloc, sig = blob # y location, x location, and sigma of the gaussian
xloc = int(np.around(xloc)) # switch to int for slicing images
yloc = int(np.around(yloc))
radius = int(np.ceil(np.sqrt(2)*sig)) # will be used to slice out area around foci
# ensure blob is inside the bounding box
# this might be better to check if (xloc, yloc) is in regions.coords
if yloc > np.int16(bbox[0]) and yloc < np.int16(bbox[2]) and xloc > np.int16(bbox[1]) and xloc < np.int16(bbox[3]):
x_blob.append(xloc) # for plotting
y_blob.append(yloc) # for plotting
r_blob.append(radius)
# cut out a small image from original image to fit gaussian
gfit_area = img_foci[yloc-radius:yloc+radius, xloc-radius:xloc+radius]
# gfit_area_0 = img_foci[max(0, yloc-1*radius):min(img_foci.shape[0], yloc+1*radius),
# max(0, xloc-1*radius):min(img_foci.shape[1], xloc+1*radius)]
gfit_area_fixed = img_foci[yloc-maxsig:yloc+maxsig, xloc-maxsig:xloc+maxsig]
# fit gaussian to proposed foci in small box
p = fitgaussian(gfit_area)
(peak_fit, x_fit, y_fit, w_fit) = p
# print('peak', peak_fit)
if x_fit <= 0 or x_fit >= radius*2 or y_fit <= 0 or y_fit >= radius*2:
if debug_foci: print('Throw out foci (gaus fit not in gfit_area)')
continue
elif peak_fit/cell_fl_median < peak_med_ratio:
if debug_foci: print('Peak does not pass height test.')
continue
else:
# find x and y position relative to the whole image (convert from small box)
x_rel = int(xloc - radius + x_fit)
y_rel = int(yloc - radius + y_fit)
x_gaus = np.append(x_gaus, x_rel) # for plotting
y_gaus = np.append(y_gaus, y_rel) # for plotting
w_gaus = np.append(w_gaus, w_fit) # for plotting
if debug_foci: print('x', xloc, x_rel, x_fit, 'y', yloc, y_rel, y_fit, 'w', sig, radius, w_fit, 'h', np.sum(gfit_area), np.sum(gfit_area_fixed), peak_fit)
# calculate distance of foci from middle of cell (scikit image)
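                # rotate the (dy, dx) offset of the focus from the centroid into the
                # cell frame: disp_y lies along the long axis, disp_x along the short axis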
if orientation < 0:
orientation = np.pi+orientation
disp_y = (y_rel-centroid[0])*np.sin(orientation) - (x_rel-centroid[1])*np.cos(orientation)
disp_x = (y_rel-centroid[0])*np.cos(orientation) + (x_rel-centroid[1])*np.sin(orientation)
# append foci information to the list
disp_l = np.append(disp_l, disp_y)
disp_w = np.append(disp_w, disp_x)
foci_h = np.append(foci_h, np.sum(gfit_area_fixed))
# foci_h = np.append(foci_h, peak_fit)
else:
if debug_foci:
print ('Blob not in bounding box.')
# draw foci on image for quality control
if debug_foci:
outputdir = os.path.join(params['ana_dir'], 'debug_foci')
if not os.path.isdir(outputdir):
os.makedirs(outputdir)
# print(np.min(gfit_area), np.max(gfit_area), gfit_median, avg_int, peak)
# processing of image
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(1,5,1)
plt.title('fluor image')
plt.imshow(img_foci, interpolation='nearest', cmap='gray')
ax = fig.add_subplot(1,5,2)
ax.set_title('segmented image')
ax.imshow(img, interpolation='nearest', cmap='gray')
ax = fig.add_subplot(1,5,3)
ax.set_title('DoG blobs')
ax.imshow(img_foci, interpolation='nearest', cmap='gray')
# add circles for where the blobs are
for i, spot in enumerate(x_blob):
foci_center = Ellipse([x_blob[i], y_blob[i]], r_blob[i], r_blob[i],
color=(1.0, 1.0, 0), linewidth=2, fill=False, alpha=0.5)
ax.add_patch(foci_center)
# show the shape of the gaussian for recorded foci
ax = fig.add_subplot(1,5,4)
ax.set_title('final foci')
ax.imshow(img_foci, interpolation='nearest', cmap='gray')
# print foci that pass and had gaussians fit
for i, spot in enumerate(x_gaus):
foci_ellipse = Ellipse([x_gaus[i], y_gaus[i]], w_gaus[i], w_gaus[i],
color=(0, 1.0, 0.0), linewidth=2, fill=False, alpha=0.5)
ax.add_patch(foci_ellipse)
ax = fig.add_subplot(1,5,5)
ax.set_title('overlay')
ax.imshow(img, interpolation='nearest', cmap='gray')
# print foci that pass and had gaussians fit
for i, spot in enumerate(x_gaus):
foci_ellipse = Ellipse([x_gaus[i], y_gaus[i]], 3, 3,
color=(1.0, 1.0, 0), linewidth=2, fill=False, alpha=0.5)
ax.add_patch(foci_ellipse)
#plt.show()
filename = 'foci_' + cell.id + '_time{:04d}'.format(t) + '.pdf'
fileout = os.path.join(outputdir,filename)
fig.savefig(fileout, bbox_inches='tight', pad_inches=0)
print (fileout)
plt.close('all')
nblobs = len(blobs)
print ("nblobs = {:d}".format(nblobs))
return disp_l, disp_w, foci_h
# actual worker function for foci detection
def foci_info_unet(foci, Cells, specs, time_table, channel_name='sub_c2'):
'''foci_info_unet operates on cells in which foci have been found
using Unet.
Parameters
----------
foci : empty dictionary for Focus objects to be placed into
Cells : dictionary of Cell objects to which foci will be added
specs : dictionary containing information on which fov/peak ids
are to be used, and which are to be excluded from analysis
time_table : dictionary containing information on which time
points correspond to which absolute times in seconds
channel_name : name of fluorescent channel for reading in
fluorescence images for focus quantification
Returns
-------
Updates cell information in Cells in-place.
Cells must have .foci attribute
'''
# iterate over each fov in specs
for fov_id,fov_peaks in specs.items():
# keep cells with this fov_id
fov_cells = filter_cells(Cells, attr='fov', val=fov_id)
# iterate over each peak in fov
for peak_id,peak_value in fov_peaks.items():
# print(fov_id, peak_id)
# keep cells with this peak_id
peak_cells = filter_cells(fov_cells, attr='peak', val=peak_id)
# if peak_id's value is not 1, go to next peak
if peak_value != 1:
continue
print("Analyzing foci in experiment {}, channel {}, fov {}, peak {}.".format(params['experiment_name'], channel_name, fov_id, peak_id))
# Load fluorescent images and segmented images for this channel
fl_stack = load_stack(fov_id, peak_id, color=channel_name)
seg_foci_stack = load_stack(fov_id, peak_id, color='foci_seg_unet')
seg_cell_stack = load_stack(fov_id, peak_id, color='seg_unet')
# loop over each frame
for frame in range(fl_stack.shape[0]):
fl_img = fl_stack[frame, ...]
seg_foci_img = seg_foci_stack[frame, ...]
seg_cell_img = seg_cell_stack[frame, ...]
# if there are no foci in this frame, move to next frame
if np.max(seg_foci_img) == 0:
continue
# if there are no cells in this fov/peak/frame, move to next frame
if np.max(seg_cell_img) == 0:
continue
t = frame+1
frame_cells = filter_cells_containing_val_in_attr(peak_cells, attr='times', val=t)
# loop over focus regions in this frame
focus_regions = measure.regionprops(seg_foci_img)
# compare this frame's foci to prior frame's foci for tracking
if frame > 0:
prior_seg_foci_img = seg_foci_stack[frame-1, ...]
fov_foci = filter_cells(foci,
attr='fov',
val=fov_id)
peak_foci = filter_cells(fov_foci,
attr='peak',
val=peak_id)
prior_frame_foci = filter_cells_containing_val_in_attr(peak_foci, attr='times', val=t-1)
# if there were foci in prior frame, do stuff
if len(prior_frame_foci) > 0:
prior_regions = measure.regionprops(prior_seg_foci_img)
# compare_array is prior_focus_number x this_focus_number
# contains dice indices for each pairwise comparison
# between focus positions
compare_array = np.zeros((np.max(prior_seg_foci_img),
np.max(seg_foci_img)))
# populate the array with dice indices
for prior_focus_idx in range(np.max(prior_seg_foci_img)):
prior_focus_mask = np.zeros(seg_foci_img.shape)
prior_focus_mask[prior_seg_foci_img == (prior_focus_idx + 1)] = 1
# apply gaussian blur with sigma=1 to prior focus mask
sig = 1
gaus_1 = filters.gaussian(prior_focus_mask, sigma=sig)
for this_focus_idx in range(np.max(seg_foci_img)):
this_focus_mask = np.zeros(seg_foci_img.shape)
this_focus_mask[seg_foci_img == (this_focus_idx + 1)] = 1
# apply gaussian blur with sigma=1 to this focus mask
gaus_2 = filters.gaussian(this_focus_mask, sigma=sig)
# multiply the two images and place max into compare_array
product = gaus_1 * gaus_2
compare_array[prior_focus_idx, this_focus_idx] = np.max(product)
# which rows of each column are maximum product of gaussian blurs?
max_inds = np.argmax(compare_array, axis=0)
# np.argmax returns zero when all rows are equal, so check each
# column's standard deviation: if it is zero, all rows were equal
# and we omit that index from consideration for focus tracking.
sd_vals = np.std(compare_array, axis=0)
tracked_inds = np.where(sd_vals > 0)[0]
# if there is an index from a tracked focus, do this
if tracked_inds.size > 0:
for tracked_idx in tracked_inds:
# grab this frame's region belonging to tracked focus
tracked_label = tracked_idx + 1
(tracked_region_idx, tracked_region) = [(_,reg) for _,reg in enumerate(focus_regions) if reg.label == tracked_label][0]
# pop the region from focus_regions
del focus_regions[tracked_region_idx]
# grab prior frame's region belonging to tracked focus
prior_tracked_label = max_inds[tracked_idx] + 1
# prior_tracked_region = [reg for reg in prior_regions if reg.label == prior_tracked_label][0]
# grab the focus for which the prior_tracked_label is in
# any of the labels in the prior focus from the prior time
prior_tracked_foci = filter_foci(
prior_frame_foci,
label=prior_tracked_label,
t = t-1,
debug=False
)
prior_tracked_focus = [val for val in prior_tracked_foci.values()][0]
# determine which cell this focus belongs to
for cell_id,cell in frame_cells.items():
cell_idx = cell.times.index(t)
cell_label = cell.labels[cell_idx]
masked_cell_img = np.zeros(seg_cell_img.shape)
masked_cell_img[seg_cell_img == cell_label] = 1
masked_focus_img = np.zeros(seg_foci_img.shape)
masked_focus_img[seg_foci_img == tracked_region.label] = 1
intersect_img = masked_cell_img + masked_focus_img
# index [0] of np.where's tuple gives the matching pixel coordinates;
# len() of the tuple itself would only return the array dimensionality
pixels_two = len(np.where(intersect_img == 2)[0])
pixels_one = len(np.where(masked_focus_img == 1)[0])
# if over half the focus is within this cell, do the following
if pixels_two/pixels_one >= 0.5:
prior_tracked_focus.grow(
region=tracked_region,
t=t,
seg_img=seg_foci_img,
intensity_image=fl_img,
current_cell=cell
)
# after tracking foci, those that were tracked have been removed from focus_regions list
# now we check if any regions remain in the list
# if there are any remaining, instantiate new foci
if len(focus_regions) > 0:
new_ids = []
for focus_region in focus_regions:
# make the focus_id
new_id = create_focus_id(
region = focus_region,
t = t,
peak = peak_id,
fov = fov_id,
experiment_name = params['experiment_name'])
# populate list for later checking if any are missing
# from foci dictionary's keys
new_ids.append(new_id)
# determine which cell this focus belongs to
for cell_id,cell in frame_cells.items():
cell_idx = cell.times.index(t)
cell_label = cell.labels[cell_idx]
masked_cell_img = np.zeros(seg_cell_img.shape)
masked_cell_img[seg_cell_img == cell_label] = 1
masked_focus_img = np.zeros(seg_foci_img.shape)
masked_focus_img[seg_foci_img == focus_region.label] = 1
intersect_img = masked_cell_img + masked_focus_img
pixels_two = len(np.where(intersect_img == 2)[0])
pixels_one = len(np.where(masked_focus_img == 1)[0])
# if over half the focus is within this cell, do the following
if pixels_two/pixels_one >= 0.5:
# set up the focus
# if no foci in cell, just add this one.
foci[new_id] = Focus(cell = cell,
region = focus_region,
seg_img = seg_foci_img,
intensity_image = fl_img,
t = t)
for new_id in new_ids:
# if new_id is not a key in the foci dictionary,
# that suggests the focus doesn't overlap well
# with any cells in this frame, so we'll relabel
# this frame of seg_foci_stack to zero for that
# focus to avoid trying to track a focus
# that doesn't exist.
if new_id not in foci:
# get label of new_id's region
this_label = int(new_id[-2:])
# set pixels in this frame that match this label to 0
seg_foci_stack[frame, seg_foci_img == this_label] = 0
return
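# Typical call sequence (sketch; Cells, specs, and time_table come from the
# upstream mm3-style loaders, and update_cell_foci is defined just below):
#   foci = {}
#   foci_info_unet(foci, Cells, specs, time_table, channel_name='sub_c2')
#   update_cell_foci(Cells, foci)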
def update_cell_foci(cells, foci):
'''Updates cells' .foci attribute in-place using information
in foci dictionary
'''
for focus_id, focus in foci.items():
for cell in focus.cells:
cell_id = cell.id
cells[cell_id].foci[focus_id] = focus
# finds best fit for a 2D gaussian using the gaussian function defined below
def fitgaussian(data):
"""Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution found by a fit
if params are not provided, they are calculated from the moments
params should be (height, x, y, width_x, width_y)"""
gparams = moments(data) # create guess parameters.
errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) - data)
p, success = leastsq(errorfunction, gparams)
return p
# calculate dice coefficient for two blobs
def dice_coeff_foci(mask_1_f, mask_2_f):
'''Accepts two flattened numpy arrays from
binary masks of two blobs and compares them
using the dice metric.
Returns a single dice score.
'''
intersection = np.sum(mask_1_f * mask_2_f)
score = (2. * intersection) / (np.sum(mask_1_f) + np.sum(mask_2_f))
return score
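# Example (sketch, illustrative arrays): identical masks score 1.0 and
# disjoint masks score 0.0.
#   m1 = np.array([0., 1., 1., 1., 0.])
#   m2 = np.array([0., 0., 1., 1., 1.])
#   dice_coeff_foci(m1, m2)   # -> 2*2 / (3+3) ~= 0.667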
# returns a 2D gaussian function
def gaussian(height, center_x, center_y, width):
'''Returns a gaussian function with the given parameters. It is a circular gaussian.
width is 2*sigma x or y
'''
# return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)
return lambda x,y: height*np.exp(-(((center_x-x)/width)**2+((center_y-y)/width)**2)/2)
# moments of a 2D gaussian
def moments(data):
'''
Returns (height, x, y, width)
The (circular) gaussian parameters of a 2D distribution by calculating its moments.
width is 2*sigma of the gaussian.
'''
total = data.sum()
X, Y = np.indices(data.shape)
x = (X*data).sum()/total
y = (Y*data).sum()/total
col = data[:, int(y)]
width = float(np.sqrt(abs((np.arange(col.size)-y)**2*col).sum()/col.sum()))
row = data[int(x), :]
# width_y = np.sqrt(abs((np.arange(row.size)-x)**2*row).sum()/row.sum())
height = data.max()
return height, x, y, width
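# Example (sketch, illustrative values): fitgaussian recovers the parameters
# of a synthetic circular gaussian image.
#   img = gaussian(3.0, 12.0, 10.0, 4.0)(*np.indices((25, 25)))
#   height, x, y, width = fitgaussian(img)   # approx. (3.0, 12.0, 10.0, 4.0)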
# returns a 1D gaussian function
def gaussian1d(x, height, mean, sigma):
'''
x : data
height : height
mean : center
sigma : RMS width
'''
return height * np.exp(-(x-mean)**2 / (2*sigma**2))
# analyze ring fluorescence.
def ring_analysis(fov_id, peak_id, Cells, ring_plane='c2'):
'''Add information to the Cell objects about the location of the Z ring.
Sums the fluorescent channel along the long axis of the cell. This can be
plotted directly to give a good idea about the development of the ring.
Also fits a gaussian to the profile.
Parameters
----------
fov_id : int
FOV number of the lineage to analyze.
peak_id : int
Peak number of the lineage to analyze.
Cells : dict of Cell objects (from a Lineages dictionary)
Cells should be prefiltered to match fov_id and peak_id.
ring_plane : str
The suffix of the channel to analyze. 'c1', 'c2', 'sub_c2', etc.
Usage
-----
for fov_id, peaks in Lineages.items():
for peak_id, Cells in peaks.items():
mm3.ring_analysis(fov_id, peak_id, Cells, ring_plane='sub_c2')
'''
peak_width_guess = 2
# Load data
ring_stack = load_stack(fov_id, peak_id, color=ring_plane)
seg_stack = load_stack(fov_id, peak_id, color='seg_unet')
# Load time table to determine first image index.
time_table = load_time_table()
times_all = np.array(sorted(time_table[fov_id].keys()), np.int_)
t0 = times_all[0] # first time index
# Loop through cells
for Cell in Cells.values():
# initialize ring data arrays for cell
Cell.ring_locs = []
Cell.ring_heights = []
Cell.ring_widths = []
Cell.ring_medians = []
Cell.ring_profiles = []
# loop through each time point for this cell
for n, t in enumerate(Cell.times):
# Make mask of fluorescent channel using segmented image
ring_image_masked = np.copy(ring_stack[t-t0])
ring_image_masked[seg_stack[t-t0] != Cell.labels[n]] = 0
# Sum along long axis, use the profile_line function from skimage
# Use orientation of cell as calculated from the ellipsoid fit,
# the known length of the cell from the feret diameter,
# and a width that is greater than the cell width.
# find endpoints of line
centroid = Cell.centroids[n]
orientation = Cell.orientations[n]
length = Cell.lengths[n]
width = Cell.widths[n] * 1.25
# give 2 pixel buffer to each end to capture area outside cell.
p1 = (centroid[0] - np.sin(orientation) * (length+4)/2,
centroid[1] - np.cos(orientation) * (length+4)/2)
p2 = (centroid[0] + np.sin(orientation) * (length+4)/2,
centroid[1] + np.cos(orientation) * (length+4)/2)
# ensure old pole is always first point
if p1[0] > p2[0]:
p1, p2 = p2, p1 # python is cool
profile = profile_line(ring_image_masked, p1, p2, linewidth=width,
order=1, mode='constant', cval=0)
profile_indicies = np.arange(len(profile))
# subtract median from profile, using non-zero values for median
profile_median = np.median(profile[np.nonzero(profile)])
profile_sub = profile - profile_median
profile_sub[profile_sub < 0] = 0
# find peak position simply using maximum.
peak_index = np.argmax(profile)
peak_height = profile[peak_index]
peak_height_sub = profile_sub[peak_index]
try:
# Fit gaussian
p_guess = [peak_height_sub, peak_index, peak_width_guess]
popt, pcov = curve_fit(gaussian1d, profile_indicies,
profile_sub, p0=p_guess)
peak_width = popt[2]
except:
# information('Ring gaussian fit failed. {} {} {}'.format(fov_id, peak_id, t))
peak_width = float('nan')
# Add data to cells
Cell.ring_locs.append(peak_index - 3) # minus 3 because we added 2 before and line_profile adds 1.
Cell.ring_heights.append(peak_height)
Cell.ring_widths.append(peak_width)
Cell.ring_medians.append(profile_median)
Cell.ring_profiles.append(profile) # append whole profile
return
# Calculate Y projection intensity of a fluorescent channel per cell
def profile_analysis(fov_id, peak_id, Cells, profile_plane='c2'):
'''Calculate profile of plane along cell and add information to Cell object. Sums the fluorescent channel along the long axis of the cell.
Parameters
----------
fov_id : int
FOV number of the lineage to analyze.
peak_id : int
Peak number of the lineage to analyze.
Cells : dict of Cell objects (from a Lineages dictionary)
Cells should be prefiltered to match fov_id and peak_id.
profile_plane : str
The suffix of the channel to analyze. 'c1', 'c2', 'sub_c2', etc.
Usage
-----
'''
# Load data
fl_stack = load_stack(fov_id, peak_id, color=profile_plane)
seg_stack = load_stack(fov_id, peak_id, color='seg_unet')
# Load time table to determine first image index.
# load_time_table()
times_all = []
for fov in params['time_table']:
times_all = np.append(times_all, list(params['time_table'][fov].keys()))
times_all = np.unique(times_all)
times_all = np.sort(times_all)
times_all = np.array(times_all,np.int_)
t0 = times_all[0] # first time index
# Loop through cells
for Cell in Cells.values():
# initialize ring data arrays for cell
fl_profiles = []
# loop through each time point for this cell
for n, t in enumerate(Cell.times):
# Make mask of fluorescent channel using segmented image
image_masked = np.copy(fl_stack[t-t0])
image_masked[seg_stack[t-t0] != Cell.labels[n]] = 0
# Sum along long axis, use the profile_line function from skimage
# Use orientation of cell as calculated from the ellipsoid fit,
# the known length of the cell from the feret diameter,
# and a width that is greater than the cell width.
# find endpoints of line
centroid = Cell.centroids[n]
orientation = Cell.orientations[n]
length = Cell.lengths[n]
width = Cell.widths[n] * 1.25
# give 2 pixel buffer to each end to capture area outside cell.
p1 = (centroid[0] - np.sin(orientation) * (length+4)/2,
centroid[1] - np.cos(orientation) * (length+4)/2)
from __future__ import print_function
import sys
import os
dir = os.path.dirname(os.path.abspath(__file__))
from FFTLog_integrals import *
import power_FFTLog as power
import numpy as np
from scipy.interpolate import interp1d
from scipy.integrate import quad
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.ticker
locmin = matplotlib.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1, numticks=100)
def find_ind(k, P):
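    """Split (k, P) into the sub-arrays where P >= 0 and where P < 0, so the
    negative lobes can be drawn dashed as |P| in the log-log plots below."""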
ipos = P >= 0.0
ineg = P < 0.0
kpos, Ppos = k[ipos], P[ipos]
kneg, Pneg = k[ineg], P[ineg]
return (kpos, Ppos, kneg, Pneg)
def plot_all():
N = 1400
nu = -0.6
with_padding = False
save_matrices = False
kw = {'N':N, 'nu':nu, 'with_padding':with_padding, 'save_matrices':save_matrices}
fft_2G22 = FFT_22(kernel='2G22', **kw)
fft_G13 = FFT_13(kernel='G13', **kw)
fft_2K22 = FFT_22(kernel='2K22', **kw)
fft_4KG22 = FFT_22(kernel='4KG22', **kw)
fft_KG13 = FFT_13(kernel='KG13', **kw)
k = np.exp(fft_2G22.lnk)
PL = fft_2G22.PL(k)
# one-loop
P13 = fft_G13.P13(k, ell=0)
P22 = fft_2G22.P22(k, ell=0)
P_1loop_corr = P22 + 2*P13
P_2K22_ell0 = fft_2K22.DelP0(k) # Note we subtract out P_11 !!!
P_2K22_ell2 = fft_2K22.P22(k, ell=2)
P_2K22_ell4 = fft_2K22.P22(k, ell=4)
P_4KG22_ell0 = fft_4KG22.P22(k, ell=0)
P_4KG22_ell2 = fft_4KG22.P22(k, ell=2)
P_KG13_ell0 = fft_KG13.P13(k, ell=0)
P_KG13_ell2 = fft_KG13.P13(k, ell=2)
P_3K13_ell0 = fft_KG13.K3_ell0(k)
P_3K13_ell2 = fft_KG13.K3_ell2(k)
P_1loop = PL + P_1loop_corr # no rsd corrections
P0 = P_2K22_ell0 + P_4KG22_ell0 + P_KG13_ell0 + (P_1loop) + P_3K13_ell0
P2 = P_2K22_ell2 + P_4KG22_ell2 + P_KG13_ell2 + P_3K13_ell2
P4 = P_2K22_ell4
plt.figure(figsize=(6,6))
plt.loglog(k, P0, 'k', lw=1.1) # label=r'$\ell=0$',
# plt.loglog(k, np.abs(P2), 'b', label=r'$\ell=2$', lw=1.2)
kp, P2p, kn, P2n = find_ind(k, P2)
plt.loglog(kp, P2p, 'b', lw=1.4) # label=r'$\ell=2$',
plt.loglog(kn, np.abs(P2n), 'b--', dashes=(5,3), lw=1.4)
plt.loglog(k, P4, 'r', lw=1.4) # label=r'$\ell=4$',
plt.loglog(k, P_1loop, 'k-.', label=r'$P^{1\!-\!loop}_{\theta\theta}$', lw=1.1)
plt.loglog(k, PL, c='gray', ls=':', lw=1.4)
plt.text(x=0.0035, y=7500, s=r'$P^0_{\theta\theta}$')
plt.text(x=0.19, y=2430, s=r'$P_L$')
plt.text(x=3e-2, y=400, s=r'$P^2_{\theta\theta}$', c='b')
plt.text(x=5e-2, y=36, s=r'$P^4_{\theta\theta}$', c='r')
# plt.grid(ls=':')
plt.legend(frameon=False, loc='upper right', fontsize=16)
plt.tick_params(right=True, top=True, which='both')
# plt.xlim(1e-3,3e0)
plt.xlim(3e-3,0.3)
plt.ylim(1e1,4e4)
# plt.xticks([1e-3,1e-2,1e-1,1e0])
plt.xlabel(r'Wavenumber $k$ [h Mpc$^{-1}$]')
plt.ylabel(r'$P^\ell_{\theta\theta}(k)$ [h$^{-3}$ Mpc$^3$]')
plt.show()
def plot_ell0_compts():
N = 1400
nu = -0.6
with_padding = False
save_matrices = False
kw = {'N':N, 'nu':nu, 'with_padding':with_padding, 'save_matrices':save_matrices}
fft_2G22 = FFT_22(kernel='2G22', **kw)
fft_G13 = FFT_13(kernel='G13', **kw)
fft_2K22 = FFT_22(kernel='2K22', **kw)
fft_4KG22 = FFT_22(kernel='4KG22', **kw)
fft_KG13 = FFT_13(kernel='KG13', **kw)
k = np.exp(fft_2G22.lnk)
PL = fft_2G22.PL(k)
# one-loop
P13 = fft_G13.P13(k, ell=0)
P22 = fft_2G22.P22(k, ell=0)
P_1loop_corr = P22 + 2*P13
P_2K22_ell0 = fft_2K22.DelP0(k) # Note we subtract out P_11 !!!
P_4KG22_ell0 = fft_4KG22.P22(k, ell=0)
P_KG13_ell0 = fft_KG13.P13(k, ell=0)
# the last term
P_3K13_ell0 = fft_KG13.K3_ell0(k)
P_1loop = PL + P_1loop_corr # no rsd corrections
P0 = P_2K22_ell0 + P_4KG22_ell0 + P_KG13_ell0 + (P_1loop) + P_3K13_ell0
plt.figure(figsize=(6,6))
plt.loglog(k, P0, 'k', lw=1.2)
plt.loglog(k, P_2K22_ell0, 'b', lw=1.2)
plt.loglog(k, P_4KG22_ell0, 'magenta', lw=1.2)
plt.loglog(k, np.abs(P_KG13_ell0), 'r', ls='--', dashes=(5,3), lw=1.2)
plt.loglog(k, np.abs(P_3K13_ell0), 'lime', ls='--', dashes=(5,3), lw=1.2)
plt.loglog(k, np.abs(P22+2*P13), 'turquoise', ls='--', dashes=(5,3), lw=1.2)
plt.loglog(k, PL, c='gray', ls=':', lw=1.2)
plt.text(x=0.0035, y=7500, s=r'$P^0_{\theta\theta}$')
plt.text(x=0.19, y=2430, s=r'$P_L$')
plt.text(x=0.015, y=1100, s=r'$P_{22}+2P_{13}$', c='turquoise')
plt.text(x=0.1, y=74, s=r'$K^{(2)}_S K^{(2)}_S$', c='b')
plt.text(x=0.096, y=283, s=r'$K^{(2)}_S G^{(2)}_S$ (22)', c='magenta', fontsize=13) # 0.0269
plt.text(x=0.0155, y=115, s=r'$K^{(2)}_S G^{(2)}_S$ (13)', c='r', fontsize=13) # label=r'$KG13$',
plt.text(x=0.01, y=16, s=r'$K^{(3)}_S$', c='lime') # label=r'$3K13$'
# plt.grid(ls=':')
# plt.legend(frameon=False, loc='center left', fontsize=14)
# plt.xlim(1e-3,3e0)
plt.xlim(3e-3,0.3)
plt.ylim(1e1,4e4)
plt.tick_params(right=True, top=True, which='both')
# plt.xticks([1e-3,1e-2,1e-1,1e0])
plt.xlabel(r'Wavenumber $k$ [h Mpc$^{-1}$]')
plt.ylabel(r'$P^0_{\theta\theta}(k)\,$ [h$^{-3}$ Mpc$^3$]')
plt.show()
def plot_oneloop_theta():
k, PL, P13, P22, P_1loop = power.Ptt_1loop(k=None, PL=None, get_compts=True, N=1024)
fig, ax = plt.subplots(figsize=(6,6))
ax.loglog(k, P_1loop, 'k', label=r'$P_L+P_{22}+2P_{13}$', lw=1.4)
kp, Pp, kn, Pn = find_ind(k, P22+2*P13)
ax.loglog(kp, Pp, 'b', label=r'$P_{22}+2P_{13}$', lw=1.2)
ax.loglog(kn, np.abs(Pn), 'b--', lw=1.2)
# ax.loglog(k, np.abs(P22+2*P13), 'b', label=r'$|P_{22}+2P_{13}|$', lw=1.2)
ax.loglog(k, P22, 'r', label=r'$P_{22}$', lw=1.2)
ax.loglog(k, np.abs(2*P13), 'lime', ls='--', label=r'$2P_{13}$', lw=1.2)
ax.loglog(k, PL, 'gray', ls=':', label=r'$P_L$', lw=1.4)
ax.legend(frameon=False, loc='upper right', fontsize=13)
ax.set_xlim(2e-4,1e2)
ax.set_ylim(1e0,1e5)
ax.tick_params(right=True, top=True, which='both')
ax.xaxis.set_minor_locator(locmin)
ax.xaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())
ax.set_xticks([1e-3,1e-2,1e-1,1e0,1e1,1e2])
ax.set_xlabel(r'Wavenumber $k$ [h Mpc$^{-1}$]')
ax.set_ylabel(r'$P_{\theta\theta}(k)$ [h$^{-3}$ Mpc$^3$]')
plt.show()
def plot_oneloop_matter():
k, PL, P13, P22, P_1loop = power.Pmm_1loop(k=None, PL=None, get_compts=True, N=1024)
fig, ax = plt.subplots(figsize=(6,6))
ax.loglog(k, P_1loop, 'k', label=r'$P_L+P_{22}+2P_{13}$', lw=1.4)
kp, Pp, kn, Pn = find_ind(k, P22+2*P13)
ax.loglog(kp, Pp, 'b', label=r'$P_{22}+2P_{13}$', lw=1.2)
ax.loglog(kn, np.abs(Pn), 'b--', lw=1.2)
ax.loglog(k, P22, 'r', label=r'$P_{22}$', lw=1.2)
ax.loglog(k, np.abs(2*P13), 'lime', ls='--', label=r'$2P_{13}$', lw=1.2)
ax.loglog(k, PL, 'gray', ls=':', label=r'$P_L$', lw=1.4)
# ax.grid(ls=':')
ax.legend(frameon=False, loc='upper right', fontsize=13)
ax.set_xlim(2e-4,1e2)
ax.set_ylim(1e0,1e5)
ax.tick_params(right=True, top=True, which='both')
ax.xaxis.set_minor_locator(locmin)
ax.xaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())
ax.set_xticks([1e-3,1e-2,1e-1,1e0,1e1,1e2])
ax.set_xlabel(r'Wavenumber $k$ [h Mpc$^{-1}$]')
ax.set_ylabel(r'$P_{mm}(k)$ [h$^{-3}$ Mpc$^3$]')
plt.show()
def plot_Ps_vv_with_ratio(N=512): # P(k,mu) for diff mu
H0f = 51.57 # Om^0.55=0.3^0.55=0.5157
kin, plin = np.loadtxt('Pk_Planck15_large.dat', unpack=True, skiprows=4)
F = power.Ps_tt(kin, plin, N=N)
k = F.k
mu1, mu2, mu3, mu4 = 1.0, 0.6, 0.3, 0.1
Pvv1_norsd = F.Pvv_norsd(mu1)
Pvv2_norsd = F.Pvv_norsd(mu2)
Pvv3_norsd = F.Pvv_norsd(mu3)
Pvv4_norsd = F.Pvv_norsd(mu4)
Psvv1 = F.Psvv(mu1, with_fog=False)
Psvv2 = F.Psvv(mu2, with_fog=False)
Psvv3 = F.Psvv(mu3, with_fog=False)
Psvv4 = F.Psvv(mu4, with_fog=False)
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(6,8), gridspec_kw={'height_ratios': [2.5, 1], 'hspace': 0.0})
ax[0].loglog(k, Psvv1, 'k', lw=1.2, label=r'$\mu=1.0$')
ax[0].loglog(k, Psvv2, 'b', lw=1.2, label=r'$\mu=0.6$')
ax[0].loglog(k, Psvv3, 'r', lw=1.2, label=r'$\mu=0.3$')
ax[0].loglog(k, Psvv4, 'lime', lw=1.2, label=r'$\mu=0.1$')
ax[0].loglog(k, Pvv1_norsd, 'k', ls=':', lw=1.7)
ax[0].loglog(k, Pvv2_norsd, 'b', ls=':', lw=1.7)
ax[0].loglog(k, Pvv3_norsd, 'r', ls=':', lw=1.7)
ax[0].loglog(k, Pvv4_norsd, 'lime', ls=':', lw=1.5)
ax[1].semilogx(k, Psvv1/Pvv1_norsd, 'k', lw=1.2)
ax[1].semilogx(k, Psvv2/Pvv2_norsd, 'b', lw=1.2)
ax[1].semilogx(k, Psvv3/Pvv3_norsd, 'r', lw=1.2)
ax[1].semilogx(k, Psvv4/Pvv4_norsd, 'lime', lw=1.2)
ax[0].legend(frameon=False, loc='upper right', fontsize=16)
ax[1].text(x=4e-3, y=0.4, s=r'$P^s_{vv}(k,\mu)\,/\,P_{vv,no\:RSD}(k,\mu)$', color='k', fontsize=18)
ax[1].set_yticks([0.4,0.6,0.8,1.0])
ax[0].set_xlim(3e-3,0.24)
ax[0].set_ylim(8e0*H0f**2, 2e9*H0f**2)
ax[1].set_ylim(0.3,1.05)
ax[0].tick_params(right=True, top=True, which='both')
ax[1].tick_params(right=True, top=True, which='both')
ax[1].set_xlabel(r'Wavenumber $k$ [h Mpc$^{-1}$]')
ax[0].set_ylabel(r'$P^s_{vv}(k,\mu)$ [$(km/s)^2\, (h^{-1}\, Mpc)^3$]')
ax[1].set_ylabel(r'Ratio')
ax[0].yaxis.set_minor_locator(locmin)
ax[0].yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())
plt.show()
def plot_Ps_vv_with_ratio2(N=512): # Pvv^ell
H0f = 51.57
kin, plin = np.loadtxt('Pk_Planck15_large.dat', unpack=True, skiprows=4)
F = power.Ps_tt(kin, plin, N=N)
P0vv = F.Psvv_ell(ell=0, with_fog=False)
P2vv = F.Psvv_ell(ell=2, with_fog=False)
P4vv = F.Psvv_ell(ell=4, with_fog=False)
P6vv = F.Psvv_ell(ell=6, with_fog=False)
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(6,8), gridspec_kw={'height_ratios': [2.5, 1], 'hspace': 0.0})
ax[0].loglog(F.k, P0vv, 'k', lw=1.2, label=r'$\ell=0$')
ax[0].loglog(F.k, P2vv, 'b', lw=1.2, label=r'$\ell=2$')
pos_signal = np.ma.masked_where(P4vv<=0.0, P4vv)
neg_signal = np.ma.masked_where(P4vv>0.0, P4vv)
ax[0].loglog(F.k, pos_signal, 'r', lw=1.2, label=r'$\ell=4$')
ax[0].loglog(F.k, np.abs(neg_signal), 'r--', dashes=(5,3), lw=1.2)
ax[0].loglog(F.k, P6vv, 'lime', lw=1.2, label=r'$\ell=6$')
ax[0].loglog(F.k, F.P0vv_norsd, 'k:', lw=1.7)
ax[0].loglog(F.k, F.P2vv_norsd, 'b:', lw=1.7)
ax[1].semilogx(F.k, P0vv/F.P0vv_norsd, 'k', lw=1.2, label=r'$P^0_{vv}\,/\,P^0_{vv,no\:RSD}$')
ax[1].semilogx(F.k, P2vv/F.P2vv_norsd, 'b', lw=1.2, label=r'$P^2_{vv}\,/\,P^2_{vv,no\:RSD}$')
ax[0].legend(frameon=False, loc='upper right', fontsize=18, ncol=1)
ax[1].legend(frameon=False, loc='lower left', fontsize=18, ncol=1)
ax[1].set_yticks([0.4,0.6,0.8,1.0])
ax[0].set_xlim(3e-3,0.24)
ax[0].set_ylim(8e0*H0f**2,2e9*H0f**2)
ax[1].set_ylim(0.3,1.05)
ax[0].tick_params(right=True, top=True, which='both')
ax[1].tick_params(right=True, top=True, which='both')
ax[1].set_xlabel(r'Wavenumber $k$ [h Mpc$^{-1}$]')
ax[0].set_ylabel(r'$P^\ell_{vv}(k)$ [$(km/s)^2\, (h^{-1}\, Mpc)^3$]')
ax[1].set_ylabel(r'Ratio')
ax[0].yaxis.set_minor_locator(locmin)
ax[0].yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())
plt.show()
def plot_Ps_vv_disp_with_ratio(N=512): # P(k,mu) for diff mu
H0f = 51.57 # Om^0.55=0.3^0.55=0.5157
sig_fog = 3.5
kin, plin = np.loadtxt('Pk_Planck15_large.dat', unpack=True, skiprows=4)
F = power.Ps_tt(kin, plin, sig_fog=sig_fog, N=N)
k = F.k
mu1, mu2, mu3, mu4 = 1.0, 0.6, 0.3, 0.1
Pvv1_norsd = F.Pvv_norsd(mu1)
Pvv2_norsd = F.Pvv_norsd(mu2)
Pvv3_norsd = F.Pvv_norsd(mu3)
Pvv4_norsd = F.Pvv_norsd(mu4)
Psvv1_disp = F.Psvv(mu1, with_fog=True)
Psvv2_disp = F.Psvv(mu2, with_fog=True)
Psvv3_disp = F.Psvv(mu3, with_fog=True)
Psvv4_disp = F.Psvv(mu4, with_fog=True)
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(6,8), gridspec_kw={'height_ratios': [2.5, 1], 'hspace': 0.0})
ax[0].loglog(k, Psvv1_disp, 'k', lw=1.2, label=r'$\mu=1.0$')
ax[0].loglog(k, Psvv2_disp, 'b', lw=1.2, label=r'$\mu=0.6$')
ax[0].loglog(k, Psvv3_disp, 'r', lw=1.2, label=r'$\mu=0.3$')
ax[0].loglog(k, Psvv4_disp, 'lime', lw=1.2, label=r'$\mu=0.1$')
ax[0].loglog(k, Pvv1_norsd, 'k', ls=':', lw=1.7)
ax[0].loglog(k, Pvv2_norsd, 'b', ls=':', lw=1.7)
ax[0].loglog(k, Pvv3_norsd, 'r', ls=':', lw=1.7)
ax[0].loglog(k, Pvv4_norsd, 'lime', ls=':', lw=1.5)
ax[1].semilogx(k, Psvv1_disp/Pvv1_norsd, 'k', lw=1.2)
ax[1].semilogx(k, Psvv2_disp/Pvv2_norsd, 'b', lw=1.2)
ax[1].semilogx(k, Psvv3_disp/Pvv3_norsd, 'r', lw=1.2)
ax[1].semilogx(k, Psvv4_disp/Pvv4_norsd, 'lime', lw=1.2)
# uncomment to add more clutter to the plot
# Ps1 = F.Ps(mu1) * (H0f*mu1/k)**2 # no damping
# Ps2 = F.Ps(mu2) * (H0f*mu2/k)**2
# Ps3 = F.Ps(mu3) * (H0f*mu3/k)**2
# Ps4 = F.Ps(mu4) * (H0f*mu4/k)**2
# ax[1].semilogx(k, Ps1/Pvv1_norsd, 'k:', lw=1.2)
# ax[1].semilogx(k, Ps2/Pvv2_norsd, 'b:', lw=1.2)
# ax[1].semilogx(k, Ps3/Pvv3_norsd, 'r:', lw=1.2)
# ax[1].semilogx(k, Ps4/Pvv4_norsd, 'lime', ls=':', lw=1.2)
ax[0].legend(frameon=False, loc='upper right', fontsize=16)
ax[1].text(x=4e-3, y=0.4, s=r'$P^s_{vv}(k,\mu)\,/\,P_{vv,no\:RSD}(k,\mu)$', color='k', fontsize=18)
ax[1].set_yticks([0.4,0.6,0.8,1.0])
ax[0].set_xlim(3e-3,0.24)
ax[0].set_ylim(8e0*H0f**2,2e9*H0f**2)
ax[1].set_ylim(0.3,1.05)
ax[0].tick_params(right=True, top=True, which='both')
ax[1].tick_params(right=True, top=True, which='both')
ax[1].set_xlabel(r'Wavenumber $k$ [h Mpc$^{-1}$]')
ax[0].set_ylabel(r'$P^s_{vv}(k,\mu)$ [$(km/s)^2\, (h^{-1}\, Mpc)^3$]')
ax[1].set_ylabel(r'Ratio')
ax[0].yaxis.set_minor_locator(locmin)
ax[0].yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())
plt.show()
def plot_Ps_vv_disp_with_ratio2(N=512): # Puu^ell for ell=0,2
H0f = 51.57 # Om^0.55=0.3^0.55=0.5157
sig_fog = 3.5 # 6.a
kin, plin = np.loadtxt('Pk_Planck15_large.dat', unpack=True, skiprows=4)
F = power.Ps_tt(kin, plin, sig_fog=sig_fog, N=N)
P0vv_disp = F.Psvv_ell(ell=0, with_fog=True)
P2vv_disp = F.Psvv_ell(ell=2, with_fog=True)
P4vv_disp = F.Psvv_ell(ell=4, with_fog=True)
P6vv_disp = F.Psvv_ell(ell=6, with_fog=True)
P8vv_disp = F.Psvv_ell(ell=8, with_fog=True)
P10vv_disp = F.Psvv_ell(ell=10, with_fog=True)
P0vv = F.Psvv_ell(ell=0, with_fog=False)
P2vv = F.Psvv_ell(ell=2, with_fog=False)
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(6,8), gridspec_kw={'height_ratios': [2.5, 1], 'hspace': 0.0})
ax[0].loglog(F.k, P0vv_disp, 'k', lw=1.2, label=r'$\ell=0$')
ax[0].loglog(F.k, P2vv_disp, 'b', lw=1.2, label=r'$\ell=2$')
pos_signal = np.ma.masked_where(P4vv_disp<=0.0, P4vv_disp)
import scipy.ndimage as scnd
import scipy.optimize as sio
import numpy as np
import numba
import warnings
import stemtool as st
@numba.jit
def fit_nbed_disks(corr_image, disk_size, positions, diff_spots):
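    """Fit a 2D gaussian to each diffraction disk of a cross-correlated CBED
    pattern, then solve a least-squares system mapping the integer diffraction
    indices (diff_spots) to the fitted disk positions. Returns the fitted disk
    centers, the pattern center, and the 2x2 lattice matrix.
    """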
warnings.filterwarnings("ignore")
positions = np.asarray(positions, dtype=np.float64)
diff_spots = np.asarray(diff_spots, dtype=np.float64)
fitted_disk_list = np.zeros_like(positions)
disk_locations = np.zeros_like(positions)
for ii in range(int(np.shape(positions)[0])):
posx = positions[ii, 0]
posy = positions[ii, 1]
par = st.util.fit_gaussian2D_mask(corr_image, posx, posy, disk_size)
fitted_disk_list[ii, 0] = par[0]
fitted_disk_list[ii, 1] = par[1]
disk_locations = np.copy(fitted_disk_list)
disk_locations[:, 1] = 0 - disk_locations[:, 1]
center = disk_locations[
np.logical_and((diff_spots[:, 0] == 0), (diff_spots[:, 1] == 0)), :
]
cx = center[0, 0]
cy = center[0, 1]
disk_locations[:, 0:2] = disk_locations[:, 0:2] - np.asarray(
(cx, cy), dtype=np.float64
)
lcbed, _, _, _ = np.linalg.lstsq(diff_spots, disk_locations, rcond=None)
cy = (-1) * cy
return fitted_disk_list, np.asarray((cx, cy), dtype=np.float64), lcbed
def sobel_filter(image, med_filter=50):
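    """Log-scale the image, apply a Sobel filter, and clip values to within a
    factor of med_filter of the median on either side."""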
ls_image, _ = st.util.sobel(st.util.image_logarizer(image))
ls_image[ls_image > (med_filter * np.median(ls_image))] = med_filter * np.median(
ls_image
)
ls_image[ls_image < (np.median(ls_image) / med_filter)] = (
np.median(ls_image) / med_filter
)
return ls_image
@numba.jit
def strain_and_disk(data4D, disk_size, pixel_list_xy, disk_list, ROI=1, med_factor=50):
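    """Compute strain maps (e_xx, e_xy, e_th, e_yy), central-disk shifts
    (disk_x, disk_y), and center-of-mass shifts (COM_x, COM_y) for every scan
    position of a 4D-STEM dataset, by fitting disk positions in each CBED
    pattern and comparing them with the mean pattern's disk lattice.
    """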
warnings.filterwarnings("ignore")
if np.size(ROI) < 2:
ROI = np.ones((data4D.shape[2], data4D.shape[3]), dtype=bool)
# Calculate needed values
scan_size = np.asarray(data4D.shape)[2:4]
sy, sx = np.mgrid[0 : scan_size[0], 0 : scan_size[1]]
scan_positions = (np.asarray((np.ravel(sy), np.ravel(sx)))).astype(int)
cbed_size = np.asarray(data4D.shape)[0:2]
yy, xx = np.mgrid[0 : cbed_size[0], 0 : cbed_size[1]]
center_disk = (
st.util.make_circle(cbed_size, cbed_size[1] / 2, cbed_size[0] / 2, disk_size)
).astype(np.float64)
i_matrix = (np.eye(2)).astype(np.float64)
sobel_center_disk, _ = st.util.sobel(center_disk)
# Initialize matrices
e_xx = np.zeros(scan_size, dtype=np.float64)
e_xy = np.zeros(scan_size, dtype=np.float64)
e_th = np.zeros(scan_size, dtype=np.float64)
e_yy = np.zeros(scan_size, dtype=np.float64)
disk_x = np.zeros(scan_size, dtype=np.float64)
disk_y = np.zeros(scan_size, dtype=np.float64)
COM_x = np.zeros(scan_size, dtype=np.float64)
COM_y = np.zeros(scan_size, dtype=np.float64)
# Calculate for mean CBED if no reference
mean_cbed = np.mean(data4D, axis=(-1, -2), dtype=np.float64)
mean_ls_cbed, _ = st.util.sobel(st.util.image_logarizer(mean_cbed))
mean_ls_cbed[
mean_ls_cbed > med_factor * np.median(mean_ls_cbed)
] = med_factor * np.median(mean_ls_cbed)
mean_ls_cbed[mean_ls_cbed < np.median(mean_ls_cbed) / med_factor] = (
np.median(mean_ls_cbed) / med_factor
)
mean_lsc = st.util.cross_corr_unpadded(mean_ls_cbed, sobel_center_disk)
_, mean_center, mean_axes = fit_nbed_disks(
mean_lsc, disk_size, pixel_list_xy, disk_list
)
axes_lengths = ((mean_axes[:, 0] ** 2) + (mean_axes[:, 1] ** 2)) ** 0.5
beam_r = axes_lengths[1]
inverse_axes = np.linalg.inv(mean_axes)
for pp in range(np.size(sy)):
ii = scan_positions[0, pp]
jj = scan_positions[1, pp]
pattern = data4D[:, :, ii, jj]
pattern_ls, _ = st.util.sobel(st.util.image_logarizer(pattern))
pattern_ls[pattern_ls > med_factor * np.median(pattern_ls)] = np.median(
pattern_ls
)
pattern_lsc = st.util.cross_corr_unpadded(pattern_ls, sobel_center_disk)
_, pattern_center, pattern_axes = fit_nbed_disks(
pattern_lsc, disk_size, pixel_list_xy, disk_list
)
pcirc = (
(((yy - pattern_center[1]) ** 2) + ((xx - pattern_center[0]) ** 2)) ** 0.5
) <= beam_r
pattern_x = np.sum(pattern[pcirc] * xx[pcirc]) / np.sum(pattern[pcirc])
pattern_y = np.sum(pattern[pcirc] * yy[pcirc]) / np.sum(pattern[pcirc])
t_pattern = np.matmul(pattern_axes, inverse_axes)
s_pattern = t_pattern - i_matrix
e_xx[ii, jj] = -s_pattern[0, 0]
e_xy[ii, jj] = -(s_pattern[0, 1] + s_pattern[1, 0])
e_th[ii, jj] = -(s_pattern[0, 1] - s_pattern[1, 0])
e_yy[ii, jj] = -s_pattern[1, 1]
disk_x[ii, jj] = pattern_center[0] - mean_center[0]
disk_y[ii, jj] = pattern_center[1] - mean_center[1]
COM_x[ii, jj] = pattern_x - mean_center[0]
COM_y[ii, jj] = pattern_y - mean_center[1]
return e_xx, e_xy, e_th, e_yy, disk_x, disk_y, COM_x, COM_y
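# Example usage (sketch): data4D is a (ky, kx, ry, rx) 4D-STEM dataset, and
# pixel_list_xy / disk_list pair approximate disk positions with their integer
# diffraction indices. The variable names below are placeholders.
#   exx, exy, eth, eyy, dx, dy, comx, comy = strain_and_disk(
#       data4D, disk_size=15, pixel_list_xy=peaks_xy, disk_list=hkl_indices)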
@numba.jit
def dpc_central_disk(data4D, disk_size, position, ROI=1, med_val=20):
"""
DPC routine on only the central disk
Parameters
----------
data4D: ndarray
The 4 dimensional dataset that will be analyzed
The first two dimensions are the Fourier space
diffraction dimensions and the last two dimensions
are the real space scanning dimensions
disk_size: float
Size of the central disk
position: ndarray
X and Y positions
This is the initial guess that will be refined
ROI: ndarray
The region of interest for the scanning region
that will be analyzed. If no ROI is given then
the entire scanned area will be analyzed
med_val: float
Sometimes some pixels are either too bright in
the diifraction patterns due to stray muons or
are zero due to dead detector pixels. This removes
the effect of such pixels before Sobel filtering
Returns
-------
p_cen: ndarray
P positions of the central disk
q_cen: ndarray
Q positions of the central disk
p_com: ndarray
P positions of the center of mass
of the central disk
q_com: ndarray
Q positions of the center of mass
of the central disk
Notes
-----
This is for when we want to perform DPC without bothering
about the higher order disks. The ROI of the 4D dataset is
extracted, the central disk is fitted at each ROI point, a
disk is then drawn centered on the edge-fitted center, and
the COM inside that disk is also calculated.
:Authors:
<NAME> <<EMAIL>>
"""
warnings.filterwarnings("ignore")
if np.size(ROI) < 2:
ROI = np.ones((data4D.shape[2], data4D.shape[3]), dtype=bool)
yy, xx = np.mgrid[0 : data4D.shape[2], 0 : data4D.shape[3]]
data4D_ROI = data4D[:, :, yy[ROI], xx[ROI]]
pp, qq = np.mgrid[0 : data4D.shape[0], 0 : data4D.shape[1]]
no_points = np.sum(ROI)
fitted_pos = np.zeros((2, no_points), dtype=np.float64)
fitted_com = np.zeros((2, no_points), dtype=np.float64)
pos_p = position[0]
pos_q = position[1]
corr_disk = st.util.make_circle(
np.asarray(data4D.shape[0:2]), pos_p, pos_q, disk_size
)
sobel_corr_disk, _ = st.util.sobel(corr_disk)
p_cen = np.zeros((data4D.shape[2], data4D.shape[3]), dtype=np.float64)
q_cen = np.zeros((data4D.shape[2], data4D.shape[3]), dtype=np.float64)
p_com = np.zeros((data4D.shape[2], data4D.shape[3]), dtype=np.float64)
q_com = np.zeros((data4D.shape[2], data4D.shape[3]), dtype=np.float64)
import numpy as np
import csv
import tkinter as tk
import tkinter.simpledialog as ts
from tkinter import ttk
from PIL import ImageTk, Image
import tkinter.filedialog as tf
import tkinter.messagebox as tM
import physfunc as phys
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.backends.backend_tkagg as tkagg
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib.pylab as pylab
import datetime
import time
params = {'legend.fontsize': 'x-large',
'figure.figsize': (7.5, 7),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
plt.rcParams.update({'font.size': 10})
defaultsettings, defaultList = phys.parse_csv('defaults.csv', 4, 0, 0, 'No')
mesh_list = phys.ConvertToInt(defaultsettings[defaultList[0]])
regions = defaultsettings[defaultList[1]]
mesh_line_set = defaultsettings[defaultList[2]]
default_meshlineposition = phys.ConvertToFloat(defaultsettings[defaultList[3]])
concentration_data = defaultsettings[defaultList[4]]
default_concs = phys.ConvertToFloat(defaultsettings[defaultList[5]])
default_interface = defaultsettings[defaultList[6]]
default_IntPosition = phys.ConvertToFloat(defaultsettings[defaultList[7]])
default_Vnumbers = phys.ConvertToInt(defaultsettings[defaultList[8]])
default_Vmeshlines = [round(float(i)) for i in defaultsettings[defaultList[9]]]
default_others = phys.ConvertToFloat(defaultsettings[defaultList[10]])
default_info = defaultsettings[defaultList[11]]
default_infoValues = defaultsettings[defaultList[12]]
default_Plot = defaultsettings[defaultList[13]]
default_PlotValue = defaultsettings[defaultList[14]]
# temp data
vlines = [0] * len(default_meshlineposition)
line_temp = [0] * 2
temp_Vpt = 0
# IO function
def SetDefault():
with open('defaults.csv', 'w') as default_file:
default_file.write('%s' % defaultList[0])
for i in range(1, len(defaultList)):
default_file.write(',%s' % defaultList[i])
lengthlist = [len(mesh_list), len(regions), len(mesh_line_set),
len(default_meshlineposition), len(concentration_data),
len(default_concs), len(default_interface),
len(default_IntPosition), len(default_Vnumbers),
len(default_Vmeshlines), len(default_others),
len(default_info), len(default_infoValues),
len(default_Plot), len(default_PlotValue)]
for i in range(max(lengthlist)):
datalist = str(mesh_values[i].get())
writelist = [0] * len(lengthlist)
for j in range(len(lengthlist)):
if i >= lengthlist[j]:
writelist[j] = ''
else:
if j == 0:
writelist[j] = str(mesh_values[i].get())
elif j == 1:
writelist[j] = regions[i]
elif j == 2:
writelist[j] = mesh_line_set[i]
elif j == 3:
writelist[j] = str(mesh_line_positions[i].get())
elif j == 4:
writelist[j] = concentration_data[i]
elif j == 5:
writelist[j] = str(concentrations[i].get())
elif j == 6:
writelist[j] = default_interface[i]
elif j == 7:
writelist[j] = str(interface_list[i].get())
elif j == 8:
writelist[j] = str(Vmesh_number[i].get())
elif j == 9:
if i == 0:
writelist[j] = str(0)
else:
writelist[j] = str(Vmesh_lineEnt[i - 1].get())
elif j == 10:
writelist[j] = str(othersettings[i].get())
elif j == 11:
writelist[j] = default_info[i]
elif j == 12:
if len(InfoArray[i].get()) == 0:
writelist[j] = 'None'
else:
writelist[j] = str(InfoArray[i].get())
elif j == 13:
writelist[j] = default_Plot[i]
elif j == 14:
if len(PlotDataArray[i].get()) == 0:
writelist[j] = 'None'
else:
writelist[j] = str(PlotDataArray[i].get())
if j > 0:
datalist = datalist + ',' + writelist[j]
default_file.write('\n%s' % datalist)
def readcurveDoubleClick(event):
global x_raw
global doping_raw
try:
data
except NameError:
tM.showerror("Error", "You haven't load data.")
else:
try:
load_index
except NameError:
tM.showerror("Error", "You need to select a data first.")
else:
x_raw = np.asarray(data[curvename[load_index]]['X'])
doping_raw = np.asarray(data[curvename[load_index]]['Y'])
StoreVariables('x_raw(length)', len(x_raw))
StoreVariables('doping_raw(length)', len(doping_raw))
log_region.insert(tk.END, "\nSuccessfully read raw doping data (x, conc.)")
log_region.see(tk.END)
ax1.set_title(r'Doping profile @ %s' % curvename[load_index])
bar1.draw()
ax2.set_title(
r'Depletion region boundary position relative to the P/N junction @ %s' % curvename[load_index])
bar2.draw()
def readcurve():
global x_raw
global doping_raw
try:
data
except NameError:
tM.showerror("Error", "You haven't load data.")
else:
try:
load_index
except NameError:
tM.showerror("Error", "You need to select a data first.")
else:
x_raw = | np.asarray(data[curvename[load_index]]['X']) | numpy.asarray |
"""A module containing scripts that run demonstrative examples of the
Voronoi Cell Finite Element Method package (vcfempy). This can be
run as a standalone script since it has the __name__ == '__main__' idiom.
"""
import os
import sys
# add relative path to package, in case it is not installed
sys.path.insert(0, os.path.abspath('../src/'))

# imports inferred from usage below (msh, mtl, np, plt, shp); the vcfempy
# module paths are an assumption based on the class names used
import numpy as np
import matplotlib.pyplot as plt
import shapely.geometry as shp

import vcfempy.materials as mtl
import vcfempy.meshgen as msh
def rectangular_mesh():
"""An example demonstrating mesh generation for a simple rectangular
domain with a single material
"""
print('*** Simple rectangular domain:\n')
# initialize the mesh object
rect_mesh = msh.PolyMesh2D('Rectangular Mesh')
# add main corner vertices
rect_mesh.add_vertices([[0, 0], [0, 20], [0, 40.],
[20, 40], [20, 20], [20, 0]])
# insert boundary vertices
# here, use a list comprehension to add all vertices in clockwise order
rect_mesh.insert_boundary_vertices(0, [k for k
in range(rect_mesh.num_vertices)])
# add material types and regions
# Note: here we create a MaterialRegion2D object and
# then add it to the mesh
clay = mtl.Material('clay', color='xkcd:clay')
msh.MaterialRegion2D(rect_mesh, rect_mesh.boundary_vertices, clay)
# generate mesh and print properties
# Note: here [16,32] is the grid size for mesh seed points
# and 0.2 is the degree of random shifting
rect_mesh.mesh_scale = 5.0
rect_mesh.mesh_rand = 0.2
rect_mesh.generate_mesh()
print(rect_mesh)
# plot histogram of number of nodes per element
fig = plt.figure()
ax = plt.gca()
ax.hist(rect_mesh.num_nodes_per_element,
bins=[k for k in range(3, 11)],
align='left', rwidth=0.95, color='xkcd:gray')
ax.set_xlabel('# nodes in element', fontsize=12, fontweight='bold')
ax.set_ylabel('# elements', fontsize=12, fontweight='bold')
ax.set_title('Rectangular Mesh Histogram', fontsize=14, fontweight='bold')
for tick in ax.get_xticklabels() + ax.get_yticklabels():
tick.set_fontsize(12)
plt.savefig('rect_mesh_hist.png')
# plot mesh
fig = plt.figure()
fig.set_size_inches((10, 10))
ax = rect_mesh.plot_mesh()
rect_mesh.plot_boundaries()
rect_mesh.plot_mesh_edges()
rect_mesh.plot_mesh(element_quad_points=True)
rect_mesh.plot_vertices()
rect_mesh.plot_nodes()
ax.set_xlabel('x [m]', fontsize=12, fontweight='bold')
ax.set_ylabel('y [m]', fontsize=12, fontweight='bold')
ax.set_title('Rectangular Mesh', fontsize=14, fontweight='bold')
for tick in ax.get_xticklabels() + ax.get_yticklabels():
tick.set_fontsize(12)
ax.axis('equal')
plt.savefig('rect_mesh.png')
# test quadrature
# Note: Here we test integration of constant,
# linear, and quadratic functions
int_test = np.zeros(6)
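    # expected values: closed-form integrals of 1, x, y, x**2, y**2, and xy
    # over the 20 x 40 rectangle, to compare against the quadrature sums below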
int_exp = np.array([800., 8_000., 16_000., 320_000./3,
1_280_000./3, 160_000.])
for xq, wq, cent, area in zip(rect_mesh.element_quad_points,
rect_mesh.element_quad_weights,
rect_mesh.element_centroids,
rect_mesh.element_areas):
int_test[0] += np.abs(area) * np.sum(wq)
for xq_k, wk in zip(xq, wq):
int_test[1] += np.abs(area) * wk * (xq_k[0] + cent[0])
int_test[2] += np.abs(area) * wk * (xq_k[1] + cent[1])
int_test[3] += np.abs(area) * wk * (xq_k[0] + cent[0])**2
int_test[4] += np.abs(area) * wk * (xq_k[1] + cent[1])**2
int_test[5] += np.abs(area) * wk * (xq_k[0] * xq_k[1]
+ xq_k[0] * cent[1]
+ xq_k[1] * cent[0]
+ cent[0] * cent[1])
print('Tst Ints: ', int_test)
print('Exp Ints: ', int_exp)
print('Int Errs: ', (int_test - int_exp)/int_exp)
print('\n')
def dam_mesh():
"""An example demonstrating mesh generation for a polygonal domain with
multiple materials and mesh edges between the materials. Demonstrates
"soft" (no mesh_edge) vs. "hard" edges (using a mesh_edge).
"""
print('*** Dam with multiple material regions:\n')
# initialize the mesh object
dam_mesh = msh.PolyMesh2D('Dam Mesh')
# add boundary vertices
# Note: here we show that vertices can be passed as single coordinate pairs
# or as lists of coordinate pairs
# numpy arrays can also be used
dam_mesh.add_vertices([[0, 0], [84, 65], [92.5, 65], [180, 0]])
dam_mesh.add_vertices([92.5, 0])
dam_mesh.add_vertices([45, 0])
dam_mesh.add_vertices([55, 30])
# add outer boundary vertices
dam_mesh.insert_boundary_vertices(0, [0, 6, 1, 2, 3])
# create two different material types
# they are initialized with colors given as valid matplotlib color strings
gravel = mtl.Material('gravel', color='xkcd:stone')
clay = mtl.Material('clay', color='xkcd:clay')
# add material regions
# Note: new material regions are added to their parent mesh by default
msh.MaterialRegion2D(dam_mesh, [0, 6, 1, 5], gravel)
msh.MaterialRegion2D(dam_mesh, [2, 3, 4], gravel)
msh.MaterialRegion2D(dam_mesh, [1, 2, 4, 5], clay)
# add edges to be preserved in mesh generation
# Note: the left edge of the clay region will be a "soft" edge
# and the right edge will be a "hard" edge
msh.MeshEdge2D(dam_mesh, [2, 4])
# generate the mesh and print basic mesh properties
dam_mesh.mesh_scale = 4.0
dam_mesh.mesh_rand = 0.2
dam_mesh.generate_mesh()
print(dam_mesh)
# plot histogram of number of nodes per element
fig = plt.figure()
ax = plt.gca()
ax.hist(dam_mesh.num_nodes_per_element,
bins=[k for k in range(3, 11)],
align='left', rwidth=0.95, color='xkcd:gray')
ax.set_xlabel('# nodes in element', fontsize=12, fontweight='bold')
ax.set_ylabel('# elements', fontsize=12, fontweight='bold')
ax.set_title('Dam Mesh Histogram', fontsize=14, fontweight='bold')
for tick in ax.get_xticklabels() + ax.get_yticklabels():
tick.set_fontsize(12)
plt.savefig('dam_mesh_hist.png')
# plot mesh
fig = plt.figure()
fig.set_size_inches((10, 10))
dam_mesh.plot_boundaries()
dam_mesh.plot_mesh_edges()
ax = dam_mesh.plot_mesh()
dam_mesh.plot_vertices()
ax.set_xlabel('x [m]', fontsize=12, fontweight='bold')
ax.set_ylabel('y [m]', fontsize=12, fontweight='bold')
ax.set_title('Dam Mesh', fontsize=14, fontweight='bold')
for tick in ax.get_xticklabels() + ax.get_yticklabels():
tick.set_fontsize(12)
ax.axis('equal')
plt.savefig('dam_mesh.png')
# test area using generated quadrature
int_test = np.zeros(1)
int_exp = shp.Polygon(dam_mesh.vertices[dam_mesh.boundary_vertices]).area
int_exp = np.array([int_exp])
for e in dam_mesh.elements:
wq = e.quad_weights
area = e.area
int_test[0] += np.abs(area) * np.sum(wq)
print('Tst Ints: ', int_test)
print('Exp Ints: ', int_exp)
print('Int Errs: ', (int_test - int_exp)/int_exp)
print('\n')
def tunnel_mesh():
"""An example demonstrating mesh generation for a symmetric analysis of
a tunnel, which has a concave domain boundary. Also demonstrates
mesh_edges within a material.
"""
print('*** Symmetric tunnel with concave boundary:\n')
# initialize the mesh object
tunnel_mesh = msh.PolyMesh2D('Tunnel Mesh')
# add main corners
# Note: we also insert a vertex in the middle of a straight section of
# boundary these can be added to aid in adding boundary conditions
tunnel_mesh.add_vertices([[0, 15.], [0, 20.], [20, 20], [20, 0], [15, 0]])
# create circular arc (concave)
theta = np.linspace(0, 0.5*np.pi, 20)
for t in theta:
tunnel_mesh.add_vertices(10.*np.array([np.cos(t), np.sin(t)]))
# add boundary vertices in clockwise order
tunnel_mesh.insert_boundary_vertices(0, [k for k in
range(tunnel_mesh.num_vertices)])
# add material types and regions
rock = mtl.Material('rock', color='xkcd:greenish')
msh.MaterialRegion2D(tunnel_mesh, tunnel_mesh.boundary_vertices, rock)
# add mesh edges
# Note: mesh edges need not be at material region boundaries
# they can also be used to force element edge alignment
# (e.g. with joints in rock or existing planes of failure)
nv = tunnel_mesh.num_vertices
tunnel_mesh.add_vertices([[2.5, 17.5],
[10., 12.5],
[12., 7.5],
[8., 17.5],
[12.5, 15.],
[17.5, 2.5]])
msh.MeshEdge2D(tunnel_mesh, [nv, nv+1, nv+2])
msh.MeshEdge2D(tunnel_mesh, [nv+5, nv+4, nv+3])
# generate mesh and show properties
tunnel_mesh.mesh_scale = 0.5
tunnel_mesh.mesh_rand = 0.3
tunnel_mesh.generate_mesh()
print(tunnel_mesh)
# plot histogram of number of nodes per element
fig = plt.figure()
ax = plt.gca()
ax.hist(tunnel_mesh.num_nodes_per_element,
bins=[k for k in range(3, 11)],
align='left', rwidth=0.95, color='xkcd:gray')
ax.set_xlabel('# nodes in element', fontsize=12, fontweight='bold')
ax.set_ylabel('# elements', fontsize=12, fontweight='bold')
ax.set_title('Tunnel Mesh Histogram', fontsize=14, fontweight='bold')
for tick in ax.get_xticklabels() + ax.get_yticklabels():
tick.set_fontsize(12)
plt.savefig('tunnel_mesh_hist.png')
# plot mesh
fig = plt.figure()
fig.set_size_inches((10, 10))
ax = tunnel_mesh.plot_mesh()
tunnel_mesh.plot_boundaries()
tunnel_mesh.plot_mesh()
tunnel_mesh.plot_mesh_edges()
tunnel_mesh.plot_vertices()
ax.set_xlabel('x [m]', fontsize=12, fontweight='bold')
ax.set_ylabel('y [m]', fontsize=12, fontweight='bold')
ax.set_title('Tunnel Mesh', fontsize=14, fontweight='bold')
for tick in ax.get_xticklabels() + ax.get_yticklabels():
tick.set_fontsize(12)
ax.axis('equal')
plt.savefig('tunnel_mesh.png')
# test quadrature
# Note: here we test integration of constant,
# linear, and quadratic functions
int_test = np.zeros(6)
int_exp = np.array([400. - 0.25*np.pi*10.0**2, 4000. - 1000./3,
4000. - 1000./3, 20.*8000./3 - np.pi*10.**4/16,
20.*8000./3 - np.pi*10.**4/16, 40000. - 0.125*10.**4])
for e in tunnel_mesh.elements:
xq = e.quad_points
wq = e.quad_weights
cent = e.centroid
area = e.area
int_test[0] += np.abs(area) * np.sum(wq)
for xq_k, wk in zip(xq, wq):
int_test[1] += np.abs(area) * wk * (xq_k[0] + cent[0])
int_test[2] += np.abs(area) * wk * (xq_k[1] + cent[1])
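if __name__ == '__main__':
    # Standalone-script idiom mentioned in the module docstring (sketch; the
    # original file may run additional examples).
    rectangular_mesh()
    dam_mesh()
    tunnel_mesh()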
from scipy import stats
from scipy import sparse
from numpy import array
import numpy as np
from scipy.spatial import distance
evaluate_euclidean_representations = False
time_dimensions = 3
nb_splits = 5
ambient_euclidean_dimensionality = 6
dimensionality_of_ambient_space = 5
beta = -1.0
i_list = []
j_list = []
v_list = []
fc = open("C_matrix.txt","r")
for fline in fc:
l = fline.split(" ")
i_list.append(int(l[0]))
j_list.append(int(l[1]))
v_list.append(-int(l[2]))
fc.close()
n = 34
I = array(i_list)
J = array(j_list)
V = array(v_list)
edges_dict = {}
for i in range(len(I)):
edges_dict[(I[i],J[i])] = abs(V[i])
edges_dict[(J[i],I[i])] = abs(V[i])
C = sparse.coo_matrix((V,(I,J)),shape=(n,n))
C = C.toarray()
C = C + C.transpose()
C_sum = np.sum(C,axis=0)
top_10 = [33,0,32,2,1,31,23,3,8,13]
top_5 = [33,0,32,2,1]
recall_at_1 = 0.0
rank_first_leader = []
rank_second_leader = []
rho5_list = []
rho10_list = []
for i in range(nb_splits):
if evaluate_euclidean_representations:
file_name = "zachary_data/euclidean/%d/d.txt" % (i+1)
D = np.loadtxt(file_name, usecols=range(n))
else:
file_name = "zachary_data/d_%d_q_%d/%d/d.txt" % (dimensionality_of_ambient_space , time_dimensions, i+1)
D = np.loadtxt(file_name, usecols=range(n))
D = np.sum(D,axis=0)
sorted_D = np.argsort(D)
search_second_leader = False
for j in range(n):
if (sorted_D[j] == 0) or (sorted_D[j] == n-1):
if search_second_leader:
rank_second_leader.append(j+1)
continue
else:
search_second_leader = True
rank_first_leader.append(j+1)
rho5, pval5 = stats.spearmanr(C_sum[top_5],D[top_5])
rho10, pval10 = stats.spearmanr(C_sum[top_10],D[top_10])
rho5_list.append(rho5)
rho10_list.append(rho10)
if evaluate_euclidean_representations:
print("Euclidean space of dimensionality %d" % ambient_euclidean_dimensionality)
else:
print("dimensionality of the ambient space = %d" % dimensionality_of_ambient_space)
if time_dimensions == 1:
print("hyperbolic case")
elif time_dimensions == dimensionality_of_ambient_space :
print("spherical case")
else:
print("ultrahyperbolic case with %d time dimensions" % time_dimensions)
ddofint = 1
print("rank of first leader")
print("mean = %f ----- std = %f" % ( | np.mean(rank_first_leader) | numpy.mean |
"""REFERENCE: <NAME> and <NAME>, "GoDec: Randomized Lo-rank & Sparse Matrix
Decomposition in Noisy Case", ICML 2011
Tianyi Zhou, 2011, All rights reserved."""
import numpy as np
class GoDec:
"""
GoDec - Go Decomposition (<NAME> and <NAME>, 2011)
The algorithm estimate the low-rank part L and the sparse part S of a matrix X = L + S + G with noise G.
Args:
X : array-like, shape (n_features, n_samples), which will be decomposed into a sparse matrix S
and a low-rank matrix L.
rank : int >= 1, optional
The rank of low-rank matrix. The default is 1.
card : int >= 0, optional
The cardinality of the sparse matrix. The default is None (number of array elements in X).
iterated_power : int >= 1, optional
Number of iterations for the power method, increasing it lead to better accuracy and more time cost. The default is 1.
max_iter : int >= 0, optional
Maximum number of iterations to be run. The default is 100.
error_bound : float >= 0, optional
Error bound (tolerance) for the stopping criterion. The default is 0.001.
return_error: bool, whether to return error.
"""
def __init__(self,
X,
rank=2,
card=None,
iterated_power=2,
max_iter=10,
error_bound=1e-6,
return_error=False,
**kwargs):
self.X = X
self.rank = rank
self.card = int(np.prod(X.shape)/20) if card is None else card
self.iterated_power = iterated_power
self.max_iter = max_iter
self.error_bound = error_bound
self.return_error = return_error
self.percent = kwargs.get('percent', None)
self.start_percent = kwargs.get('start_percent', None)
self.rcode = kwargs.get('rcode', None)
self.task_id = kwargs.get('task_id', None)
def __call__(self):
return self._godec(self.X, self.rank, self.card, self.iterated_power,
self.max_iter, self.error_bound, self.return_error)
def _godec(self,
X,
rank=2,
card=None,
iterated_power=2,
max_iter=100,
error_bound=0.001,
return_error=False):
"""
Returns:
L : array-like, low-rank matrix.
S : array-like, sparse matrix.
LS : array-like, reconstruction matrix.
RMSE : root-mean-square error.
"""
update_progress = self.percent is not None and self.start_percent is not None
if update_progress:
try:
import TaskUtil
except:
raise ModuleNotFoundError
if return_error:
RMSE = []
X = X.T if (X.shape[0] < X.shape[1]) else X
_, n = X.shape
# Initialization of L and S
L = X
S = np.zeros(X.shape)
LS = np.zeros(X.shape)
for i in range(max_iter):
# Update of L
Y2 = np.random.randn(n, rank)
for _ in range(iterated_power):
Y1 = L.dot(Y2)
Y2 = L.T.dot(Y1)
Q, _ = np.linalg.qr(Y2)
L_new = (L.dot(Q)).dot(Q.T)
# Update of S
T = L - L_new + S
L = L_new
T_vec = T.reshape(-1)
S_vec = S.reshape(-1)
idx = abs(T_vec).argsort()[::-1]
S_vec[idx[:card]] = T_vec[idx[:card]] # K largest entries of |X-Lt|
S = S_vec.reshape(S.shape)
# Reconstruction
LS = L + S
# Stopping criteria
error = np.sqrt(np.mean((X - LS)**2))
if return_error:
RMSE.append(error)
print("[INFO] iter: ", i, "error: ", error)
if update_progress:
current_percent = int(self.start_percent+(i+1)*self.percent/self.max_iter)
TaskUtil.SetPercent(self.rcode, self.task_id, current_percent, '')
if (error <= error_bound):
print(f"[INFO] Converged after {i} iterations.")
break
if return_error:
return L, S, LS, RMSE
return L, S
def godec_original(X, r, k, e=1e-6, q=0, max_iter=100):
"""GoDec implemented exactly following the oiginal paper.
Args:
X(np.ndarray): the dense matrix to be decomposited.
r(int): the rank of the desired Low-rank component.
k(int): the cardinality of the desired Sparse component.
e(float): the error bound between the final reconstruction L+S
and the actual input X.
q(int): when q=0, directly perform BRP(bilateral random project)
approximation if L. When q>0, perform the power scheme.
Return:
the final decomposition L(Low-rank) and S(Sparse).
"""
m, n = X.shape
t = 0 # init iteration
Lt = X # init Low-rank approximation
St = np.zeros_like(X) # init Sparse residual component
desired_err = np.linalg.norm(X, ord='fro')*e # termination condition
## start GoDec
cur_err = np.inf
while cur_err>desired_err and t<max_iter:
## update Lt
L_wave = X-St
for _ in range(q):
L_wave = (L_wave.dot(L_wave.T)).dot(L_wave)
A1 = np.random.randn(n, r)
Y1 = L_wave.dot(A1)
A2 = Y1
Y2 = L_wave.T.dot(Y1)
Q2, R2 = np.linalg.qr(Y2)
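if __name__ == "__main__":
    # Minimal self-test (sketch): synthetic low-rank + sparse + noise matrix.
    # The sizes, rank, and cardinality below are illustrative choices, not
    # values from the original paper.
    rng = np.random.default_rng(0)
    L_true = rng.standard_normal((200, 5)) @ rng.standard_normal((5, 100))
    S_true = np.zeros_like(L_true)
    spikes = rng.choice(L_true.size, size=200, replace=False)
    S_true.flat[spikes] = 10.0 * rng.standard_normal(200)
    X = L_true + S_true + 0.01 * rng.standard_normal(L_true.shape)
    L_est, S_est = GoDec(X, rank=5, card=200, max_iter=50)()
    print("relative low-rank recovery error:",
          np.linalg.norm(L_est - L_true) / np.linalg.norm(L_true))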
import numpy as np
import csv
from collections import namedtuple
import json
import torch
import torch.nn.utils.rnn as rnn_utils
from tqdm import trange
tasks = ["_Balura_Game", "_Fixations", "_Reading", "_Video_1", "_Video_2"]
tasks_code = ["BLG", "FXS", "TEX", "VD1", "VD2"]
DataInfo = namedtuple("DataInfo", ["dimension_size", "people_size", "feature_size"])
exceptions = np.array([ 28, 36, 50, 56, 75, 82, 104, 105, 112, 124, 160, 205, 324])
amt_people = 335
info = DataInfo(dimension_size=2*len(tasks), people_size=amt_people-(exceptions<=amt_people).sum(), feature_size=3)
print("Dataset info:", info)
config = json.load(open("config.json"))
torch.manual_seed(config['seed'])
n = info.dimension_size * info.people_size
VAL = int(config['val'] * n)
TEST = int(config['test'] * n)
TRAIN = n - VAL - TEST
VAL_PEOPLE = int(config['val'] * info.people_size)
TEST_PEOPLE = int(config['test'] * info.people_size)
TRAIN_PEOPLE= info.people_size - VAL_PEOPLE - TEST_PEOPLE
def permutate(n):
#n = info.people_size * info.dimension_size
return torch.randperm(n)
PERM_TRAIN= permutate(TRAIN)
PERM_VAL = permutate(VAL)
PERM_TEST = permutate(TEST)
def data_index(person, dim):
"""
Output the sequence of eye-gaze (x, y) positions from the dataset for a person and a dimension of that person (task, session, etc.).
Indices start at 0.
Each vector is [x, y, flag], where flag indicates a missing (NaN) sample.
"""
session = "S1" if dim % 2 == 0 else "S2"
# S1_Balura_Game S1_Fixations S1_Horizontal_Saccades S1_Random_Saccades S1_Reading S1_Video_1 S1_Video_2
for exc in exceptions:
person += (exc-1 <= person)
num = str(person+1).rjust(3, "0")
#global info, tasks, tasks_code
dir = "data/Round_1/id_1" + num + "/" + session + "/" + session + tasks[dim//2] + \
"/S_1" + num + "_" + session + "_" + tasks_code[dim//2] + \
".csv"
pos = []
mask = []
with open(dir) as csvfile:
spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
vecs = []
pads = []
for i, row in enumerate(spamreader):
if i < 1:
continue
row = ''.join(row).split(",")
if (i-1) % config['Hz'] == 0 and (i-1) != 0:
vecs = np.stack(vecs)
pads = np.stack(pads)
pos.append(vecs)
mask.append(pads)
vecs = []
pads = []
if (i-1) % (config['Hz'] // config['second_split']) == 0:
flag = (row[1] == 'NaN' or row[2] == 'NaN')
arr = np.array([0, 0, flag]) if flag else np.array([float(row[1]), float(row[2]), flag])
vecs.append(arr)
arr2 = np.array([0]*(info.feature_size-1)+[info.feature_size]) if flag else np.ones(info.feature_size)
# the info.feature_size instead of 1 is to rescale and give it equal "weight"
pads.append(arr2)
pos=np.stack(pos)
mask = np.stack(mask)
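    # --- The source file is truncated at this point. A hedged guess at the
    # natural ending of data_index(), per the docstring above: return the
    # stacked (x, y, flag) positions and the validity mask. ---
    return pos, mask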
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue May 8 00:10:40 2018
@author: avanetten
Read in a list of wkt linestrings, render to networkx graph
"""
from __future__ import print_function
import os
import utm
import shapely.wkt
import shapely.ops
from shapely.geometry import mapping, Point, LineString
import fiona
import networkx as nx
import osmnx as ox
from osgeo import gdal, ogr, osr
import argparse
import json
import pandas as pd
import numpy as np
import time
import matplotlib.pyplot as plt
import logging
# import cv2
from utils import make_logger
from jsons.config import Config
logger1 = None
###############################################################################
def clean_sub_graphs(G_, min_length=150, max_nodes_to_skip=30,
weight='length_pix', verbose=True,
super_verbose=False):
'''Remove subgraphs whose maximum path length is less than min_length;
if the subgraph has more than max_nodes_to_skip nodes, don't check length
(this step greatly improves processing time)'''
if len(list(G_.nodes())) == 0:
return G_
print ("Running clean_sub_graphs...")
sub_graphs = list(nx.connected_component_subgraphs(G_))
bad_nodes = []
if verbose:
print (" len(G_.nodes()):", len(list(G_.nodes())) )
print (" len(G_.edges()):", len(list(G_.edges())) )
if super_verbose:
print ("G_.nodes:", G_.nodes())
edge_tmp = G_.edges()[np.random.randint(len(G_.edges()))]
print (edge_tmp, "G.edge props:", G_.edge[edge_tmp[0]][edge_tmp[1]])
for G_sub in sub_graphs:
# don't check length if too many nodes in subgraph
if len(G_sub.nodes()) > max_nodes_to_skip:
continue
else:
all_lengths = dict(nx.all_pairs_dijkstra_path_length(G_sub, weight=weight))
if super_verbose:
print (" \nGs.nodes:", G_sub.nodes() )
print (" all_lengths:", all_lengths )
# get all lengths
lens = []
#for u,v in all_lengths.iteritems():
for u in all_lengths.keys():
v = all_lengths[u]
#for uprime, vprime in v.iteritems():
for uprime in v.keys():
vprime = v[uprime]
lens.append(vprime)
if super_verbose:
print (" u, v", u,v )
print (" uprime, vprime:", uprime, vprime )
max_len = np.max(lens)
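            # --- The source file is truncated at this point. A hedged
            # completion following the docstring (flag short subgraphs for
            # removal), modeled on the common apls/cresi pattern; not verbatim.
            if max_len < min_length:
                bad_nodes.extend(G_sub.nodes())
                if super_verbose:
                    print (" appending to bad_nodes:", G_sub.nodes())
    # remove bad_nodes and return the cleaned graph
    G_.remove_nodes_from(bad_nodes)
    if verbose:
        print (" num bad_nodes:", len(bad_nodes))
        print (" len(G'.nodes()):", len(G_.nodes()))
        print (" len(G'.edges()):", len(G_.edges()))
    return G_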
"""Main script for controlling the calculation of the IS spectrum.
Calculate spectra from specified parameters as shown in the examples given in the class
methods, create a new set-up with the `Reproduce` abstract base class in `reproduce.py` or
use one of the pre-defined classes from `reproduce.py`.
"""
# The start method of the multiprocessing module was changed from python3.7 to python3.8
# (macOS). Instead of using 'fork', 'spawn' is the new default. To be able to use global
# variables across all parallel processes, the start method must be reset to 'fork'. See
# https://tinyurl.com/yyxxfxst for more info.
import multiprocessing as mp
mp.set_start_method("fork")
import matplotlib # pylint: disable=C0413
import matplotlib.pyplot as plt # pylint: disable=C0413
import numpy as np # pylint: disable=C0413
import isr_spectrum.inputs.config as cf
from isr_spectrum.plotting import hello_kitty as hk
from isr_spectrum.plotting import reproduce
from isr_spectrum.plotting.plot_class import PlotClass
# Customize matplotlib
matplotlib.rcParams.update(
{
"text.usetex": True,
"font.family": "serif",
"axes.unicode_minus": False,
"pgf.texsystem": "pdflatex",
}
)
class Simulation:
def __init__(self):
self.from_file = False
self.f = np.ndarray([])
from shfl.federated_government.federated_government import FederatedGovernment
from shfl.federated_aggregator.fedavg_aggregator import FedAvgAggregator
from shfl.data_distribution.data_distribution_iid import IidDataDistribution
from shfl.data_distribution.data_distribution_non_iid import NonIidDataDistribution
from shfl.private.federated_operation import apply_federated_transformation
from shfl.private.federated_operation import FederatedTransformation
from shfl.model.deep_learning_model import DeepLearningModel
from shfl.data_base.emnist import Emnist
from shfl.data_base.fashion_mnist import FashionMnist
from shfl.private.federated_operation import Normalize
from enum import Enum
import numpy as np
import tensorflow as tf
class Reshape(FederatedTransformation):
"""
Federated transformation to reshape the data
"""
def apply(self, labeled_data):
labeled_data.data = np.reshape(labeled_data.data,
(labeled_data.data.shape[0], labeled_data.data.shape[1],
labeled_data.data.shape[2], 1))
class ImagesDataBases(Enum):
"""
Enumeration of possible databases for image classification.
"""
EMNIST = Emnist
FASHION_EMNIST = FashionMnist
class FederatedImagesClassifier(FederatedGovernment):
"""
Class used to represent a high-level federated image classification experiment
(see: [FederatedGovernment](../federated_government/#federatedgovernment-class)).
# Arguments:
data_base_name_key: key of the enumeration of valid data bases (see: [ImagesDataBases](./#imagesdatabases-class))
iid: boolean specifying whether the distribution is IID (True) or non-IID (False) (True by default)
num_nodes: number of clients.
percent: percentage of the database to distribute among nodes.
"""
def __init__(self, data_base_name_key, iid=True, num_nodes=20, percent=100):
if data_base_name_key in ImagesDataBases.__members__.keys():
module = ImagesDataBases.__members__[data_base_name_key].value
data_base = module()
train_data, train_labels, test_data, test_labels = data_base.load_data()
if iid:
distribution = IidDataDistribution(data_base)
else:
distribution = NonIidDataDistribution(data_base)
federated_data, self._test_data, self._test_labels = distribution.get_federated_data(num_nodes=num_nodes,
percent=percent)
apply_federated_transformation(federated_data, Reshape())
mean = np.mean(train_data.data)
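            # --- The source file is truncated at this point. A hedged
            # completion based on the imports above (Normalize,
            # FedAvgAggregator); `model_builder` is assumed to be defined
            # elsewhere in the class, as in the shfl examples. ---
            std = np.std(train_data.data)
            apply_federated_transformation(federated_data, Normalize(mean, std))
            aggregator = FedAvgAggregator()
            super(FederatedImagesClassifier, self).__init__(
                self.model_builder(), federated_data, aggregator)
        else:
            raise ValueError(
                "Unknown data base key; valid keys are: "
                + ", ".join(ImagesDataBases.__members__.keys()))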
"""
Extract slits from a MOSFIRE slitmask
"""
import glob
import os
import traceback
import astropy.io.fits as pyfits
import numpy as np
from grizli import utils
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
import drizzlepac
import scipy.ndimage as nd
import peakutils
from skimage.feature import match_template
from skimage.registration import phase_cross_correlation
from tqdm import tqdm
utils.LOGFILE = 'mospipe.log'
utils.set_warnings()
def grating_dlambda(band):
"""
returns the dlambda/dpixel in angstrom for a band
(From MosfireDRP)
"""
orders = {"Y": 6, "J": 5, "H": 4, "K": 3}
order = orders[band]
d = 1e3/110.5 # Groove spacing in micron
pixelsize, focal_length = 18.0, 250e3 # micron
scale = pixelsize/focal_length
dlambda = scale * d / order * 10000
return dlambda
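# --- Added illustration (not part of the original module): nominal
# dispersion per band from grating_dlambda above; e.g. the K band (order 3)
# works out to ~2.17 A/pix with these optical constants. ---
def _print_band_dispersions():
    for band in ['Y', 'J', 'H', 'K']:
        print('{0}: {1:.3f} A/pix'.format(band, grating_dlambda(band)))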
# grating_summary = {'Y': {'edge':[9612, 11350]},
# 'J': {'edge':[11550, 13623]},
# 'H': {'edge':[14590, 18142]},
# 'K': {'edge':[19118, 24071]}}
grating_summary = {'Y': {'edge':[9612, 11350]},
'J': {'edge':[11450, 13550]},
'H': {'edge':[14590, 18142]},
'K': {'edge':[18900, 24150]}}
for k in grating_summary:
edge = grating_summary[k]['edge']
grating_summary[k]['dlam'] = dlam = grating_dlambda(k)
grating_summary[k]['N'] = int(np.ceil(edge[1]-edge[0])/dlam)
grating_summary[k]['ref_wave'] = (edge[1]+edge[0])/2
def get_grating_loglam(filter):
"""
Get polynomial and WCS coefficients that approximate logarithmic
wavelength spacing
"""
gr = grating_summary[filter]
edge, dlam, N = gr['edge'], gr['dlam'], gr['N']
loglam = np.logspace(np.log10(edge[0]), np.log10(edge[1]), N)
xarr = np.arange(N)
xref = N//2-1
# Polynomial representation of logarithmic wavelength
c = np.polyfit(xarr-xref, loglam, 3)
# WAVE-LOG WCS
w = np.log(loglam/loglam[xref])*loglam[xref]
#plt.plot(mask.xarr-1024, w)
#plt.plot(mask.xarr, w)
cl = np.polyfit(xarr-xref, w, 1)
#plt.plot(loglam, (np.polyval(c, xarr-xref) - loglam)/loglam)
#print(N, xref, c, cl)
return N, loglam[xref], c, cl
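# --- Added illustrative check (not original code): the cubic polynomial
# returned by get_grating_loglam should reproduce the endpoints of the
# log-spaced wavelength grid for a given band. ---
def _check_loglam(band='K'):
    N, lam_ref, c, cl = get_grating_loglam(band)
    xarr = np.arange(N)
    loglam = np.polyval(c, xarr - (N // 2 - 1))
    print(band, N, lam_ref, loglam[0], loglam[-1])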
def fit_wavelength_from_sky(sky_row, band, order=3, make_figure=True, nsplit=5, plot_axis=None, debug=False, use_oliva=True, **kwargs):
lines = {}
line_intens = {}
xarr = np.arange(2048)
# OH Sky lines from MosfireDRP
lines['Y'] = np.array([9793.6294, 9874.84889, 9897.54143, 9917.43821,
10015.6207, 10028.0978, # 10046.7027, 10085.1622
10106.4478, 10126.8684, 10174.623, 10192.4683,
10213.6107, 10289.3707, 10298.7496, 10312.3406,
10350.3153, 10375.6394, 10399.0957, 10421.1394,
10453.2888, 10471.829, 10512.1022, 10527.7948,
10575.5123, 10588.6942, 10731.6768, 10753.9758,
10774.9474, 10834.1592, 10844.6328, 10859.5264,
10898.7224, 10926.3765, 10951.2749, 10975.3784,
11029.8517, 11072.4773, 11090.083, 11140.9467,
11156.0366 ])
lines['J'] = np.array([11538.7582, 11591.7013, 11627.8446, 11650.7735,
11696.3379, 11716.2294, 11788.0779, 11866.4924,
11988.5382, 12007.0419, 12030.7863, 12122.4957,
12135.8356, 12154.9582, 12196.3557, 12229.2777,
12257.7632, 12286.964, 12325.9549, 12351.5321,
12400.8893, 12423.349, 12482.8503, 12502.43,
12905.5773, 12921.1364, 12943.1311, 12985.5595,
13021.6447, 13052.818, 13085.2604, 13127.8037,
13156.9911, 13210.6977, 13236.5414, 13301.9624,
13324.3509, 13421.579])
lines['H'] = np.array([14605.0225, 14664.9975, 14698.7767, 14740.3346,
14783.7537, 14833.029, 14864.3219, 14887.5334,
14931.8767, 15055.3754, 15088.2599, 15187.1554,
15240.922, 15287.7652, 15332.3843, 15395.3014,
15432.1242, 15570.0593, 15597.6252, 15631.4697,
15655.3049, 15702.5101, 15833.0432, 15848.0556,
15869.3672, 15972.6151, 16030.8077, 16079.6529,
16128.6053, 16194.6497, 16235.3623, 16317.0572,
16351.2684, 16388.4977, 16442.2868, 16477.849,
16502.395, 16553.6288, 16610.807, 16692.2366,
16708.8296, 16732.6568, 16840.538, 16903.7002,
16955.0726, 17008.6989, 17078.3519, 17123.5694,
17210.579, 17248.5646, 17282.8514, 17330.8089,
17386.0403, 17427.0418, 17449.9205, 17505.7497,
17653.0464, 17671.843, 17698.7879, 17811.3826,
17880.341, 17993.9600, 18067.9500 ])
lines['K'] = np.array([19518.4784, 19593.2626, 19618.5719, 19642.4493,
19678.046, 19701.6455, 19771.9063, 19839.7764,
20008.0235, 20193.1799, 20275.9409, 20339.697,
20412.7192, 20499.237, 20563.6072, 20729.032,
20860.2122, 20909.5976, 21176.5323, 21249.5368,
21279.1406, 21507.1875, 21537.4185, 21580.5093,
21711.1235, 21802.2757, 21873.507, 21955.6857,
22125.4484, 22312.8204, 22460.4183, 22517.9267,
22690.1765, 22742.1907, 22985.9156, 23914.55,
24041.62])
for b in lines:
line_intens[b] = np.ones_like(lines[b])
line_intens['K'] = np.array([0.05, 0.1, 0.1, 0.25, 0.1,
0.35, 0.4, 0.15, 0.7, 0.1, 0.7, 0.35, 1,
0.25, 0.65, 0.45, 0.2, 0.25, 0.25, 0.3, 0.05,
0.75, 0.3, 0.1, 0.2, 0.6, 0.25, 0.75, 0.5,
0.3, 0.05, 0.1, 0.05, 0.05, 0.05, 0.15,
0.05])
# Oliva et al. sky lines
# https://ui.adsabs.harvard.edu/abs/2015A%26A...581A..47O
if use_oliva:
data_dir = os.path.dirname(__file__)
oh_file = os.path.join(data_dir, 'data', 'oliva_oh_lines.vot')
oh = utils.read_catalog(oh_file)
for b in lines:
lines[b] = (oh['lambda1'] + oh['lambda2'])/2
line_intens[b] = oh['Flux']/1000.
dlam = grating_dlambda(band)
msg = f'Band={band}, dlambda={dlam:.3f} A/pix'
utils.log_comment(utils.LOGFILE, msg, verbose=True)
band_lims = {'Y': (9701, 11280),
'J': (11501, 13650),
'H': (14510, 17700),
'K': (19100, 23990)}
for k in band_lims:
ok = (lines[k] > band_lims[k][0]) & (lines[k] < band_lims[k][1])
lines[k] = lines[k][ok]
line_intens[k] = line_intens[k][ok]
if band == 'K':
x0 = 19577.35927
elif band == 'J':
x0 = 11536.0
elif band == 'H':
x0 = 14600.
elif band == 'Y':
x0 = 9700.
###############
# First guess linear from simple cross correlation
for it in range(3):
wave = xarr*dlam+x0
yline = wave*0.
for il, l in enumerate(lines[band]):
yline += line_intens[band][il]*np.exp(-(wave-l)**2/2/dlam**2)
cc = phase_cross_correlation(sky_row[None,:],
yline[None,:],
upsample_factor=100,
reference_mask=~np.isnan(sky_row[None,:]),
moving_mask=~np.isnan(yline[None,:]),
return_error=True)
try:
shift, error, diffphase = cc
except ValueError:
shift = cc
msg = f' phase shift {it} {shift[1]:.2f} x0={x0-shift[1]*dlam:.3f}'
utils.log_comment(utils.LOGFILE, msg, verbose=True)
x0 -= shift[1]*dlam
wave = xarr*dlam+x0
if debug:
# Template match cross correlation
xres = np.squeeze(match_template(sky_row[None,:], yline[None,:],
pad_input=True))
fig, ax = plt.subplots(1,1)
ax.plot(xarr, xres)
indexes = peakutils.indexes(yline, thres=0.08, min_dist=12)
base = peakutils.baseline(sky_row/sky_row.max(), 4)
# Peak centers
peaks_x = peakutils.interpolate(wave, sky_row/sky_row.max()-base,
ind=indexes, width=10)
peaks_pix = peakutils.interpolate(xarr, sky_row/sky_row.max()-base,
ind=indexes, width=10)
peaks_model = peakutils.interpolate(wave, yline, ind=indexes, width=10)
peaks_y = np.interp(peaks_x, wave, sky_row/sky_row.max())
if debug:
fig, axes = plt.subplots(nsplit,1,figsize=(12,12*nsplit/5))
for i, ax in enumerate(axes):
#ax.scatter(wave[indexes], yline[indexes], marker='x', color='r')
ax.scatter(peaks_x, peaks_y, marker='x', color='r')
ax.plot(wave, sky_row/sky_row.max())
ax.plot(wave, yline*0.5)
ax.set_xlim(wave[i*2048//nsplit], wave[(i+1)*2048//nsplit-1])
#######
# Fit polynomial dispersion
lam = peaks_x*1
for it in range(3):
ok = (np.abs(peaks_x-peaks_model) < 10)
ok &= (peaks_model > 9000) & (peaks_x > 9000)
ok &= (peaks_model < 2.3e4)
if it > 0:
ok &= np.abs(lam-peaks_model) < 0.5
lam_coeffs = np.polyfit((peaks_pix[ok]-1023), peaks_model[ok],
order, w=yline[indexes][ok])
lam = np.polyval(lam_coeffs, peaks_pix-1023)
wave_fit = np.polyval(lam_coeffs, xarr-1023)
if plot_axis is not None:
ax = plot_axis
make_figure = False
ax.scatter(peaks_model[ok]/1.e4, (peaks_x-peaks_model)[ok],
label='Linear')
ax.plot(peaks_model[ok]/1.e4, (lam-peaks_model)[ok],
label=f'poly deg={order}')
ax.grid()
ax.set_ylim(-3*dlam, 3*dlam)
ax.set_ylabel(r'$\Delta\lambda$ [$\mathrm{\AA}$]')
ax.legend(ncol=2, fontsize=8)
if make_figure | debug:
fig1, ax = plt.subplots(1,1,figsize=(6,6))
ax.scatter(peaks_model[ok]/1.e4, (peaks_x-peaks_model)[ok],
label='Linear')
ax.plot(peaks_model[ok]/1.e4, (lam-peaks_model)[ok],
label=f'poly deg={order}')
ax.grid()
ax.set_ylim(-3, 3)
ax.set_ylabel(r'$\Delta\lambda$ [A]')
ax.legend(ncol=2, fontsize=8)
fig1.tight_layout(pad=0.1)
ynew = wave_fit*0.
for il, l in enumerate(lines[band]):
ynew += line_intens[band][il]*np.exp(-(wave_fit-l)**2/2/dlam**2)
fig, axes = plt.subplots(nsplit,1,figsize=(12,9*nsplit/5))
for i, ax in enumerate(axes):
#ax.scatter(wave[indexes], yline[indexes], marker='x', color='r')
#ax.scatter(peaks_x, peaks_y, marker='x', color='r')
ax.plot(wave_fit, sky_row/sky_row.max())
ax.plot(wave, sky_row/sky_row.max(), alpha=0.4)
ax.plot(wave_fit, ynew/2)
ax.set_xlim(wave_fit[i*2048//nsplit],
wave_fit[(i+1)*2048//nsplit-1])
ax.set_yticklabels([])
ax.grid()
fig.tight_layout(pad=0.3)
figs = (fig1, fig)
else:
figs = None
return lam_coeffs, wave_fit, figs
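# --- Added usage sketch (not part of the original module). The `sky2d`
# array is a stand-in for a reduced MOSFIRE science frame; rows 1019-1028
# are an arbitrary choice near the detector center. ---
def _wavecal_sketch(sky2d, band='K'):
    """Fit a wavelength solution from a median sky row of a 2D frame."""
    sky_row = np.nanmedian(sky2d[1019:1029, :], axis=0)
    lam_coeffs, wave_fit, figs = fit_wavelength_from_sky(sky_row, band,
                                                         order=3,
                                                         make_figure=False)
    return lam_coeffs, wave_fit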
# Header keys to pull out for a single exposure describing the mask
OBJ_KEYS = ['OBJECT', 'FRAMDESC', 'MASKNAME','OBSERVER','PATTERN',
'DTHCOORD', 'SKYPA3','PROGID','PROGPI','PROGTL1','SEMESTER',
'WAVERED','WAVEBLUE']
# Header keys for the exposure sequence
SEQ_KEYS = ['AIRMASS','GUIDFWHM','MJD-OBS']
utils.set_warnings()
def show_ls_targets(path):
"""
Show Long2pos targets in a specified path
"""
offset_files = glob.glob(os.path.join(path, 'Offset*txt'))
ls_targets = []
for file in offset_files:
if '_Pos' in file:
ls_targets.append(file.split('_')[-2])
if len(ls_targets) == 0:
print(f'No LS targets found in {path}')
return ['']
ls_targets = np.unique(ls_targets).tolist()
for i, t in enumerate(ls_targets):
print(f'{i}: {t}')
return ls_targets
class MosfireMask(object):
"""
A group of MOSFIRE exposures for a single mask / night / filter
"""
def __init__(self, path='mask12_13_new_20200225/Reduced/mask12_13_new/2020feb26/H', min_nexp=3, ls_target='', use_pixel_flat=True):
if path.endswith('/'):
self.path = path[:-1]
else:
self.path = path
self.filter = self.path.strip('/').split('/')[-1]
self.datemask = os.path.join(os.getcwd(), self.path).split('/')[-5]
logfile = f'{self.datemask}-{self.filter}.log'
utils.LOGFILE = os.path.join(os.getcwd(), logfile)
self.logfile = utils.LOGFILE
utils.log_comment(self.logfile, f'Start mask {self.path}',
verbose=True, show_date=True)
flat_file = glob.glob(os.path.join(self.path, 'combflat_2d*fits'))[0]
self.flat_file = flat_file
self.flat = pyfits.open(flat_file)
pixel_flat_file = flat_file.replace('combflat','pixelflat')
if use_pixel_flat & os.path.exists(pixel_flat_file):
self.pixel_flat = pyfits.open(pixel_flat_file)[0].data
else:
self.pixel_flat = 1
self.offset_files = glob.glob(os.path.join(self.path, f'Offset*{ls_target}*txt'))
self.groups = {}
self.read_exposures(min_nexp=min_nexp)
#self.target_names = []
#self.target_keys = []
self.read_ssl_table()
self.nslits = 0 #len(self.ssl)
# Info
self.info(log=True)
self.slit_hdus = {}
self.slit_info = {}
self.plan_pairs = []
@property
def keys(self):
"""
Keys of the exposure groups, like 'A','B','Ap','Bp', etc.
"""
return list(self.groups.keys())
@property
def namestr(self):
"""
Descriptive name
"""
return f'{self.datemask} {self.filter}'
def __repr__(self):
return self.namestr
@property
def meta(self):
"""
Metadata dictionary
"""
meta = {}
for i, gr in enumerate(self.groups):
grm = self.groups[gr].meta
if i == 0:
for k in OBJ_KEYS:
meta[k] = grm[k]
for k in SEQ_KEYS:
meta[k] = grm[k]
else:
for k in SEQ_KEYS:
meta[k].extend(grm[k])
for k in SEQ_KEYS:
try:
meta[f'{k}_MIN'] = np.nanmin(meta[k])
meta[f'{k}_MED'] = np.nanmedian(meta[k])
meta[f'{k}_MAX'] = np.nanmax(meta[k])
except:
meta[f'{k}_MIN'] = 0.
meta[f'{k}_MED'] = 0.
meta[f'{k}_MAX'] = 0.
return meta
@property
def plans(self):
"""
Dither groups ['A','B'], ['Ap','Bp'], etc.
"""
keys = self.keys
plans = []
if ('A' in keys) & ('B' in keys):
plans.append(['A','B'])
for i in range(5):
pp = 'p'*(i+1)
if (f'A{pp}' in keys) & (f'B{pp}' in keys):
plans.append([f'A{pp}',f'B{pp}'])
return plans
def get_plan_pairs(self, tpad=90, show=False):
"""
Paired exposures in a given offset "plan"
"""
plan_pairs = []
if len(self.plans) == 0:
msg = f'{self.namestr} # No plans found!'
utils.log_comment(self.logfile, msg, verbose=True)
return [], None
for ip, plan in enumerate(self.plans):
pa, pb = plan
ta = np.array(self.groups[pa].meta['MJD-OBS'])
if ip == 0:
t0 = ta[0]
ta = (ta - t0)*86400
tb = np.array(self.groups[pb].meta['MJD-OBS'])
tb = (tb - t0)*86400
ia = []
ib = []
npl = len(self.plans)
for i, a in enumerate(ta):
dt = np.abs(tb - a)
tstep = (self.groups[pa].truitime[i]+tpad)*npl
ok = np.where(dt < tstep)[0]
if len(ok) > 0:
for j in ok:
if (j not in ib) & (i not in ia):
ia.append(i)
ib.append(j)
pd = {'plan':plan, 'ia':ia, 'ib':ib,
'ta':ta, 'tb':tb, 't0':t0,
'n':len(ia),
'fwhm':np.ones(len(ia)), 'shift':np.zeros(len(ia)),
'scale':np.ones(len(ia))}
plan_pairs.append(pd)
self.plan_pairs = plan_pairs
if show & (len(plan_pairs) > 0):
fig, ax = plt.subplots(1,1,figsize=(12,3))
#ax.plot(ta[ia], tb[ib]-ta[ia], marker='o', label='B - A')
for i, pd in enumerate(plan_pairs):
pa, pb = pd['plan']
ta, tb, ia, ib = pd['ta'], pd['tb'], pd['ia'], pd['ib']
p = 6
y0 = 2*p*i
ax.scatter(ta[ia], self.groups[pa].truitime[ia] + y0,
color='r', zorder=100)
for j in range(len(ia)):
ax.plot([ta[ia[j]], tb[ib[j]]],
np.ones(2)*self.groups[pa].truitime[ia[j]] + y0,
marker='o', color='0.5')
ax.vlines(ta, self.groups[pa].truitime-p+y0,
self.groups[pa].truitime-2+y0, color='r', alpha=0.3,
linestyle='--')
ax.vlines(ta[ia], self.groups[pa].truitime[ia]-p+y0,
self.groups[pa].truitime[ia]-2+y0, color='r',
alpha=0.9)
ax.vlines(tb, self.groups[pb].truitime+p+y0,
self.groups[pb].truitime+2+y0, color='0.5',
alpha=0.3, linestyle='--')
ax.vlines(tb[ib], self.groups[pb].truitime[ib]+p+y0,
self.groups[pb].truitime[ib]+2+y0, color='0.5',
alpha=0.9)
ax.set_xlabel(r'$\Delta t$, $\mathrm{MJD}_0 = $'+ '{0:.2f} {1}'.format(pd['t0'], self.namestr))
xl = ax.get_xlim()
xlab = 0.05*(xl[1]-xl[0])
for i, pd in enumerate(plan_pairs):
pa, pb = pd['plan']
ta, tb, ia, ib = pd['ta'], pd['tb'], pd['ia'], pd['ib']
y0 = 2*p*i
yi = np.interp(self.groups[pb].truitime[ib][0] + y0, ax.get_ylim(), [0,1])
ax.text(0.01, yi,
f"{pa} - {pb} {pd['n']}", rotation=90,
ha='left', va='center', transform=ax.transAxes)
ax.set_yticklabels([])
ax.set_yticks(ax.get_ylim())
fig.tight_layout(pad=0.1)
else:
fig = None
return plan_pairs, fig
def plan_pairs_info(self):
"""
Print a summary of the plan_pairs data (shifts, fwhm, etc.)
"""
for pd in self.plan_pairs:
#print(pd)
row = '# fileA fileB dt fwhm shift scale\n'
row += '# {0}\n'.format(self.namestr)
row += '# plan: {0} {1}\n'.format(*pd['plan'])
pa, pb = pd['plan']
gra = self.groups[pa]
grb = self.groups[pb]
for i, (ia, ib) in enumerate(zip(pd['ia'], pd['ib'])):
row += '{0} {1} {2:>7.1f} {3:6.2f} {4:6.2f} {5:6.2f}\n'.format(gra.files[ia], grb.files[ib],
pd['ta'][i], pd['fwhm'][i],
pd['shift'][i], pd['scale'][i])
utils.log_comment(self.logfile, row, verbose=True)
@property
def exptime(self):
"""
Total exposure time across groups
"""
return np.sum([self.groups[k].truitime.sum() for k in self.groups])
@property
def nexp(self):
"""
Total number of exposures across groups
"""
return np.sum([self.groups[k].N for k in self.groups])
def read_ssl_table(self):
"""
Read the attached Science_Slit_List table
"""
from astropy.coordinates import SkyCoord
img = self.groups[self.keys[0]].img[0]
#img.info()
ssl = utils.GTable.read(img['Science_Slit_List'])
#msl = utils.GTable.read(img['Mechanical_Slit_List'])
valid_slits = np.where([t.strip() != '' for t in ssl['Target_Name']])[0]
self.ssl = ssl = ssl[valid_slits][::-1]
# Get mag from Target_List
tsl = utils.GTable.read(img['Target_List'])
tsl_names = [n.strip() for n in tsl['Target_Name']]
ssl_names = [n.strip() for n in ssl['Target_Name']]
self.ssl['Magnitude'] = -1.
self.ssl['target_ra'] = -1.
self.ssl['target_dec'] = -1.
for i, n in enumerate(ssl_names):
if n in tsl_names:
ti = tsl_names.index(n)
ras = '{0}:{1}:{2}'.format(tsl['RA_Hours'][ti],
tsl['RA_Minutes'][ti],
tsl['RA_Seconds'][ti])
des = '{0}:{1}:{2}'.format(tsl['Dec_Degrees'][ti],
tsl['Dec_Minutes'][ti],
tsl['Dec_Seconds'][ti])
target_rd = SkyCoord(ras, des, unit=('hour','deg'))
self.ssl['target_ra'][i] = target_rd.ra.value
self.ssl['target_dec'][i] = target_rd.dec.value
if 'Magnitude' in tsl.colnames:
try:
self.ssl['Magnitude'][i] = float(tsl['Magnitude'][ti])
except ValueError:
self.ssl['Magnitude'][i] = 99.
# Coords
ras = []
des = []
for i in range(len(ssl)):
ras.append('{0}:{1}:{2}'.format(ssl['Slit_RA_Hours'][i],
ssl['Slit_RA_Minutes'][i],
ssl['Slit_RA_Seconds'][i]))
des.append('{0}:{1}:{2}'.format(ssl['Slit_Dec_Degrees'][i],
ssl['Slit_Dec_Minutes'][i],
ssl['Slit_Dec_Seconds'][i]))
slit_rd = SkyCoord(ras, des, unit=('hour','deg'))
self.ssl['slit_ra'] = slit_rd.ra.value
self.ssl['slit_dec'] = slit_rd.dec.value
@property
def ssl_stop(self):
sl = np.cast[float](self.ssl['Slit_length'])
ssl_stop = np.cumsum(sl/0.1799+5.35)-9
return np.minimum(ssl_stop, 2045)
@property
def ssl_start(self):
sl = np.cast[float](self.ssl['Slit_length'])
return np.maximum(self.ssl_stop - sl/0.1799, 4)
@property
def target_names(self):
target_names = [t.strip().replace('/','-').replace(' ','_')
for t in self.ssl['Target_Name']]
return target_names
@property
def target_slit_numbers(self):
slit_numbers = [int(n.strip()) for n in self.ssl['Slit_Number']]
return slit_numbers
@property
def target_keys(self):
target_keys = [f'{self.datemask}-{self.filter}-slit_{n:02d}-{t}'
for n, t in zip(self.target_slit_numbers,
self.target_names)]
return target_keys
def info(self, log=True):
"""
Print summary info of the mask
"""
msg = '\n============================\n'
msg += f'{self.namestr} path={self.path}\n'
msg += '============================\n'
meta = self.meta
msg += f"{self.namestr} {meta['SEMESTER']} {meta['PROGID']} "
msg += f" {meta['PROGPI']} | {meta['PROGTL1']}\n"
for k in self.keys:
msg += f'{self.namestr} {self.groups[k].namestr}\n'
for i in range(len(self.ssl)):
msg += f"{self.namestr} {i:>4} {self.target_names[i]} "
ri, di = self.ssl['target_ra'][i], self.ssl['target_dec'][i]
msg += f"{ri:.6f} {di:.6f} {self.ssl['Magnitude'][i]:.1f}\n"
if log:
utils.log_comment(self.logfile, msg, verbose=True,
show_date=True)
else:
print(msg)
def make_region_file(self, regfile=None, make_figure=True, region_defaults='global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman"'):
"""
Make a region file showing the slits
"""
if regfile is None:
regfile = f'{self.datemask}-{self.filter}.reg'
sq = np.array([[-1, 1, 1, -1], [-1,-1,1,1]]).T
#print('PA', mask.meta['SKYPA3'])
pa = self.meta['SKYPA3']
theta = pa/180*np.pi
_mat = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
if make_figure:
fig, ax = plt.subplots(1,1,figsize=(10,10))
with open(regfile,'w') as fp:
rows = [f'# {self.namestr}\n {region_defaults}\nfk5\n']
fp.write(rows[0])
for i in range(len(self.ssl)):
ra, dec = self.ssl['slit_ra'][i], self.ssl['slit_dec'][i]
cosd = np.cos(dec/180*np.pi)
dim = np.cast[float]([self.ssl['Slit_width'][i],
self.ssl['Slit_length'][i]])/2.
dim2 = np.cast[float]([self.ssl['Slit_width'][i],
self.ssl['Slit_width'][i]])/2.
#dim[0] *= 500
dy = float(self.ssl['Target_to_center_of_slit_distance'][i])
yoff = np.array([0, dy])
scl = np.array([1./cosd, 1])/3600.
rot = (sq*dim).dot(_mat)*scl + np.array([ra, dec])
rot2 = (sq*dim2+yoff).dot(_mat)*scl + np.array([ra, dec])
if make_figure:
pl = ax.plot(*rot.T)
pl = ax.plot(*rot2.T)
ax.scatter(ra, dec, color=pl[0].get_color(), marker='x')
row = 'polygon('
row += ','.join([f'{v:.6f}' for v in rot.flatten()]) + ')\n'
row += 'polygon('
row += ','.join([f'{v:.6f}' for v in rot2.flatten()]) + ')'
sw = float(self.ssl['Slit_width'][i])/2
reg_label = ' # text=<<<{0} {1}>>>\n'
row += reg_label.format(self.target_slit_numbers[i],
self.target_names[i])
row = row.replace('<<<','{').replace('>>>','}')
fp.write(row)
rows.append(row)
if make_figure:
ax.set_aspect(1/cosd)
ax.set_xlim(ax.get_xlim()[::-1])
else:
fig = None
return rows, fig
def search_targets(self, query, ignore_case=True):
"""
String search on the target list
"""
if ignore_case:
has_q = [query.lower() in t.lower() for t in self.target_names]
else:
has_q = [query in t for t in self.target_names]
if np.sum(has_q) > 0:
qidx = np.where(has_q)[0]
for q in qidx:
print(f'{q} : {self.target_names[q]}')
return qidx
else:
return None
def read_exposures(self, min_nexp=4):
"""
Read exposure groups from "Offset_{shift}.txt" files produced by
mospy
"""
self.groups = {}
for offset_file in self.offset_files:
grp = ExposureGroup(offset_file=offset_file, min_nexp=min_nexp,
flat=self.pixel_flat)
if grp.frameid is None:
continue
key = grp.frameid + ''
while key in self.groups:
key += 'p'
grp.frameid = key
self.groups[key] = grp
def flag_cosmic_rays(self, **kwargs):
"""
Flag CRs in individual groups
"""
for k in self.groups:
self.groups[k].flag_cosmic_rays(**kwargs)
def interpolate_slits(self, order=1, debug=False):
"""
Fit slit coefficients and insert/remove slits based on the
Science_Slit_List table when its count differs from the number of
slits found from the flat
"""
if hasattr(self, 'tr_fit'):
tr_fit = self.tr_fit
else:
tr_coeffs = np.array([self.trace_stop[j]
for j in range(self.nslits-1)])
tr_fit = []
for i in range(tr_coeffs.shape[1]):
tc = np.polyfit(self.slit_stop[:-1]-1024, tr_coeffs[:,i],
order)
tr_fit.append(tc)
tr_fit = np.array(tr_fit)
self.tr_fit = tr_fit
self.trace_stop_orig = self.trace_stop
self.trace_start_orig = self.trace_start
if len(self.ssl_stop) > self.nslits:
utils.log_comment(self.logfile,
f'{self.namestr}: Found extra slit!',
verbose=True)
si = np.interp(self.ssl_stop, self.slit_stop,
np.arange(self.nslits))
new = np.where(np.abs(si-np.round(si)) > 0.15)[0][::-1]
if debug:
print('ssl interp', si)
for j in new:
xi = int(si[j])+1
utils.log_comment(self.logfile,
f'{self.namestr}: Insert slit in {xi}',
verbose=True)
self.slit_stop = np.insert(self.slit_stop, xi,
self.ssl_stop[j])
self.slit_start = np.insert(self.slit_start, xi+1,
self.ssl_stop[j]+5.35)
elif ((self.nslits - len(self.ssl)) > 0) & ((self.nslits - len(self.ssl)) <= 2):
pop = []
keep = []
for j in range(len(self.slit_stop)):
ds = self.slit_stop[j] - self.ssl_stop
if np.abs(ds).min() < 10:
keep.append(j)
else:
msg = f'Pop extra slit {j} at {self.slit_stop[j]}'
msg += f' (ds={ds[np.nanargmin(np.abs(ds))]:.2f})'
utils.log_comment(self.logfile, msg, verbose=True)
pop.append(j)
self.slit_stop = self.slit_stop[keep]
self.slit_start = self.slit_start[keep]
self.trace_stop = [self.trace_stop[k] for k in keep]
self.trace_start = [self.trace_start[k] for k in keep]
#self.trace_stop_orig = [self.trace_stop_orig[k] for k in keep]
#self.trace_start_orig = [self.trace_start_orig[k] for k in keep]
self.nslits = len(self.slit_stop)
if len(self.ssl_stop) > self.nslits:
if self.ssl_stop[0] < self.slit_start[0]+5:
utils.log_comment(self.logfile,
f'{self.namestr}: Remove first empty slit',
verbose=True)
self.ssl = self.ssl[1:]
self.trace_stop = np.array([np.polyval(tr_fit[j,:], self.slit_stop-1024)
for j in range(tr_fit.shape[0])]).T
self.trace_mid = np.array([np.polyval(tr_fit[j,:], (self.slit_stop + self.slit_stop)/2.-1024)
for j in range(tr_fit.shape[0])]).T
self.trace_start = np.array([np.polyval(tr_fit[j,:], self.slit_start-1024)
for j in range(tr_fit.shape[0])]).T
def find_slits(self, x0=1024, initial_thresh=800, thresh_step=1.3, make_figure=True, interpolate_coeffs=True, verbose=True, max_iter=5, use_ssl=False):
"""
Find slits based on gradient of the combflat image
"""
import peakutils
grad = (np.gradient(self.flat[0].data, axis=0))
self.grad = grad
xarr = np.arange(2048)
self.xarr = xarr
if initial_thresh is None:
gx = np.abs(grad[:,x0])
thres = 0.2*np.nanmax(gx)
else:
thres = initial_thresh*1
it = 0
pindexes, nindexes = [0], [0,0]
pn, nn = len(pindexes), len(nindexes)
self.use_ssl_slit = use_ssl
if use_ssl:
msg = f'{self.namestr} # Use SSL table for slit definition'
utils.log_comment(self.logfile, msg, verbose=True)
self.slit_stop = np.cast[int](np.round(self.ssl_stop))
self.slit_start = np.cast[int](np.round(self.ssl_start))
gx = np.abs(grad[:,x0])
thres = np.nanmax(gx)*0.2
pindexes = peakutils.indexes(grad[:,x0], thres=thres,
thres_abs=True, min_dist=12)
nindexes = peakutils.indexes(-grad[:,x0], thres=thres,
thres_abs=True, min_dist=12)
self.nslits = len(self.slit_stop)
for j in range(self.nslits):
ds = self.slit_stop[j] - nindexes
if np.nanmin(np.abs(ds)) < 8:
isl = np.nanargmin(np.abs(ds))
self.slit_stop[j] = nindexes[isl]
#
ds = self.slit_start[j] - pindexes
if np.nanmin(np.abs(ds)) < 8:
isl = np.nanargmin(np.abs(ds))
self.slit_start[j] = pindexes[isl]
self.tr_fit = np.array([[-2.76466436e-09, 2.70016208e-08],
[ 2.91466864e-07, 1.29269336e-03],
[ 1.00002258e+00, 1.02418277e+03]])
self.interpolate_slits()
else:
while ((pn != nn) | (np.maximum(nn, pn) > len(self.ssl))) & (it < max_iter):
pindexes = peakutils.indexes(grad[:,x0], thres=thres,
thres_abs=True, min_dist=12)
nindexes = peakutils.indexes(-grad[:,x0], thres=thres,
thres_abs=True, min_dist=12)
pn, nn = len(pindexes), len(nindexes)
msg = f'{self.namestr} # gradient iteration {it},'
msg += f' thresh={thres:.0f} nn={nn} np={pn} nssl={len(self.ssl)}'
utils.log_comment(self.logfile, msg, verbose=verbose)
thres *= thresh_step
it += 1
self.slit_stop = nindexes
self.slit_start = pindexes
self.nslits = len(self.slit_stop)
if self.nslits != (len(self.ssl)):
raise ValueError(f'{self.namestr}: found {self.nslits} slits but SSL table has {len(self.ssl)}')
############
# Fit curved slits
slitp = []
slitn = []
for pi in pindexes:
yi = grad[pi-1:pi+2,x0]
xi = xarr[pi-1:pi+2]
c = np.polyfit(xi, yi, 2)
ymax = -c[1]/(2*c[0])
slitp.append([[x0,ymax]])
for pi in nindexes:
yi = -grad[pi-1:pi+2,x0]
xi = xarr[pi-1:pi+2]
c = np.polyfit(xi, yi, 2)
ymax = -c[1]/(2*c[0])
slitn.append([[x0,ymax]])
for x in range(16, 2048-16, 16):
pi = peakutils.indexes(grad[:,x], thres=thres/2,
thres_abs=True, min_dist=8)
ni = peakutils.indexes(-grad[:,x], thres=thres/2,
thres_abs=True, min_dist=8)
for j in range(self.nslits):
dp = pindexes[j]-pi
dn = nindexes[j]-ni
for k in np.where(np.abs(dp) < 5)[0]:
yi = grad[pi[k]-1:pi[k]+2,x]
xi = xarr[pi[k]-1:pi[k]+2]
c = np.polyfit(xi, yi, 2)
ymax = -c[1]/(2*c[0])
slitp[j].append([x,ymax])
for k in np.where(np.abs(dn) < 5)[0]:
yi = -grad[ni[k]-1:ni[k]+2,x]
xi = xarr[ni[k]-1:ni[k]+2]
c = np.polyfit(xi, yi, 2)
ymax = -c[1]/(2*c[0])
slitn[j].append([x,ymax])
###########
# Fit them
trp = []
trn = []
for i in range(self.nslits):
# msg = f'{self.datemask} Fit slit {i:>2}: {self.slit_start[i]:>4}'
#msg += f' - {self.slit_stop[i]:>4} | {self.target_names[i]}'
#utils.log_comment(self.logfile, msg, verbose=verbose)
ap = np.array(slitp[i])
cp = np.polyfit(ap[:,0]-1024, ap[:,1], 2)
vp = np.polyval(cp, ap[:,0]-1024)
ok = np.abs(vp-ap[:,1]) < 1
cp = np.polyfit(ap[ok,0]-1024, ap[ok,1], 2)
an = np.array(slitn[i])
cn = np.polyfit(an[:,0]-1024, an[:,1], 2)
vn = np.polyval(cn, an[:,0]-1024)
ok = np.abs(vn-an[:,1]) < 1
cn = np.polyfit(an[ok,0]-1024, an[ok,1], 2)
trp.append(cp)
trn.append(cn)
self.trace_start = trp
self.trace_stop = trn
if interpolate_coeffs:
self.interpolate_slits()
##########
# Make the figure
if make_figure:
fig, ax = plt.subplots(1,1,figsize=(12, 5))
ax.plot(grad[:, x0])
#ax.plot(grad[:, 1200])
ax.scatter(xarr[self.slit_start], grad[self.slit_start,x0],
color='r')
ax.scatter(xarr[self.slit_stop], grad[self.slit_stop,x0],
color='r')
for pi, ni in zip(self.slit_start, self.slit_stop):
ax.plot(xarr[[pi, ni]], grad[[pi, ni], x0], color='r',
alpha=0.5)
yl = ax.get_ylim()
dlab = 0.1*(yl[1]-yl[0])
for j, pi in enumerate(self.slit_start):
ax.text(xarr[pi], grad[pi, x0]+dlab, f'{j}', ha='center',
va='bottom', fontsize=8)
for j, pi in enumerate(self.slit_stop):
ax.text(xarr[pi], grad[pi, x0]-dlab, f'{j}', ha='center',
va='top', fontsize=8)
ax.set_ylim(yl[0]-3*dlab, yl[1]+3*dlab)
# Target table expected stop
ax.vlines(self.ssl_stop, yl[0], -200, color='orange', alpha=0.3)
ax.grid()
ax.text(0.05, 0.95, self.namestr, ha='left', va='top',
transform=ax.transAxes)
ax.set_xlabel('y pixel')
ax.set_ylabel('gradient')
fig.tight_layout(pad=0.3)
else:
fig = None
return fig
def find_longpos_trace(self, use_plan=None, thresh=100, make_figure=True):
import peakutils
x0 = 1024
if use_plan is None:
use_plan = self.plans[0]
pa, pb = use_plan
diff = self.groups[pb].sci[0,:,:] - self.groups[pa].sci[0,:,:]
# Mask out extra exposures
for p in [pa, pb]:
if self.groups[p].nexp > 1:
self.groups[p].var[1:,:,:] = 0
self.groups[p].flag_cosmic_rays(minmax_nrej=0, sigma=1000)
prof = diff[:,1024]
pindexes = []
thres = thresh*1
it = -1
while (len(pindexes) != 1) & (it < 10):
it += 1
thres *= 1.2
pindexes = peakutils.indexes(diff[:,x0], thres=thres,
thres_abs=True, min_dist=12)
print(thres, len(pindexes))
peak_flux = diff[pindexes[0], x0]
nindexes = peakutils.indexes(-diff[:,x0], thres=thres,
thres_abs=True, min_dist=12)
pn, nn = len(pindexes), len(nindexes)
#plt.plot(prof)
self.xarr = np.arange(2048)
yarr = np.arange(2048)
slitp = []
slitn = []
for x in range(16, 2048-16, 16):
px = peakutils.indexes(diff[:,x], thres=thres/2.,
thres_abs=True, min_dist=8)
if len(px) == 1:
peaky = peakutils.interpolate(yarr+0.5, diff[:,x],
ind=px, width=10)
slitp.append([x, peaky[0]])
nx = peakutils.indexes(-diff[:,x], thres=thres/2.,
thres_abs=True, min_dist=8)
if len(nx) == 1:
peaky = peakutils.interpolate(yarr+0.5, -diff[:,x],
ind=nx, width=10)
slitn.append([x, peaky[0]])
# Fit the curved traces
i = 0
ap = np.array(slitp)
an = np.array(slitn)
#print('xx', an.shape, ap.shape)
ok = (np.abs(ap[:,1]-np.nanmedian(ap[:,1])) < 20)
cp = np.polyfit(ap[ok,0]-1024, ap[ok,1], 2)
vp = np.polyval(cp, ap[:,0]-1024)
ok = (np.abs(vp-ap[:,1]) < 1) & (np.abs(ap[:,1]-np.median(ap[:,1])) < 10)
cp = np.polyfit(ap[ok,0]-1024, ap[ok,1], 2)
ap = ap[ok,:]
ok = (np.abs(an[:,1]-np.nanmedian(an[:,1])) < 20)
cn = np.polyfit(an[ok,0]-1024, an[ok,1], 2)
vn = np.polyval(cn, an[:,0]-1024)
ok = (np.abs(vn-an[:,1]) < 1) & (np.abs(an[:,1]-np.median(an[:,1])) < 10)
cn = np.polyfit(an[ok,0]-1024, an[ok,1], 2)
an = an[ok,:]
#print('xx', nindexes, pindexes)
start = np.minimum(nindexes, pindexes)[0]-20
stop = np.maximum(nindexes, pindexes)[0]+20
## print('xxx', start, stop)
#stop = start+30
#start = start+10
#start = stop-30
#stop = stop-10
self.slit_start = np.array([start])
self.slit_stop = np.array([stop])
dither_offset = 0.1799*(stop-start-40)/2.
for p in [pa, pb]:
off = self.groups[p].yoffset*1.
for j, o in enumerate(off):
off_i = dither_offset*(1-2*(o < 0))
print(f'Set {p} yoffset {off_i:.2f}')
self.groups[p].yoffset[j] = off_i
#print('xx off', self.groups[p].yoffset)
cpp = cp*1
cnn = cp*1
if nindexes[0] > pindexes[0]:
cpp[-1] = cp[-1] - 20
cnn[-1] = cn[-1] + 20
self.trace_start = [cpp]
self.trace_stop = [cnn]
else:
cpp[-1] = cp[-1] + 20
cnn[-1] = cn[-1] - 20
self.trace_start = [cnn]
self.trace_stop = [cpp]
self.ssl['Target_to_center_of_slit_distance'] = 0.
self.nslits = 1
targ = self.groups['A'].img[0][0].header['TARGNAME'].strip().replace(' ','') + f'-{pa}'
self.ssl.remove_column('Target_Name')
self.ssl['Target_Name'] = targ
self.ssl = self.ssl[:1]
#self.target_names = [t.strip() for t in self.ssl['Target_Name']]
#self.target_keys = [f'{self.datemask}-{self.filter}-{t}' for t in self.target_names]
if make_figure:
sli = slice(start, stop)
fig, ax = plt.subplots(1,1,figsize=(14,5))
arr = diff
#arr = self.groups[pa].sci[0,:,:]
ax.imshow(arr[sli,:], extent=(0, 2048, start, stop), origin='lower', vmin=-peak_flux, vmax=peak_flux)
ax.set_aspect('auto')
ax.plot(*an.T, color='r')
ax.plot(*ap.T, color='r')
ax.set_ylim(start, stop)
ax.text(0.5, 0.5, self.target_keys[0], ha='center', va='center', color='w', transform=ax.transAxes)
ax.grid()
fig.tight_layout(pad=0.5)
else:
fig = None
return fig
def get_slit_params(self, slit, img_data=None, pad=16, skip=4, plan=None, xy_order=3, wave_order=3, verbose=True, show=True, wave_kwargs={}):
"""
Trace parameters of a single slit
"""
start_trace = np.polyval(self.trace_start[slit], self.xarr-1024)
stop_trace = np.polyval(self.trace_stop[slit], self.xarr-1024)
i0 = np.maximum(int(start_trace.min()) - pad, 0)
i1 = np.minimum(int(stop_trace.max()) + pad, 2048)
istop = np.polyval(self.trace_stop[slit], 0)
istart = np.polyval(self.trace_start[slit], 0)
imed = int((i0+i1)/2)
msg = f'{self.datemask} Limits for slit {slit}: {i0} - {i1} ({imed})'
utils.log_comment(self.logfile, msg, verbose=True)
if plan is None:
plan = self.plans[0]
pa, pb = plan
if img_data is None:
img_data = self.groups[pa].sci[0,:,:]
####### median along center of the slit with sky lines
sky_row = np.nanmedian(img_data[imed-5:imed+5,:], axis=0)
############
# Cross-correlate xshifts
y0 = int(start_trace.max())
y1 = int(stop_trace.min())
ysh = np.arange(y0+skip//2, y1-skip//2, skip)
xsh = ysh*0.
for i, yi in tqdm(enumerate(ysh)):
row = np.nanmedian(img_data[yi-skip//2:yi+skip//2+1,:],
axis=0)[None, :]
cc = phase_cross_correlation(row, sky_row[None,:],
upsample_factor=100,
reference_mask=~np.isnan(row),
moving_mask=~np.isnan(sky_row[None,:]),
return_error=True)
try:
(_, xsh[i]), error, diffphase = cc
except ValueError:
(_, xsh[i]) = cc
if (i1-i0 > 200):
dxmax = 300
else:
dxmax = 50
xok = np.isfinite(xsh)
if len(xsh) > 1:
xok &= np.abs(np.gradient(xsh)) < 3
xok &= np.abs(xsh) < dxmax
for it in range(3):
xy_coeffs = np.polyfit(ysh[xok]-imed, xsh[xok], xy_order)
xfit = np.polyval(xy_coeffs, ysh-imed)
xok = np.isfinite(xsh)
if len(xsh) > 1:
xok &= (np.abs(np.gradient(xsh)) < 3) & (np.abs(xfit-xsh) < 3)
#targname = self.ssl['Target_Name'][slit].strip()
#slit_num = int(self.ssl['Slit_Number'][slit])
targname = self.target_names[slit]
slit_num = self.target_slit_numbers[slit]
slit_info = {}
slit_info['slit'] = slit
slit_info['i0'] = i0
slit_info['i1'] = i1
slit_info['sky_row'] = sky_row
slit_info['xy_coeffs'] = xy_coeffs
slit_info['filter'] = self.filter
slit_info['wave_order'] = wave_order
slit_info['slice'] = slice(i0, i1)
slit_info['trace_coeffs'] = self.trace_stop[slit]
slit_info['pad'] = pad
slit_info['istart'] = istart
slit_info['istop'] = istop
slit_info['xsh'] = xsh
slit_info['ysh'] = ysh
slit_info['xok'] = xok
slit_info['target_name'] = targname
slit_info['width'] = float(self.ssl['Slit_width'][slit])
slit_info['length'] = float(self.ssl['Slit_length'][slit])
slit_info['target_offset'] = float(self.ssl['Target_to_center_of_slit_distance'][slit])
# Center of target in slit cutout
yoff = float(slit_info['target_offset'])/0.1799
half = slit_info['istop'] - slit_info['istart']
ty = slit_info['istart'] - slit_info['i0'] + half/2. + yoff
slit_info['target_y'] = ty
slit_info['slit_ra'] = self.ssl['slit_ra'][slit]
slit_info['slit_dec'] = self.ssl['slit_dec'][slit]
slit_info['target_ra'] = self.ssl['target_ra'][slit]
slit_info['target_dec'] = self.ssl['target_dec'][slit]
slit_info['target_mag'] = self.ssl['Magnitude'][slit]
slit_info['target_orig_slit'] = slit_num
slit_info['datemask'] = self.datemask
############
# Fit wavelength
if show:
# Show difference in slit figure
#pa, pb = self.plans[0]
diff = self.groups[pb].sci[0,:,:] - self.groups[pa].sci[0,:,:]
diff /= np.sqrt(self.groups[pa].var[0,:,:])
fig, axes = plt.subplots(3,2,figsize=(12,7),
gridspec_kw={'height_ratios':[2,2,1],
'width_ratios':[3.5,1]})
# Difference
for ia, _data in enumerate([diff, img_data]):
ax = axes[ia][0]
perc = np.nanpercentile(_data[i0:i1,:], [5, 90])
ax.imshow(_data, vmin=perc[0], vmax=perc[1])
if ia == 0:
ax.text(0.02, 0.96,
f'{self.datemask} {self.filter} {slit_num}: {targname}',
ha='left', va='top', transform=ax.transAxes,
bbox=dict(facecolor='w', edgecolor='None'))
ax.set_aspect('auto')
ax.plot(self.xarr, start_trace, color='pink')
ax.plot(self.xarr, stop_trace, color='pink')
ax.set_ylim(i0, i1)
ax.hlines(imed, 0, 200, color='r')
ax.set_xticklabels([])
# x(y) shift
ax = axes[1][1]
ax.scatter(xsh[xok], ysh[xok])
ax.scatter(xsh, ysh, alpha=0.3)
ax.set_yticklabels([])
ax.grid()
ax.set_ylim(*axes[0][0].get_ylim())
yy = np.linspace(y0, y1, 256)
xx = np.polyval(xy_coeffs, yy-imed)
ax.plot(xx, yy, alpha=0.4)
#ax.set_xlim(xsh[xok].min()-1, xsh[xok].max()+1)
ax.set_xlim(xsh.min()-1, xsh.max()+1)
ax.set_xticklabels([])
#ax.set_xlabel(r'$\Delta x$')
_lfit = fit_wavelength_from_sky(sky_row, self.filter,
order=wave_order,
make_figure=False, nsplit=5,
plot_axis=axes[2][0],
**wave_kwargs)
lam_coeffs, wave_fit, figs = _lfit
axes[2][0].set_xlim(wave_fit.min()/1.e4, wave_fit.max()/1.e4)
axes[2][0].legend(loc='upper left')
axes[0][1].axis('off')
axes[2][1].axis('off')
fig.tight_layout(pad=0.5)
else:
fig = None
_lfit = fit_wavelength_from_sky(sky_row, self.filter,
order=wave_order,
make_figure=False,
nsplit=5, **wave_kwargs)
lam_coeffs, wave_fit, figs = _lfit
slit_info['lam_coeffs'] = lam_coeffs
slit_info['wave'] = wave_fit
msg = ('Slit {0}: {1} {2}x{3} {4:.5f} {5:.5f}'.format(slit_num,
slit_info['target_name'], slit_info['width'],
slit_info['length'], slit_info['target_ra'],
slit_info['target_dec']))
utils.log_comment(self.logfile, msg, verbose=verbose)
return slit_info, fig
def drizzle_slit_plan_single(self, slit_info, plan_i=0, linearize_wave=False, log_wave=False, kernel='point', pixfrac=1., sig_clip=(3,3), mask_trace=True, mask_offset=True, mask_overlap=False, mask_single=False, **kwargs):
"""
Drizzle a rectified slit
"""
from drizzlepac import adrizzle
import astropy.wcs as pywcs
plan = self.plans[plan_i]
pd = self.plan_pairs[plan_i]
pa, pb = plan
gra = self.groups[pa]
grb = self.groups[pb]
ia = pd['ia'][0]
ib = pd['ib'][0]
ysl = slit_info['slice']
cutout = (grb.sci[ib,:,:] - gra.sci[ia,:,:])[ysl,:]
cutout_var = (grb.var[ib,:,:] + gra.var[ia,:,:])[ysl,:]
exptime = grb.truitime[pd['ib']].sum() + gra.truitime[pd['ia']].sum()
nexp = pd['n']*2
cutout_wht = 1/cutout_var
cutout_wht[cutout_var == 0] = 0
# Mask out-of-slit pixels
slit = slit_info['slit']
start_trace = np.polyval(self.trace_start[slit], self.xarr-1024)
stop_trace = np.polyval(self.trace_stop[slit], self.xarr-1024)
yp, xp = np.indices(cutout.shape)
cutout_mask = (yp + ysl.start >= start_trace) & (yp + ysl.start <= stop_trace)
rel_offset = np.abs(self.groups[pa].yoffset[0] -
self.groups[pb].yoffset[0])
#########
# Two headers for slit distortions
h0 = pyfits.Header()
hdist = pyfits.Header()
trx = slit_info['xy_coeffs']*1
trn = slit_info['trace_coeffs']*1
trw = slit_info['lam_coeffs']*1
for h in [h0, hdist]:
h['NAXIS'] = 2
h['NAXIS1'] = cutout.shape[1]
h['NAXIS2'] = cutout.shape[0]
h['CRPIX1'] = h['NAXIS1']/2.
h['CRPIX2'] = h['NAXIS2']/2.
h['CRVAL1'] = trw[-1]#/1.e10
h['CRVAL2'] = 0.
h['CD1_1'] = trw[-2]#/1.e10
h['CD2_2'] = 0.1799 #/3600
#h['CTYPE1'] = 'RA---TAN-SIP'
#h['CTYPE2'] = 'DEC--TAN-SIP'
h['CTYPE1'] = '-SIP'
h['CTYPE2'] = '-SIP'
h['A_ORDER'] = 3
h['B_ORDER'] = 3
#hdist['CTYPE1'] = 'RA---TAN-SIP'
#hdist['CTYPE2'] = 'DEC--TAN-SIP'
##########
# Slit distortion as SIP coefficients
# ToDo: full y dependence of the trace curvature
# Tilted x(y)
ncoeff = len(trx)
for ai in range(ncoeff-1):
hdist[f'A_0_{ai+1}'] = -trx[ncoeff-2-ai]
# Curved trace y(x)
ncoeff = len(trn)
if hasattr(self, 'tr_fit'):
# Full distorted trace
print('distorted trace')
dy = (slit_info['i0'] + slit_info['i1'])/2. - 1024
for ai in range(ncoeff-1):
bcoeff = self.tr_fit[ncoeff-2-ai,:]*1.
bcoeff[1] += bcoeff[0]*dy
hdist[f'B_{ai+1}_0'] = -bcoeff[1]
hdist[f'B_{ai+1}_1'] = -bcoeff[0]
else:
for ai in range(ncoeff-1):
hdist[f'B_{ai+1}_0'] = -trn[ncoeff-2-ai]
# Wavelength
ncoeff = len(trw)
for ai in range(ncoeff-2):
h0[f'A_{ai+2}_0'] = trw[ncoeff-3-ai]/trw[-2]
hdist[f'A_{ai+2}_0'] = trw[ncoeff-3-ai]/trw[-2]
in_wcs = pywcs.WCS(hdist)
in_wcs.pscale = 0.1799
if linearize_wave:
print('Linearize wavelength array')
gr = grating_summary[self.filter]
# Recenter and set log wavelength spacing
sh = [cutout.shape[0], gr['N']]
# Set poly keywords
h0['NAXIS1'] = gr['N']
h0['CRPIX1'] = gr['N']//2
h0['CRVAL1'] = gr['ref_wave']#/1.e10
h0['CD1_1'] = gr['dlam']#/1.e10
if log_wave:
loglam_pars = get_grating_loglam('J')
#h0['NAXIS1'] = loglam_pars[0]
#h0['CRPIX1'] = loglam_pars[0]/2
#h0['CRVAL1'] = loglam_pars[1]
#h0['CD1_1'] = loglam_pars[3][0]
h0['CTYPE1'] = '-SIP'
coeffs = loglam_pars[2][::-1]
h0['CRVAL1'] = coeffs[0]
h0['CD1_1'] = coeffs[1]
h0['A_2_0'] = coeffs[2]
h0['A_3_0'] = coeffs[3]
h0['A_ORDER'] = 3
h0['B_ORDER'] = 3
else:
# Strip SIP keywords
h0.remove('CTYPE1')
h0.remove('CTYPE2')
#h0['CTYPE1'] = 'WAVE'
for k in list(h0.keys()):
test = k.startswith('A_') | k.startswith('B_')
test |= k in ['A_ORDER','B_ORDER']
if test:
h0.remove(k)
lam_coeffs = np.array([gr['dlam'], gr['ref_wave']])
else:
sh = cutout.shape
logwcs = {}
lam_coeffs = slit_info['lam_coeffs']*1
outsci = np.zeros((pd['n']*2, *sh), dtype=np.float32)
outwht = np.zeros((pd['n']*2, *sh), dtype=np.float32)
outctx = np.zeros((pd['n']*2, *sh), dtype=np.int16)
npl = pd['n']
slit = slit_info['slit']
targname = slit_info['target_name']
msg = f'{self.namestr} # Drizzle N={npl} {pa}-{pb} exposure pairs for slit {slit}: {targname}'
utils.log_comment(self.logfile, msg, verbose=True)
self.distorted_header = hdist
# Do the drizzling
for i, (ia, ib) in tqdm(enumerate(zip(pd['ia'], pd['ib']))):
if False: #pd['n'] > 1:
if i == 0:
nb = pd['ib'][i+1]
na = pd['ia'][i+1]
else:
nb = pd['ib'][i-1]
na = pd['ia'][i-1]
skyb = (grb.sci[ib,:,:] + grb.sci[nb,:,:])/2.
svarb = (grb.var[ib,:,:] + grb.var[nb,:,:])/2.
skya = (gra.sci[ia,:,:] + gra.sci[na,:,:])/2.
svara = (gra.var[ia,:,:] + gra.var[na,:,:])/2.
else:
skyb = grb.sci[ib,:,:]
svarb = grb.var[ib,:,:]
skya = gra.sci[ia,:,:]
svara = gra.var[ia,:,:]
diff_b = (grb.sci[ib,:,:] - skya)[ysl,:].astype(np.float32, copy=False)
diff_a = (gra.sci[ia,:,:] - skyb)[ysl,:].astype(np.float32, copy=False)
wht_b = 1./(grb.var[ib,:,:] + svara)[ysl,:].astype(np.float32, copy=False)
wht_b[~np.isfinite(wht_b)] = 0
wht_a = 1./(gra.var[ia,:,:] + svarb)[ysl,:].astype(np.float32, copy=False)
wht_a[~np.isfinite(wht_a)] = 0
if mask_trace:
#cutout_wht *= cutout_mask
wht_b *= cutout_mask
wht_a *= cutout_mask
#sci = cutout.astype(np.float32, copy=False)
#wht = cutout_wht.astype(np.float32, copy=False)
# Trace drift
hdist['CRPIX2'] = hdist['NAXIS2']/2. + pd['shift'][i]
in_wcs = pywcs.WCS(hdist)
in_wcs.pscale = 0.1799
### B position, positive
h0['CRPIX2'] = h['NAXIS2']/2. + self.groups[pb].yoffset[0]/0.1799
out_wcs = pywcs.WCS(h0)
out_wcs.pscale = 0.1799
adrizzle.do_driz(diff_b, in_wcs, wht_b, out_wcs,
outsci[i*2,:,:], outwht[i*2,:,:],
outctx[i*2,:,:],
1., 'cps', 1,
wcslin_pscale=0.1799, uniqid=1,
pixfrac=pixfrac, kernel=kernel, fillval='0',
wcsmap=None)
### A position, negative
h0['CRPIX2'] = h['NAXIS2']/2. + self.groups[pa].yoffset[0]/0.1799
out_wcs = pywcs.WCS(h0)
out_wcs.pscale = 0.1799
adrizzle.do_driz(diff_a, in_wcs, wht_a, out_wcs,
outsci[i*2+1,:,:], outwht[i*2+1,:,:],
outctx[i*2+1,:,:],
1., 'cps', 1,
wcslin_pscale=0.1799, uniqid=1,
pixfrac=pixfrac, kernel=kernel, fillval='0',
wcsmap=None)
scale_fwhm = pd['fwhm'] / np.nanmin(pd['fwhm'])
scale_flux = np.nanmax(pd['scale']) / pd['scale']
# weight by inverse FWHM
if 0:
msg = f'{self.namestr} # Weight by inverse FWHM'
utils.log_comment(self.logfile, msg, verbose=True)
scale_weight = 1./scale_fwhm
else:
# weight by flux rather than fwhm as per MOSDEF
if np.allclose(scale_flux, 1):
msg = f'{self.namestr} # No scales found, weight by FWHM'
utils.log_comment(self.logfile, msg, verbose=True)
scale_weight = 1./scale_fwhm
else:
msg = f'{self.namestr} # Weight by sum x fwhm'
utils.log_comment(self.logfile, msg, verbose=True)
scale_weight = 1./(scale_flux*scale_fwhm)
pd['scale_weight'] = scale_weight
for i in range(pd['n']):
outsci[i*2,:,:] *= scale_flux[i]
outwht[i*2,:,:] *= 1./scale_flux[i]**2 * scale_weight[i]
outsci[i*2+1,:,:] *= scale_flux[i]
outwht[i*2+1,:,:] *= 1./scale_flux[i]**2 * scale_weight[i]
if mask_single:
sing = (outwht[0::2,:,:] > 0).sum(axis=0) > 0
sing &= (outwht[1::2,:,:] > 0).sum(axis=0) > 0
outwht *= sing
avg = 0
if sig_clip is not None:
clip = np.isfinite(outwht) & (outwht > 0)
c0 = clip.sum()
for it in range(sig_clip[0]):
if it > 0:
resid = (outsci - avg)*np.sqrt(outwht)
clip = (np.abs(resid) < sig_clip[1]) & (outwht > 0)
msg = f'{self.namestr} # Drizzle {slit} sigma clip {it} {(1-clip.sum()/c0)*100:.2f} %'
utils.log_comment(self.logfile, msg, verbose=True)
num = (outsci*outwht*clip).sum(axis=0)
den = (outwht*clip).sum(axis=0)
avg = num/den
outwht[~clip] = 0
msk = (outwht <= 0) | ~np.isfinite(outsci+outwht)
outsci[msk] = 0
outwht[msk] = 0
h0['SIGCLIPN'] = sig_clip[0], 'Sigma clipping iterations'
h0['SIGCLIPV'] = sig_clip[1], 'Sigma clipping level'
h0['CRPIX2'] = h['NAXIS2']/2.
h0['EXPTIME'] = exptime, 'Integration time, seconds'
h0['NEXP'] = nexp, 'Number of raw exposures'
h0['KERNEL'] = kernel, 'Drizzle kernel'
h0['PIXFRAC'] = pixfrac, 'Drizzle pixfrac'
h0['PLAN'] = f'{pa}{pb}', 'Dither plan'
h0['OFFSETA'] = self.groups[pa].yoffset[0]/0.1799, f'Offset {pa}, pixels'
h0['OFFSETB'] = self.groups[pb].yoffset[0]/0.1799, f'Offset {pb}, pixels'
h0['TARGNAME'] = slit_info['target_name'], 'Target name from slit table'
h0['RA_SLIT'] = slit_info['slit_ra'], 'Target RA from slit table'
h0['DEC_SLIT'] = slit_info['slit_dec'], 'Target declination from slit table'
h0['RA_TARG'] = slit_info['target_ra'], 'Target RA from slit table'
h0['DEC_TARG'] = slit_info['target_dec'], 'Target declination from slit table'
h0['MAG_TARG'] = slit_info['target_mag'], 'Magnitude from slit table'
h0['FILTER'] = self.filter, 'MOSFIRE filter'
h0['DATEMASK'] = slit_info['datemask'], 'Unique mask identifier'
h0['SLITIDX'] = slit_info['slit'], 'Slit number, counting from y=0'
h0['SLITNUM'] = (slit_info['target_orig_slit'],
'Slit number in mask table')
h0['Y0'] = slit_info['i0'], 'Bottom of the slit cutout'
h0['Y1'] = slit_info['i1'], 'Top of the slit cutout'
h0['YSTART'] = slit_info['istart'], 'Bottom of the slit'
h0['YSTOP'] = slit_info['istop'], 'Top of the slit'
h0['YPAD'] = slit_info['pad'], 'Cutout padding'
h0['CUNIT2'] = 'arcsec'
if linearize_wave:
h0['CTYPE1'] = 'WAVE'
h0['CUNIT1'] = 'Angstrom'
if log_wave:
h0['CTYPE1'] = 'WAVE-LOG'
for k in list(h0.keys()):
test = k.startswith('A_') | k.startswith('B_')
test |= k in ['A_ORDER','B_ORDER']
if test:
h0.remove(k)
h0['MASKOFF'] = mask_offset, 'Mask pixels outside of offsets'
if mask_offset:
yp = np.arange(sh[0])
ymsk = (yp < np.abs(h0['OFFSETA'])+slit_info['pad']*mask_overlap)
ymsk |= (yp > sh[0]-np.abs(h0['OFFSETB'])-slit_info['pad']*mask_overlap)
outsci[:,ymsk,:] = 0
outwht[:,ymsk,:] = 0
# Target position
h0['TARGOFF'] = slit_info['target_offset'], 'Target offset to slit center, arcsec'
h0['TARGYPIX'] = slit_info['target_y'], 'Expected central y pixel of target'
h0['CRPIX2'] = h0['TARGYPIX']
h0['TRAORDER'] = len(trn), 'Order of curved trace fit'
for i, c in enumerate(trn):
h0[f'TRACOEF{i}'] = c, 'Trace coefficient'
h0['LAMORDER'] = len(lam_coeffs)-1, 'Order of wavelength solution'
for i, c in enumerate(lam_coeffs):
h0[f'LAMCOEF{i}'] = c, 'Wavelength solution coefficient'
meta = self.meta
for k in OBJ_KEYS:
h0[k] = meta[k]
for k in SEQ_KEYS:
for ext in ['_MIN','_MED','_MAX']:
key = f'{k}{ext}'
h0[key] = meta[key]
h0['MJD-OBS'] = h0['MJD-OBS_MED']
return h0, outsci, outwht
def drizzle_all_plans(self, slit_info, skip_plans=[], **kwargs):
"""
Drizzle and combine all available plans
Todo:
1) Find max S/N within the trace window
2) Separate extensions for combinations by offset position
3) combination across multiple plans
"""
#drz = {}
num = None
fi = 0
self.drizzled_plans = []
for i, plan in enumerate(self.plans):
if plan in skip_plans:
continue
key = ''.join(plan)
drz_i = self.drizzle_slit_plan_single(slit_info, plan_i=i,
**kwargs)
self.drizzled_plans.append(drz_i)
if num is None:
num = (drz_i[1]*drz_i[2]).sum(axis=0)
wht = drz_i[2].sum(axis=0)
head = drz_i[0]
else:
num += (drz_i[1]*drz_i[2]).sum(axis=0)
wht += drz_i[2].sum(axis=0)
head['EXPTIME'] += drz_i[0]['EXPTIME']
head['NEXP'] += drz_i[0]['NEXP']
head[f'PLAN{i+1}'] = key
pd = self.plan_pairs[i]
pa, pb = plan
gra = self.groups[pa]
grb = self.groups[pb]
for j in range(pd['n']):
fi += 1
head[f'FILEA{fi}'] = gra.files[pd['ia'][j]], f'File from plan {pa}'
head[f'FILEB{fi}'] = grb.files[pd['ib'][j]], f'File from plan {pb}'
head[f'SHIFT{fi}'] = np.float32(pd['shift'][j]), 'Shift [pix]'
head[f'FWHM{fi}'] = np.float32(pd['fwhm'][j]), 'FWHM [pix]'
head[f'SUM{fi}'] = np.float32(pd['scale'][j]), 'Profile sum'
head[f'WSCALE{fi}'] = np.float32(pd['scale_weight'][j]), 'Weight scaling'
outsci = num/wht
outwht = wht
msk = (wht == 0) | (~np.isfinite(outsci+outwht))
outsci[msk] = 0
outwht[msk] = 0
hdu = pyfits.HDUList()
hdu.append(pyfits.ImageHDU(data=outsci, header=head, name='SCI'))
hdu.append(pyfits.ImageHDU(data=outwht, header=head, name='WHT'))
return hdu
def get_plan_drift(self, slit_info, plan_i=0, ax=None, fwhm_bounds=[2,10],
driz_kwargs=dict(sig_clip=(5, 3), mask_single=True, mask_offset=False, mask_trace=True), profile_model=None, use_peak=True):
"""
Get drift from bright target
"""
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.models import Lorentz1D, Gaussian1D, Moffat1D
if profile_model is None:
profile_model = Moffat1D()
pd = self.plan_pairs[plan_i]
pd['fwhm'] = np.ones_like(pd['fwhm'])
pd['shift'] = np.zeros_like(pd['shift'])
pd['scale'] = np.ones_like(pd['fwhm'])
pd['scale_weight'] = np.ones_like(pd['fwhm'])
h0, outsci, outwht = self.drizzle_slit_plan_single(slit_info,
plan_i=plan_i, **driz_kwargs)
mu = slit_info['target_y']
if 'x_0' in profile_model.param_names:
profile_model.x_0 = mu
else:
profile_model.mean = mu
# bounds on fwhm
for k in ['stddev', 'fwhm','gamma']:
if k in profile_model.param_names:
pi = profile_model.param_names.index(k)
fscl = profile_model.parameters[pi] / profile_model.fwhm
profile_model.bounds[k] = [fwhm_bounds[0]*fscl, fwhm_bounds[1]*fscl]
stacksci = outsci
stackwht = stacksci*0.
profs = []
(pa, pb) = plan = self.plans[plan_i]
fwhm = self.groups[pa].meta['GUIDFWHM']
npair = pd['n']
fit_fwhm = np.zeros(npair)
fit_x = np.zeros(npair)
fit_sum = np.zeros(npair)
# Combine A-B
num = outsci[0::2,:,:]*outwht[0::2,:,:] + outsci[1::2,:,:]*outwht[1::2,:,:]
den = outwht[0::2,:,:] + outwht[1::2,:,:]
ab_avg = num/den
ab_avg[den <= 0] = 0
for i in range(npair):
kws = dict(alpha=0.5, color=plt.cm.jet((i+1)/npair))
yprof = drizzled_profile((h0, ab_avg[i,:,:], den[i,:,:]),
ax=ax, plot_kws=kws)
xprof = np.arange(len(yprof))
#ok = (yprof > 0) & (np.abs(xprof-slit_info['target_y']) < 10)
if use_peak:
xmax = xprof[np.nanargmax(yprof)]
#print('xxx', xmax, slit_info['target_y'])
else:
xcut = np.abs(xprof-slit_info['target_y']) < 10
xmax = xprof[np.nanargmax(yprof*xcut)]
ok = (yprof > 0) & (np.abs(xprof-xmax) < 10)
if 'x_0' in profile_model.param_names:
profile_model.x_0 = xmax
else:
profile_model.mean = xmax
profile_model.amplitude = yprof[ok].max()
#print(xprof.shape, yprof.shape, ok.sum())
mfit = LevMarLSQFitter()(profile_model, xprof[ok], yprof[ok])
fit_fwhm[i] = mfit.fwhm
fit_x[i] = mfit.x_0.value
fit_sum[i] = np.trapz(mfit(xprof), xprof)
#fit_sum[i] = mfit(xprof).max()
#if ax is not None:
# ax.plot(xprof, mfit(xprof), color='r')
ok = np.abs(fit_x - np.median(fit_x)) < 2.5
ok &= (fit_fwhm < 17) & (fit_fwhm > 0.8)
ok &= np.isfinite(fit_x + fit_fwhm + fit_sum)
bad = np.where(~ok)[0]
if len(bad) > 0.5*pd['n']:
msg = f'{self.namestr} # Too many bad exposures found in drift'
msg += f" ({len(bad)} / {pd['n']})\n"
utils.log_comment(self.logfile, msg, verbose=True)
pd['shift'] = np.zeros(pd['n'])
            # guider FWHM is in arcsec; 0.1799 arcsec/pix converts it to detector pixels
            pd['fwhm'] = np.array(self.groups[pd['plan'][0]].meta['GUIDFWHM'])[pd['ia']]/0.1799
pd['scale'] = np.ones(pd['n'])
pd['scale_weight'] = np.ones(pd['n'])
return pd['fwhm'], pd['shift'], pd['scale']
if len(bad) > 0:
for i in bad[::-1]:
msg = f'{self.namestr} # Remove bad exposure from list: '
msg += f'{i} fwhm={fit_fwhm[i]:.2f}'
msg += f' shift={fit_x[i]:.2f} scale={fit_sum[i]:.2f}'
utils.log_comment(self.logfile, msg, verbose=True)
#print('xxx', i, len(pd['ia']), len(pd['ib']))
pd['ia'].pop(i)
pd['ib'].pop(i)
#pd['ia'] = pd['ia'][ok]
#pd['ib'] = pd['ib'][ok]
#pd['ta'] = pd['ta'][ok]
pd['n'] = len(pd['ia'])
#print('xxx set fwhm')
pd['fwhm'] = np.maximum(fit_fwhm[ok], 1.1)
pd['shift'] = fit_x[ok] - slit_info['target_y'] #fit_x[ok][0]
pd['scale'] = fit_sum[ok]
#self.plan_pairs[plan_i] = pd
return fit_fwhm, fit_x, fit_sum
def get_target_drift(self, slit, use_peak=True, profile_model=None):
"""
Get drifts of all offset plans and make a figure
"""
fig, axes = plt.subplots(1,2,figsize=(14,5))
slit_info, fig = self.get_slit_params(slit=slit, xy_order=3, pad=16,
show=False)
for i in range(len(self.plans)):
self.get_plan_drift(slit_info, plan_i=i, ax=axes[0],
use_peak=use_peak,
profile_model=profile_model)
td = self.plan_pairs[i]
gra = self.groups[self.plans[i][0]]
airm = np.array(gra.meta['AIRMASS'])[td['ia']]
            guid = np.array(gra.meta['GUIDFWHM'])[td['ia']]
import os
import time
import numpy as np
import tifffile as tiff
from PIL import Image
from src.utils import load_product, get_cls, extract_collapsed_cls, extract_cls_mask, predict_img, image_normalizer
def evaluate_test_set(model, dataset, num_gpus, params, save_output=False, write_csv=True):
if dataset == 'Biome_gt':
__evaluate_biome_dataset__(model, num_gpus, params, save_output=save_output, write_csv=write_csv)
elif dataset == 'SPARCS_gt':
__evaluate_sparcs_dataset__(model, num_gpus, params, save_output=save_output, write_csv=write_csv)
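# Usage sketch: run the evaluation for a trained model. `model` and `params`
# are assumed to match the signatures used throughout this module.
#
#     evaluate_test_set(model, dataset='Biome_gt', num_gpus=1, params=params,
#                       save_output=True, write_csv=True)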
def __evaluate_sparcs_dataset__(model, num_gpus, params, save_output=False, write_csv=True):
# Find the number of classes and bands
if params.collapse_cls:
n_cls = 1
else:
n_cls = np.size(params.cls)
n_bands = np.size(params.bands)
# Get the name of all the products (scenes)
data_path = params.project_path + "data/raw/SPARCS_dataset/"
toa_path = params.project_path + "data/processed/SPARCS_TOA/"
products = sorted(os.listdir(data_path))
products = [p for p in products if 'data.tif' in p]
products = [p for p in products if 'xml' not in p]
# If doing CV, only evaluate on test split
if params.split_dataset:
products = params.test_tiles[1]
# Define thresholds and initialize evaluation metrics dict
thresholds = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5,
0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
evaluation_metrics = {}
evaluating_product_no = 1 # Used in print statement later
for product in products:
# Time the prediction
start_time = time.time()
# Load data
img_all_bands = tiff.imread(data_path + product)
img_all_bands[:, :, 0:8] = tiff.imread(toa_path + product[:-8] + 'toa.TIF')
# Load the relevant bands and the mask
img = np.zeros((np.shape(img_all_bands)[0], np.shape(img_all_bands)[1], np.size(params.bands)))
for i, b in enumerate(params.bands):
if b < 8:
img[:, :, i] = img_all_bands[:, :, b-1]
else: # Band 8 is not included in the tiff file
img[:, :, i] = img_all_bands[:, :, b-2]
# Load true mask
mask_true = np.array(Image.open(data_path + product[0:25] + 'mask.png'))
# Pad the image for improved borders
padding_size = params.overlap
npad = ((padding_size, padding_size), (padding_size, padding_size), (0, 0))
img_padded = np.pad(img, pad_width=npad, mode='symmetric')
# Get the masks
#cls = get_cls(params)
cls = [5] # TODO: Currently hardcoded to look at clouds - fix it!
# Create the binary masks
if params.collapse_cls:
mask_true = extract_collapsed_cls(mask_true, cls)
else:
for l, c in enumerate(params.cls):
                y = extract_cls_mask(mask_true, c)
# Save the binary masks as one hot representations
mask_true[:, :, l] = y[:, :, 0]
# Predict the images
predicted_mask_padded, _ = predict_img(model, params, img_padded, n_bands, n_cls, num_gpus)
# Remove padding
predicted_mask = predicted_mask_padded[padding_size:-padding_size, padding_size:-padding_size, :]
# Create a nested dict to save evaluation metrics for each product
evaluation_metrics[product] = {}
# Find the valid pixels and cast to uint8 to reduce processing time
valid_pixels_mask = np.uint8(np.clip(img[:, :, 0], 0, 1))
mask_true = np.uint8(mask_true)
# Loop over different threshold values
for j, threshold in enumerate(thresholds):
predicted_binary_mask = np.uint8(predicted_mask >= threshold)
accuracy, omission, comission, pixel_jaccard, precision, recall, f_one_score, tp, tn, fp, fn, npix = calculate_evaluation_criteria(
valid_pixels_mask, predicted_binary_mask, mask_true)
# Create an additional nesting in the dict for each threshold value
evaluation_metrics[product]['threshold_' + str(threshold)] = {}
# Save the values in the dict
evaluation_metrics[product]['threshold_' + str(threshold)]['tp'] = tp
evaluation_metrics[product]['threshold_' + str(threshold)]['fp'] = fp
evaluation_metrics[product]['threshold_' + str(threshold)]['fn'] = fn
evaluation_metrics[product]['threshold_' + str(threshold)]['tn'] = tn
evaluation_metrics[product]['threshold_' + str(threshold)]['npix'] = npix
evaluation_metrics[product]['threshold_' + str(threshold)]['accuracy'] = accuracy
evaluation_metrics[product]['threshold_' + str(threshold)]['precision'] = precision
evaluation_metrics[product]['threshold_' + str(threshold)]['recall'] = recall
evaluation_metrics[product]['threshold_' + str(threshold)]['f_one_score'] = f_one_score
evaluation_metrics[product]['threshold_' + str(threshold)]['omission'] = omission
evaluation_metrics[product]['threshold_' + str(threshold)]['comission'] = comission
evaluation_metrics[product]['threshold_' + str(threshold)]['pixel_jaccard'] = pixel_jaccard
print('Testing product ', evaluating_product_no, ':', product)
exec_time = str(time.time() - start_time)
print("Prediction finished in : " + exec_time + "s")
for threshold in thresholds:
print("threshold=" + str(threshold) +
": tp=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['tp']) +
": fp=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['fp']) +
": fn=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['fn']) +
": tn=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['tn']) +
": Accuracy=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['accuracy']) +
": precision=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['precision']) +
": recall=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['recall']) +
": omission=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['omission']) +
": comission=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['comission']) +
": pixel_jaccard=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['pixel_jaccard']))
evaluating_product_no += 1
# Save images and predictions
data_output_path = params.project_path + "data/output/SPARCS/"
if not os.path.isfile(data_output_path + '%s_photo.png' % product[0:24]):
Image.open(data_path + product[0:25] + 'photo.png').save(data_output_path + '%s_photo.png' % product[0:24])
Image.open(data_path + product[0:25] + 'mask.png').save(data_output_path + '%s_mask.png' % product[0:24])
        if save_output:
            # Save predicted mask as 16 bit png file (https://github.com/python-pillow/Pillow/issues/2970)
            arr = np.uint16(predicted_mask[:, :, 0] * 65535)
            array_buffer = arr.tobytes()
            img = Image.new("I", arr.T.shape)
            img.frombytes(array_buffer, 'raw', "I;16")
            img.save(data_output_path + '%s-model%s-prediction.png' % (product[0:24], params.modelID))
#Image.fromarray(np.uint8(predicted_mask[:, :, 0]*255)).save(data_output_path + '%s-model%s-prediction.png' % (product[0:24], params.modelID))
exec_time = str(time.time() - start_time)
print("Dataset evaluated in: " + exec_time + "s")
print("Or " + str(float(exec_time)/np.size(products)) + "s per image")
if write_csv:
write_csv_files(evaluation_metrics, params)
def __evaluate_biome_dataset__(model, num_gpus, params, save_output=False, write_csv=True):
"""
    Evaluates all products in the Biome dataset and returns performance metrics
"""
print('------------------------------------------')
print("Evaluate model on visualization data set:")
# Find the number of classes and bands
if params.collapse_cls:
n_cls = 1
else:
n_cls = np.size(params.cls)
n_bands = np.size(params.bands)
folders = sorted(os.listdir(params.project_path + "data/raw/Biome_dataset/"))
folders = [f for f in folders if '.' not in f] # Filter out .gitignore
product_names = []
thresholds = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5,
0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
evaluation_metrics = {}
evaluating_product_no = 1 # Used in print statement later
# Used for timing tests
load_time = []
prediction_time = []
threshold_loop_time = []
save_time = []
total_time = []
for folder in folders:
print('#########################')
print('TESTING BIOME: ' + folder)
print('#########################')
products = sorted(os.listdir(params.project_path + "data/raw/Biome_dataset/" + folder + "/BC/"))
# If doing CV, only evaluate on test split
if params.split_dataset:
print('NOTE: THE BIOME DATASET HAS BEEN SPLIT INTO TRAIN AND TEST')
products = [f for f in products if f in params.test_tiles[1]]
else:
print('NOTE: THE ENTIRE BIOME DATASET IS CURRENTLY BEING USED FOR TEST')
for product in products:
print('------------------------------------------')
print('Testing product ', evaluating_product_no, ':', product)
data_path = params.project_path + "data/raw/Biome_dataset/" + folder + "/BC/" + product + "/"
toa_path = params.project_path + "data/processed/Biome_TOA/" + folder + "/BC/" + product + "/"
product_names.append(product)
start_time = time.time()
img, img_rgb, valid_pixels_mask = load_product(product, params, data_path, toa_path)
load_time.append(time.time() - start_time)
# Load the true classification mask
mask_true = tiff.imread(data_path + product + '_fixedmask.TIF') # The 30 m is the native resolution
# Get the masks
cls = get_cls('Landsat8', 'Biome_gt', params.cls)
# Create the binary masks
if params.collapse_cls:
mask_true = extract_collapsed_cls(mask_true, cls)
else:
for l, c in enumerate(params.cls):
                    y = extract_cls_mask(mask_true, c)
# Save the binary masks as one hot representations
mask_true[:, :, l] = y[:, :, 0]
prediction_time_start = time.time()
predicted_mask, _ = predict_img(model, params, img, n_bands, n_cls, num_gpus)
prediction_time.append(time.time() - prediction_time_start)
# Create a nested dict to save evaluation metrics for each product
evaluation_metrics[product] = {}
threshold_loop_time_start = time.time()
mask_true = np.uint8(mask_true)
# Loop over different threshold values
for j, threshold in enumerate(thresholds):
predicted_binary_mask = np.uint8(predicted_mask >= threshold)
accuracy, omission, comission, pixel_jaccard, precision, recall, f_one_score, tp, tn, fp, fn, npix = calculate_evaluation_criteria(
valid_pixels_mask, predicted_binary_mask, mask_true)
# Create an additional nesting in the dict for each threshold value
evaluation_metrics[product]['threshold_' + str(threshold)] = {}
# Save the values in the dict
evaluation_metrics[product]['threshold_' + str(threshold)]['biome'] = folder # Save biome type too
evaluation_metrics[product]['threshold_' + str(threshold)]['tp'] = tp
evaluation_metrics[product]['threshold_' + str(threshold)]['fp'] = fp
evaluation_metrics[product]['threshold_' + str(threshold)]['fn'] = fn
evaluation_metrics[product]['threshold_' + str(threshold)]['tn'] = tn
evaluation_metrics[product]['threshold_' + str(threshold)]['npix'] = npix
evaluation_metrics[product]['threshold_' + str(threshold)]['accuracy'] = accuracy
evaluation_metrics[product]['threshold_' + str(threshold)]['precision'] = precision
evaluation_metrics[product]['threshold_' + str(threshold)]['recall'] = recall
evaluation_metrics[product]['threshold_' + str(threshold)]['f_one_score'] = f_one_score
evaluation_metrics[product]['threshold_' + str(threshold)]['omission'] = omission
evaluation_metrics[product]['threshold_' + str(threshold)]['comission'] = comission
evaluation_metrics[product]['threshold_' + str(threshold)]['pixel_jaccard'] = pixel_jaccard
for threshold in thresholds:
print("threshold=" + str(threshold) +
": tp=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['tp']) +
": fp=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['fp']) +
": fn=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['fn']) +
": tn=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['tn']) +
": Accuracy=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['accuracy']) +
": precision=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['precision'])+
": recall=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['recall']) +
": omission=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['omission']) +
": comission=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['comission'])+
": pixel_jaccard=" + str(evaluation_metrics[product]['threshold_' + str(threshold)]['pixel_jaccard']))
threshold_loop_time.append(time.time() - threshold_loop_time_start)
evaluating_product_no += 1
# Save images and predictions
save_time_start = time.time()
data_output_path = params.project_path + "data/output/Biome/"
if not os.path.isfile(data_output_path + '%s-photo.png' % product):
img_enhanced_contrast = image_normalizer(img_rgb, params, type='enhance_contrast')
Image.fromarray(np.uint8(img_enhanced_contrast * 255)).save(data_output_path + '%s-photo.png' % product)
Image.open(data_path + product + '_fixedmask.TIF').save(data_output_path + '%s-mask.png' % product)
# Save predicted mask as 16 bit png file (https://github.com/python-pillow/Pillow/issues/2970)
arr = np.uint16(predicted_mask[:, :, 0] * 65535)
array_buffer = arr.tobytes()
img = Image.new("I", arr.T.shape)
img.frombytes(array_buffer, 'raw', "I;16")
if save_output:
img.save(data_output_path + '%s-model%s-prediction.png' % (product, params.modelID))
save_time.append(time.time() - save_time_start)
#Image.fromarray(np.uint16(predicted_mask[:, :, 0] * 65535)).\
# save(data_output_path + '%s-model%s-prediction.png' % (product, params.modelID))
total_time.append(time.time() - start_time)
print("Data loaded in : " + str(load_time[-1]) + "s")
print("Prediction finished in : " + str(prediction_time[-1]) + "s")
print("Threshold loop finished in : " + str(threshold_loop_time[-1]) + "s")
print("Results saved in : " + str(save_time[-1]) + "s")
print("Total time for product finished in : " + str(total_time[-1]) + "s")
# Print timing results
print("Timing analysis for Biome dataset:")
print("Load time: mean val.=" + str(np.mean(load_time)) + ", with std.=" + str(np.std(load_time)))
print("Prediction time: mean val.=" + str(np.mean(prediction_time)) + ", with std.=" + str(np.std(prediction_time)))
print("Threshold loop time: mean val.=" + str(np.mean(threshold_loop_time)) + ", with std.=" + str(np.std(threshold_loop_time)))
print("Save time: mean val.=" + str(np.mean(save_time)) + ", with std.=" + str(np.std(save_time)))
print("Total time: mean val.=" + str(np.mean(total_time)) + ", with std.=" + str(np.std(total_time)))
# The mean jaccard index is not a weighted average of the number of pixels, because the number of pixels in the
# product is dependent on the angle of the product. I.e., if the visible pixels are tilted 45 degrees, there will
# be a lot of NaN pixels. Hence, the number of visible pixels is constant for all products.
# for i, threshold in enumerate(thresholds):
# params.threshold = threshold # Used when writing the csv files
# write_csv_files(np.mean(pixel_jaccard[i, :]), pixel_jaccard[i, :], product_names, params)
if write_csv:
write_csv_files(evaluation_metrics, params)
def calculate_evaluation_criteria(valid_pixels_mask, predicted_binary_mask, true_binary_mask):
# From https://www.kaggle.com/lopuhin/full-pipeline-demo-poly-pixels-ml-poly
# with correction for toggling true/false from
# https://stackoverflow.com/questions/39164786/invert-0-and-1-in-a-binary-array
    # Need to AND with a mask showing where there are valid pixels, to avoid including pixels with value=0
# Count number of actual pixels
npix = valid_pixels_mask.sum()
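    # Illustrative sketch of the confusion-matrix arithmetic implied by the
    # metrics above (assumes 2-D binary masks; names follow this function):
    #
    #     pred = predicted_binary_mask & valid_pixels_mask
    #     true = true_binary_mask & valid_pixels_mask
    #     tp = np.sum(pred & true)
    #     fp = np.sum(pred & (1 - true))
    #     fn = np.sum((1 - pred) & true)
    #     tn = npix - tp - fp - fn
    #     accuracy = (tp + tn) / npix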
    if np.ndim(predicted_binary_mask) == 3:
        # Assumption: collapse a one-hot (H, W, 1) prediction to 2-D before masking
        predicted_binary_mask = predicted_binary_mask[:, :, 0]
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path as osp
import numpy as np
import time
from . import lime_base
from ._session_preparation import paddle_get_fc_weights, compute_features_for_kmeans, gen_user_home
from .normlime_base import combine_normlime_and_lime, get_feature_for_kmeans, load_kmeans_model
from paddlex.interpret.as_data_reader.readers import read_image
import paddlex as pdx  # needed below for utils.download_and_decompress
import paddlex.utils.logging as logging
import cv2
class CAM(object):
def __init__(self, predict_fn, label_names):
"""
Args:
predict_fn: input: images_show [N, H, W, 3], RGB range(0, 255)
output: [
logits [N, num_classes],
feature map before global average pooling [N, num_channels, h_, w_]
]
"""
self.predict_fn = predict_fn
self.label_names = label_names
def preparation_cam(self, data_):
image_show = read_image(data_)
result = self.predict_fn(image_show)
logit = result[0][0]
if abs(np.sum(logit) - 1.0) > 1e-4:
# softmax
logit = logit - np.max(logit)
exp_result = np.exp(logit)
probability = exp_result / np.sum(exp_result)
else:
probability = logit
# only interpret top 1
pred_label = np.argsort(probability)
pred_label = pred_label[-1:]
self.predicted_label = pred_label[0]
self.predicted_probability = probability[pred_label[0]]
self.image = image_show[0]
self.labels = pred_label
fc_weights = paddle_get_fc_weights()
feature_maps = result[1]
l = pred_label[0]
ln = l
if self.label_names is not None:
ln = self.label_names[l]
prob_str = "%.3f" % (probability[pred_label[0]])
logging.info("predicted result: {} with probability {}.".format(
ln, prob_str))
return feature_maps, fc_weights
def interpret(self, data_, visualization=True, save_outdir=None):
feature_maps, fc_weights = self.preparation_cam(data_)
cam = get_cam(self.image, feature_maps, fc_weights,
self.predicted_label)
if visualization or save_outdir is not None:
import matplotlib.pyplot as plt
from skimage.segmentation import mark_boundaries
l = self.labels[0]
ln = l
if self.label_names is not None:
ln = self.label_names[l]
psize = 5
nrows = 1
ncols = 2
plt.close()
f, axes = plt.subplots(
nrows, ncols, figsize=(psize * ncols, psize * nrows))
for ax in axes.ravel():
ax.axis("off")
axes = axes.ravel()
axes[0].imshow(self.image)
prob_str = "{%.3f}" % (self.predicted_probability)
axes[0].set_title("label {}, proba: {}".format(ln, prob_str))
axes[1].imshow(cam)
axes[1].set_title("CAM")
if save_outdir is not None:
save_fig(data_, save_outdir, 'cam')
if visualization:
plt.show()
return
class LIME(object):
def __init__(self,
predict_fn,
label_names,
num_samples=3000,
batch_size=50):
"""
LIME wrapper. See lime_base.py for the detailed LIME implementation.
Args:
predict_fn: from image [N, H, W, 3] to logits [N, num_classes], this is necessary for computing LIME.
num_samples: the number of samples that LIME takes for fitting.
batch_size: batch size for model inference each time.
"""
self.num_samples = num_samples
self.batch_size = batch_size
self.predict_fn = predict_fn
self.labels = None
self.image = None
self.lime_interpreter = None
self.label_names = label_names
def preparation_lime(self, data_):
image_show = read_image(data_)
result = self.predict_fn(image_show)
result = result[0] # only one image here.
if abs(np.sum(result) - 1.0) > 1e-4:
# softmax
result = result - np.max(result)
exp_result = np.exp(result)
probability = exp_result / np.sum(exp_result)
else:
probability = result
# only interpret top 1
pred_label = np.argsort(probability)
pred_label = pred_label[-1:]
self.predicted_label = pred_label[0]
self.predicted_probability = probability[pred_label[0]]
self.image = image_show[0]
self.labels = pred_label
l = pred_label[0]
ln = l
if self.label_names is not None:
ln = self.label_names[l]
prob_str = "%.3f" % (probability[pred_label[0]])
logging.info("predicted result: {} with probability {}.".format(
ln, prob_str))
end = time.time()
algo = lime_base.LimeImageInterpreter()
interpreter = algo.interpret_instance(
self.image,
self.predict_fn,
self.labels,
0,
num_samples=self.num_samples,
batch_size=self.batch_size)
self.lime_interpreter = interpreter
logging.info('lime time: ' + str(time.time() - end) + 's.')
def interpret(self, data_, visualization=True, save_outdir=None):
if self.lime_interpreter is None:
self.preparation_lime(data_)
if visualization or save_outdir is not None:
import matplotlib.pyplot as plt
from skimage.segmentation import mark_boundaries
l = self.labels[0]
ln = l
if self.label_names is not None:
ln = self.label_names[l]
psize = 5
nrows = 2
weights_choices = [0.6, 0.7, 0.75, 0.8, 0.85]
ncols = len(weights_choices)
plt.close()
f, axes = plt.subplots(
nrows, ncols, figsize=(psize * ncols, psize * nrows))
for ax in axes.ravel():
ax.axis("off")
axes = axes.ravel()
axes[0].imshow(self.image)
prob_str = "{%.3f}" % (self.predicted_probability)
axes[0].set_title("label {}, proba: {}".format(ln, prob_str))
axes[1].imshow(
mark_boundaries(self.image, self.lime_interpreter.segments))
axes[1].set_title("superpixel segmentation")
# LIME visualization
for i, w in enumerate(weights_choices):
num_to_show = auto_choose_num_features_to_show(
self.lime_interpreter, l, w)
temp, mask = self.lime_interpreter.get_image_and_mask(
l,
positive_only=True,
hide_rest=False,
num_features=num_to_show)
axes[ncols + i].imshow(mark_boundaries(temp, mask))
axes[ncols + i].set_title(
"label {}, first {} superpixels".format(ln, num_to_show))
if save_outdir is not None:
save_fig(data_, save_outdir, 'lime', self.num_samples)
if visualization:
plt.show()
return
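# Usage sketch (assumed paths): interpret a single image with LIME and save
# the figure without opening a window.
#
#     lime = LIME(predict_fn, label_names, num_samples=1000, batch_size=25)
#     lime.interpret('test.jpg', visualization=False, save_outdir='./out')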
class NormLIMEStandard(object):
def __init__(self,
predict_fn,
label_names,
num_samples=3000,
batch_size=50,
kmeans_model_for_normlime=None,
normlime_weights=None):
root_path = gen_user_home()
root_path = osp.join(root_path, '.paddlex')
h_pre_models = osp.join(root_path, "pre_models")
if not osp.exists(h_pre_models):
if not osp.exists(root_path):
os.makedirs(root_path)
url = "https://bj.bcebos.com/paddlex/interpret/pre_models.tar.gz"
pdx.utils.download_and_decompress(url, path=root_path)
h_pre_models_kmeans = osp.join(h_pre_models, "kmeans_model.pkl")
if kmeans_model_for_normlime is None:
try:
self.kmeans_model = load_kmeans_model(h_pre_models_kmeans)
except:
raise ValueError(
"NormLIME needs the KMeans model, where we provided a default one in "
"pre_models/kmeans_model.pkl.")
else:
logging.debug("Warning: It is *strongly* suggested to use the \
default KMeans model in pre_models/kmeans_model.pkl. \
Use another one will change the final result.")
self.kmeans_model = load_kmeans_model(kmeans_model_for_normlime)
self.num_samples = num_samples
self.batch_size = batch_size
try:
self.normlime_weights = np.load(
normlime_weights, allow_pickle=True).item()
except:
self.normlime_weights = None
logging.debug(
"Warning: not find the correct precomputed Normlime result.")
self.predict_fn = predict_fn
self.labels = None
self.image = None
self.label_names = label_names
def predict_cluster_labels(self, feature_map, segments):
X = get_feature_for_kmeans(feature_map, segments)
try:
cluster_labels = self.kmeans_model.predict(X)
except AttributeError:
from sklearn.metrics import pairwise_distances_argmin_min
cluster_labels, _ = pairwise_distances_argmin_min(
X, self.kmeans_model.cluster_centers_)
return cluster_labels
def predict_using_normlime_weights(self, pred_labels,
predicted_cluster_labels):
# global weights
g_weights = {y: [] for y in pred_labels}
for y in pred_labels:
cluster_weights_y = self.normlime_weights.get(y, {})
g_weights[y] = [(i, cluster_weights_y.get(k, 0.0))
for i, k in enumerate(predicted_cluster_labels)]
g_weights[y] = sorted(
g_weights[y], key=lambda x: np.abs(x[1]), reverse=True)
return g_weights
def preparation_normlime(self, data_):
self._lime = LIME(self.predict_fn, self.label_names, self.num_samples,
self.batch_size)
self._lime.preparation_lime(data_)
image_show = read_image(data_)
self.predicted_label = self._lime.predicted_label
self.predicted_probability = self._lime.predicted_probability
self.image = image_show[0]
self.labels = self._lime.labels
logging.info('performing NormLIME operations ...')
cluster_labels = self.predict_cluster_labels(
compute_features_for_kmeans(image_show).transpose((1, 2, 0)),
self._lime.lime_interpreter.segments)
g_weights = self.predict_using_normlime_weights(self.labels,
cluster_labels)
return g_weights
def interpret(self, data_, visualization=True, save_outdir=None):
if self.normlime_weights is None:
raise ValueError(
"Not find the correct precomputed NormLIME result. \n"
"\t Try to call compute_normlime_weights() first or load the correct path."
)
g_weights = self.preparation_normlime(data_)
lime_weights = self._lime.lime_interpreter.local_weights
if visualization or save_outdir is not None:
import matplotlib.pyplot as plt
from skimage.segmentation import mark_boundaries
l = self.labels[0]
ln = l
if self.label_names is not None:
ln = self.label_names[l]
psize = 5
nrows = 4
weights_choices = [0.6, 0.7, 0.75, 0.8, 0.85]
nums_to_show = []
ncols = len(weights_choices)
plt.close()
f, axes = plt.subplots(
nrows, ncols, figsize=(psize * ncols, psize * nrows))
for ax in axes.ravel():
ax.axis("off")
axes = axes.ravel()
axes[0].imshow(self.image)
prob_str = "{%.3f}" % (self.predicted_probability)
axes[0].set_title("label {}, proba: {}".format(ln, prob_str))
axes[1].imshow(
mark_boundaries(self.image,
self._lime.lime_interpreter.segments))
axes[1].set_title("superpixel segmentation")
# LIME visualization
for i, w in enumerate(weights_choices):
num_to_show = auto_choose_num_features_to_show(
self._lime.lime_interpreter, l, w)
nums_to_show.append(num_to_show)
temp, mask = self._lime.lime_interpreter.get_image_and_mask(
l,
positive_only=False,
hide_rest=False,
num_features=num_to_show)
axes[ncols + i].imshow(mark_boundaries(temp, mask))
axes[ncols + i].set_title("LIME: first {} superpixels".format(
num_to_show))
# NormLIME visualization
self._lime.lime_interpreter.local_weights = g_weights
for i, num_to_show in enumerate(nums_to_show):
temp, mask = self._lime.lime_interpreter.get_image_and_mask(
l,
positive_only=False,
hide_rest=False,
num_features=num_to_show)
axes[ncols * 2 + i].imshow(mark_boundaries(temp, mask))
axes[ncols * 2 + i].set_title(
"NormLIME: first {} superpixels".format(num_to_show))
# NormLIME*LIME visualization
combined_weights = combine_normlime_and_lime(lime_weights,
g_weights)
self._lime.lime_interpreter.local_weights = combined_weights
for i, num_to_show in enumerate(nums_to_show):
temp, mask = self._lime.lime_interpreter.get_image_and_mask(
l,
positive_only=False,
hide_rest=False,
num_features=num_to_show)
axes[ncols * 3 + i].imshow(mark_boundaries(temp, mask))
axes[ncols * 3 + i].set_title(
"Combined: first {} superpixels".format(num_to_show))
self._lime.lime_interpreter.local_weights = lime_weights
if save_outdir is not None:
save_fig(data_, save_outdir, 'normlime', self.num_samples)
if visualization:
plt.show()
class NormLIME(object):
def __init__(self,
predict_fn,
label_names,
num_samples=3000,
batch_size=50,
kmeans_model_for_normlime=None,
normlime_weights=None):
root_path = gen_user_home()
root_path = osp.join(root_path, '.paddlex')
h_pre_models = osp.join(root_path, "pre_models")
if not osp.exists(h_pre_models):
if not osp.exists(root_path):
os.makedirs(root_path)
url = "https://bj.bcebos.com/paddlex/interpret/pre_models.tar.gz"
pdx.utils.download_and_decompress(url, path=root_path)
h_pre_models_kmeans = osp.join(h_pre_models, "kmeans_model.pkl")
if kmeans_model_for_normlime is None:
try:
self.kmeans_model = load_kmeans_model(h_pre_models_kmeans)
except:
raise ValueError(
"NormLIME needs the KMeans model, where we provided a default one in "
"pre_models/kmeans_model.pkl.")
else:
logging.debug("Warning: It is *strongly* suggested to use the \
default KMeans model in pre_models/kmeans_model.pkl. \
Use another one will change the final result.")
self.kmeans_model = load_kmeans_model(kmeans_model_for_normlime)
self.num_samples = num_samples
self.batch_size = batch_size
try:
self.normlime_weights = np.load(
normlime_weights, allow_pickle=True).item()
except:
self.normlime_weights = None
logging.debug(
"Warning: not find the correct precomputed Normlime result.")
self.predict_fn = predict_fn
self.labels = None
self.image = None
self.label_names = label_names
def predict_cluster_labels(self, feature_map, segments):
X = get_feature_for_kmeans(feature_map, segments)
try:
cluster_labels = self.kmeans_model.predict(X)
except AttributeError:
from sklearn.metrics import pairwise_distances_argmin_min
cluster_labels, _ = pairwise_distances_argmin_min(
X, self.kmeans_model.cluster_centers_)
return cluster_labels
def predict_using_normlime_weights(self, pred_labels,
predicted_cluster_labels):
# global weights
g_weights = {y: [] for y in pred_labels}
for y in pred_labels:
cluster_weights_y = self.normlime_weights.get(y, {})
g_weights[y] = [(i, cluster_weights_y.get(k, 0.0))
for i, k in enumerate(predicted_cluster_labels)]
g_weights[y] = sorted(
g_weights[y], key=lambda x: np.abs(x[1]), reverse=True)
return g_weights
def preparation_normlime(self, data_):
self._lime = LIME(self.predict_fn, self.label_names, self.num_samples,
self.batch_size)
self._lime.preparation_lime(data_)
image_show = read_image(data_)
self.predicted_label = self._lime.predicted_label
self.predicted_probability = self._lime.predicted_probability
self.image = image_show[0]
self.labels = self._lime.labels
logging.info('performing NormLIME operations ...')
cluster_labels = self.predict_cluster_labels(
compute_features_for_kmeans(image_show).transpose((1, 2, 0)),
self._lime.lime_interpreter.segments)
g_weights = self.predict_using_normlime_weights(self.labels,
cluster_labels)
return g_weights
def interpret(self, data_, visualization=True, save_outdir=None):
if self.normlime_weights is None:
raise ValueError(
"Not find the correct precomputed NormLIME result. \n"
"\t Try to call compute_normlime_weights() first or load the correct path."
)
g_weights = self.preparation_normlime(data_)
lime_weights = self._lime.lime_interpreter.local_weights
if visualization or save_outdir is not None:
import matplotlib.pyplot as plt
from skimage.segmentation import mark_boundaries
l = self.labels[0]
ln = l
if self.label_names is not None:
ln = self.label_names[l]
psize = 5
nrows = 4
weights_choices = [0.6, 0.7, 0.75, 0.8, 0.85]
nums_to_show = []
ncols = len(weights_choices)
plt.close()
f, axes = plt.subplots(
nrows, ncols, figsize=(psize * ncols, psize * nrows))
for ax in axes.ravel():
ax.axis("off")
axes = axes.ravel()
axes[0].imshow(self.image)
prob_str = "{%.3f}" % (self.predicted_probability)
axes[0].set_title("label {}, proba: {}".format(ln, prob_str))
axes[1].imshow(
mark_boundaries(self.image,
self._lime.lime_interpreter.segments))
axes[1].set_title("superpixel segmentation")
# LIME visualization
for i, w in enumerate(weights_choices):
num_to_show = auto_choose_num_features_to_show(
self._lime.lime_interpreter, l, w)
nums_to_show.append(num_to_show)
temp, mask = self._lime.lime_interpreter.get_image_and_mask(
l,
positive_only=True,
hide_rest=False,
num_features=num_to_show)
axes[ncols + i].imshow(mark_boundaries(temp, mask))
axes[ncols + i].set_title("LIME: first {} superpixels".format(
num_to_show))
# NormLIME visualization
self._lime.lime_interpreter.local_weights = g_weights
for i, num_to_show in enumerate(nums_to_show):
temp, mask = self._lime.lime_interpreter.get_image_and_mask(
l,
positive_only=True,
hide_rest=False,
num_features=num_to_show)
axes[ncols * 2 + i].imshow(mark_boundaries(temp, mask))
axes[ncols * 2 + i].set_title(
"NormLIME: first {} superpixels".format(num_to_show))
# NormLIME*LIME visualization
combined_weights = combine_normlime_and_lime(lime_weights,
g_weights)
self._lime.lime_interpreter.local_weights = combined_weights
for i, num_to_show in enumerate(nums_to_show):
temp, mask = self._lime.lime_interpreter.get_image_and_mask(
l,
positive_only=True,
hide_rest=False,
num_features=num_to_show)
axes[ncols * 3 + i].imshow(mark_boundaries(temp, mask))
axes[ncols * 3 + i].set_title(
"Combined: first {} superpixels".format(num_to_show))
self._lime.lime_interpreter.local_weights = lime_weights
if save_outdir is not None:
save_fig(data_, save_outdir, 'normlime', self.num_samples)
if visualization:
plt.show()
def auto_choose_num_features_to_show(lime_interpreter, label,
percentage_to_show):
segments = lime_interpreter.segments
lime_weights = lime_interpreter.local_weights[label]
num_pixels_threshold_in_a_sp = segments.shape[0] * segments.shape[
1] // len(np.unique(segments)) // 8
# l1 norm with filtered weights.
used_weights = [(tuple_w[0], tuple_w[1])
for i, tuple_w in enumerate(lime_weights)
if tuple_w[1] > 0]
norm = np.sum([tuple_w[1] for i, tuple_w in enumerate(used_weights)])
normalized_weights = [(tuple_w[0], tuple_w[1] / norm)
for i, tuple_w in enumerate(lime_weights)]
a = 0.0
n = 0
for i, tuple_w in enumerate(normalized_weights):
if tuple_w[1] < 0:
continue
if len(np.where(segments == tuple_w[0])[
0]) < num_pixels_threshold_in_a_sp:
continue
a += tuple_w[1]
if a > percentage_to_show:
n = i + 1
break
if percentage_to_show <= 0.0:
return 5
if n == 0:
return auto_choose_num_features_to_show(lime_interpreter, label,
percentage_to_show - 0.1)
return n
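# Worked example: with normalized positive weights [0.5, 0.3, 0.2] and
# percentage_to_show=0.7, the cumulative sum first exceeds 0.7 at the second
# superpixel, so n=2 features are shown (assuming each superpixel passes the
# pixel-count threshold).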
def get_cam(image_show,
feature_maps,
fc_weights,
label_index,
cam_min=None,
cam_max=None):
_, nc, h, w = feature_maps.shape
cam = feature_maps * fc_weights[:, label_index].reshape(1, nc, 1, 1)
cam = cam.sum((0, 1))
if cam_min is None:
cam_min = np.min(cam)
if cam_max is None:
cam_max = np.max(cam)
cam = cam - cam_min
cam = cam / cam_max
cam = np.uint8(255 * cam)
cam_img = cv2.resize(
cam, image_show.shape[0:2], interpolation=cv2.INTER_LINEAR)
heatmap = cv2.applyColorMap(np.uint8(255 * cam_img), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap)
cam = heatmap + np.float32(image_show)
    cam = cam / np.max(cam)
    return cam
import numpy as np
import sys
import cv2
import time
import copy
import os
import traceback
import ffmpeg
import subprocess as sp
import os.path as path
from pprint import pprint
from concurrent.futures import ThreadPoolExecutor
from PIL import ImageColor
np.set_printoptions(threshold=np.inf)
def is_point_in_box(point, box, camera_position):
return ((point[0] >= box[0][0] - camera_position[0]) and
(point[0] < box[1][0] - camera_position[0]) and
(point[1] >= box[0][1] - camera_position[1]) and
(point[1] < box[1][1] - camera_position[1]))
def get_points(frame, feature_detector, feature_sparsity, negate_boxes, camera_position):
kp = feature_detector.detect(frame, None)
if len(kp) == 0:
return []
pt = np.array([kp[i].pt for i in range(len(kp))])
pt_key = np.sum(
pt // feature_sparsity * np.array([frame.shape[0] // feature_sparsity, 1]),
axis=-1)
p0 = []
p0_buckets = dict()
for i in range(len(pt)):
p0_key = pt_key[i]
if p0_key not in p0_buckets:
p0_buckets[p0_key] = True
flag = True
for box in negate_boxes:
if is_point_in_box(pt[i], box, camera_position):
flag = False
break
if flag:
p0.append(pt[i])
p0 = np.float32(p0).reshape(-1, 1, 2) # convert to numpy
return p0
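# Example (sketch): a cv2 FAST detector is one valid `feature_detector`.
#
#     feature_detector = cv2.FastFeatureDetector_create(threshold=20)
#     p0 = get_points(gray, feature_detector, feature_sparsity=16,
#                     negate_boxes=[], camera_position=[0, 0])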
def preproc_frame(frame, proc_xres, proc_yres, pad_x, pad_y, pad_color):
colored = frame #[:585, :1040]
colored = cv2.resize(
colored, (proc_xres, proc_yres), interpolation = cv2.INTER_NEAREST)
colored = np.pad(
colored, ((pad_y, pad_y), (pad_x, pad_x), (0, 0)))
gray = cv2.cvtColor(colored, cv2.COLOR_BGR2GRAY) # make grayscale frame
pad_bgr = ImageColor.getcolor(pad_color, "RGB")[::-1]
colored[:,:pad_x,:] = pad_bgr
colored[:,-pad_x:,:] = pad_bgr
colored[:pad_y,:,:] = pad_bgr
colored[-pad_y:,:,:] = pad_bgr
return colored, gray
def read_logic(reader):
try:
frame = reader.read()
return frame
except Exception as e:
traceback.print_exc()
def write_logic(writer, frame_w, tracker, frame_track):
try:
tracker.write(frame_track)
return writer.write(frame_w)
except Exception as e:
traceback.print_exc()
def core_logic(frame_r,
frame_ctr,
out_xres,
out_yres,
proc_xres,
proc_yres,
pad_x,
pad_y,
pad_color,
feature_detector,
feature_sparsity,
negate_boxes,
draw_features,
camera_position,
reference_gray,
reference_p0):
try:
current_colored, current_gray = preproc_frame(
frame_r, proc_xres, proc_yres, pad_x, pad_y, pad_color)
current_gray = np.roll(
current_gray, (-int(camera_position[1]), -int(camera_position[0])), axis=(0,1))
if frame_ctr == 0 or len(reference_p0) == 0:
            frame_w = np.roll(current_colored, (-int(camera_position[1]), -int(camera_position[0])), axis=(0,1))
if draw_features:
for box in negate_boxes:
frame_w = cv2.rectangle(
frame_w,
(box[0][0] - camera_position[0], box[0][1] - camera_position[1]),
(box[1][0] - camera_position[0], box[1][1] - camera_position[1]),
(0, 0, 255), 5)
else:
buckets = dict()
p1, st, err = cv2.calcOpticalFlowPyrLK(
reference_gray, current_gray, reference_p0, None)
diff = (p1[:,0,:] - reference_p0[:,0,:]) #* 1080 / proc_yres
for i in range(len(diff)):
key_x = int(round(diff[i][0]))
key_y = int(round(diff[i][1]))
key = str(key_x) + "," + str(key_y)
if key not in buckets:
buckets[key] = []
buckets[key].append(p1[i][0])
if len(buckets) > 0:
argmax = max(buckets, key=lambda key: len(buckets[key])) # choose bucket with most elements
#argmax *= proc_yres / 1080
current_stage_points = list(buckets[argmax])
current_non_stage_points = []
for bucket in buckets:
if bucket != argmax:
current_non_stage_points += list(buckets[bucket])
argmax = argmax.split(",")
adjustment = [int(argmax[0]), int(argmax[1])]
else:
current_stage_points = []
current_non_stage_points = []
adjustment = [0, 0]
current_gray = np.roll(current_gray, (-adjustment[1], -adjustment[0]), axis=(0,1))
# draw all the FAST features for debugging
camera_position[0] += adjustment[0]
camera_position[1] += adjustment[1]
frame_w = np.roll(current_colored, (-int(camera_position[1]), -int(camera_position[0])), axis=(0,1))
if draw_features:
for i in range(len(current_stage_points)):
center = (int(current_stage_points[i][0]), int(current_stage_points[i][1]))
frame_w = cv2.circle(frame_w, center, 4, (255,255,0), -1)
for i in range(len(current_non_stage_points)):
center = (int(current_non_stage_points[i][0]), int(current_non_stage_points[i][1]))
frame_w = cv2.circle(frame_w, center, 4, (0,255,0), -1)
for box in negate_boxes:
frame_w = cv2.rectangle(
frame_w,
(box[0][0] - camera_position[0], box[0][1] - camera_position[1]),
(box[1][0] - camera_position[0], box[1][1] - camera_position[1]),
(0, 0, 255), 5)
reference_gray = current_gray
reference_p0 = get_points(
reference_gray, feature_detector, feature_sparsity, negate_boxes, camera_position)
frame_w = cv2.resize(frame_w, (out_xres, out_yres), interpolation = cv2.INTER_NEAREST)
        frame_track = np.zeros((proc_yres, proc_xres, 3), dtype=np.uint8)
import copy
import numpy as np
import open3d as o3d
from tqdm import tqdm
from scipy import stats
import utils_o3d as utils
def remove_ground_plane(pcd, z_thresh=-2.7):
cropped = copy.deepcopy(pcd)
cropped_points = np.array(cropped.points)
cropped_points = cropped_points[cropped_points[:, -1] > z_thresh]
pcd_final = o3d.geometry.PointCloud()
pcd_final.points = o3d.utility.Vector3dVector(cropped_points)
return pcd_final
def remove_y_plane(pcd, y_thresh=5):
cropped = copy.deepcopy(pcd)
cropped_points = np.array(cropped.points)
cropped_points = cropped_points[cropped_points[:, 0] < y_thresh]
cropped_points[:, -1] = -cropped_points[:, -1]
pcd_final = o3d.geometry.PointCloud()
pcd_final.points = o3d.utility.Vector3dVector(cropped_points)
return pcd_final
def compute_features(pcd, voxel_size, normals_nn=100, features_nn=120, downsample=True):
normals_radius = voxel_size * 2
features_radius = voxel_size * 4
# Downsample the point cloud using Voxel grids
if downsample:
print(':: Input size:', np.array(pcd.points).shape)
pcd_down = utils.downsample_point_cloud(pcd, voxel_size)
print(':: Downsample with a voxel size %.3f' % voxel_size)
print(':: Downsample size', np.array(pcd_down.points).shape)
else: pcd_down = copy.deepcopy(pcd)
# Estimate normals
print(':: Estimate normal with search radius %.3f' % normals_radius)
pcd_down.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=normals_radius, max_nn=normals_nn))
# Compute FPFH features
print(':: Compute FPFH feature with search radius %.3f' % features_radius)
features = o3d.registration.compute_fpfh_feature(pcd_down,
o3d.geometry.KDTreeSearchParamHybrid(radius=features_radius, max_nn=features_nn))
return pcd_down, features
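# Usage sketch: featurize two clouds with the same voxel size before matching.
#
#     pcd0_down, f0 = compute_features(pcd0, voxel_size=0.05)
#     pcd1_down, f1 = compute_features(pcd1, voxel_size=0.05)
#     match_features(pcd0_down, pcd1_down, f0, f1)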
def match_features(pcd0, pcd1, feature0, feature1, thresh=None, display=False):
pcd0, pcd1 = copy.deepcopy(pcd0), copy.deepcopy(pcd1)
print(':: Input size 0:', np.array(pcd0.points).shape)
print(':: Input size 1:', np.array(pcd1.points).shape)
print(':: Features size 0:', np.array(feature0.data).shape)
print(':: Features size 1:', np.array(feature1.data).shape)
utils.paint_uniform_color(pcd0, color=[1, 0.706, 0])
utils.paint_uniform_color(pcd1, color=[0, 0.651, 0.929])
scores, indices = [], []
fpfh_tree = o3d.geometry.KDTreeFlann(feature1)
for i in tqdm(range(len(pcd0.points)), desc=':: Feature Matching'):
[_, idx, _] = fpfh_tree.search_knn_vector_xd(feature0.data[:, i], 1)
scores.append(np.linalg.norm(pcd0.points[i] - pcd1.points[idx[0]]))
indices.append([i, idx[0]])
scores, indices = np.array(scores), np.array(indices)
median = np.median(scores)
if thresh is None: thresh = median
inliers_idx = np.where(scores <= thresh)[0]
pcd0_idx = indices[inliers_idx, 0]
pcd1_idx = indices[inliers_idx, 1]
print(':: Score stats: Min=%0.3f, Max=%0.3f, Median=%0.3f, N<Thresh=%d' % (
        np.min(scores), np.max(scores), median, len(inliers_idx)))
# pylint: disable=R0201
import os
import numpy as np
import pytest
from PartSegImage import Image, ImageWriter, TiffImageReader
from PartSegImage.image import FRAME_THICKNESS
class TestImageBase:
image_class = Image
def needed_shape(self, shape, axes: str, drop: str):
new_axes = self.image_class.axis_order
for el in drop:
new_axes = new_axes.replace(el, "")
res_shape = [1] * len(new_axes)
for size, name in zip(shape, axes):
res_shape[new_axes.index(name)] = size
return tuple(res_shape)
def image_shape(self, shape, axes):
return self.needed_shape(shape, axes, "")
def mask_shape(self, shape, axes):
return self.needed_shape(shape, axes, "C")
def reorder_axes_letter(self, letters: str):
res = "".join(x for x in self.image_class.axis_order if x in letters)
assert len(res) == len(letters)
return res
def prepare_mask_shape(self, shape):
base_axes = set("TZYX")
refer_axes = self.image_class.axis_order.replace("C", "")
i, j = 0, 0
new_shape = [1] * len(refer_axes)
for i, val in enumerate(refer_axes):
if val in base_axes:
new_shape[i] = shape[j]
j += 1
return new_shape
def prepare_image_initial_shape(self, shape, channel):
new_shape = self.prepare_mask_shape(shape)
new_shape.insert(self.image_class.axis_order.index("C"), channel)
return new_shape
def test_fit_mask_simple(self):
initial_shape = self.prepare_image_initial_shape([1, 10, 20, 20], 1)
data = np.zeros(initial_shape, np.uint8)
image = self.image_class(data, (1, 1, 1), "")
mask = np.zeros((1, 10, 20, 20), np.uint8)
mask[0, 2:-2, 4:-4, 4:-4] = 5
image.fit_mask_to_image(mask)
def test_fit_mask_mapping_val(self):
initial_shape = self.prepare_image_initial_shape([1, 10, 20, 20], 1)
data = np.zeros(initial_shape, np.uint8)
image = self.image_class(data, (1, 1, 1), "")
mask = np.zeros((1, 10, 20, 20), np.uint16)
mask[0, 2:-2, 4:-4, 4:10] = 5
mask[0, 2:-2, 4:-4, 11:-4] = 7
mask2 = image.fit_mask_to_image(mask)
assert np.all(np.unique(mask2) == [0, 1, 2])
assert np.all(np.unique(mask) == [0, 5, 7])
map_arr = np.array([0, 0, 0, 0, 0, 1, 0, 2])
assert np.all(map_arr[mask] == mask2)
assert mask2.dtype == np.uint8
def test_fit_mask_to_image_change_type(self):
initial_shape = self.prepare_image_initial_shape([1, 30, 50, 50], 1)
data = np.zeros(initial_shape, np.uint8)
image = self.image_class(data, (1, 1, 1), "")
mask_base = np.zeros(30 * 50 * 50, dtype=np.uint32)
mask_base[:50] = np.arange(50, dtype=np.uint32)
        image.set_mask(np.reshape(mask_base, (1, 30, 50, 50)))
import os
import math
from functools import partial
import argparse
import numpy as np
import cv2
def transform(img, factor, group="euclid"):
"""Apply perspective transform with selected homography"""
height, width, _ = img.shape
if group=="euclid" or group=="e":
angle = math.pi*2/100. * factor
sin_a = math.sin(angle)
cos_a = math.cos(angle)
homography = np.array([
[cos_a, -sin_a, 0],
[sin_a, cos_a, 0],
[0, 0, 1]
])
elif group=="similarity" or group=="s":
angle = math.pi*2/100. * factor
sin_a = math.sin(angle)
cos_a = math.cos(angle)
s = 1 + factor/100.
homography = np.array([
[s * cos_a, -s*sin_a, 0],
[s*sin_a, s*cos_a, 0],
[0, 0, 1]
])
elif group=="affine" or group=="a":
homography = np.array([
[1 + factor/50., 0 + factor/20., 0],
[0, 1 + factor/20., 0],
[0, 0, 1]
])
elif group=="projective" or group=="p":
homography = np.array([
[1, 0, 0],
[0, 1, 0],
[factor**2/100000., factor**2/100000., 1]
])
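    # The four groups nest: Euclidean (pure rotation here) is a special case of
    # similarity (rotation plus uniform scale s), which is a special case of
    # affine (adds shear), which is a special case of projective (the non-zero
    # bottom-row terms make parallel lines converge). Each is a 3x3 homography
    # H acting on homogeneous pixel coordinates [x, y, 1]^T.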
# Target size of the image
dst_size = (height*3//2, width*3//2)
# Add a translation to the homography so the transformed image stays in center
image_center = np.array([width/2, height/2, 1]).T
    warped_center = np.matmul(homography, image_center)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : <NAME>
# E-mail : <EMAIL>
# Description:
# Date : 05/08/2018 6:04 PM
# File Name : kinect2grasp_python2.py
# Note: this file is inspired by PyntCloud
# Reference web: https://github.com/daavoo/pyntcloud
import numpy as np
from scipy.spatial import cKDTree
from numba import jit
is_numba_avaliable = True
@jit
def groupby_count(xyz, indices, out):
for i in range(xyz.shape[0]):
out[indices[i]] += 1
return out
@jit
def groupby_sum(xyz, indices, N, out):
for i in range(xyz.shape[0]):
out[indices[i]] += xyz[i][N]
return out
@jit
def groupby_max(xyz, indices, N, out):
for i in range(xyz.shape[0]):
if xyz[i][N] > out[indices[i]]:
out[indices[i]] = xyz[i][N]
return out
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
    shape = tuple(len(x) for x in arrays)  # a sized tuple, not a generator, for np.indices
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
class VoxelGrid:
def __init__(self, points, n_x=1, n_y=1, n_z=1, size_x=None, size_y=None, size_z=None, regular_bounding_box=True):
"""Grid of voxels with support for different build methods.
Parameters
----------
points: (N, 3) numpy.array
n_x, n_y, n_z : int, optional
Default: 1
The number of segments in which each axis will be divided.
Ignored if corresponding size_x, size_y or size_z is not None.
size_x, size_y, size_z : float, optional
Default: None
The desired voxel size along each axis.
If not None, the corresponding n_x, n_y or n_z will be ignored.
regular_bounding_box : bool, optional
Default: True
If True, the bounding box of the point cloud will be adjusted
in order to have all the dimensions of equal length.
"""
self._points = points
self.x_y_z = [n_x, n_y, n_z]
self.sizes = [size_x, size_y, size_z]
self.regular_bounding_box = regular_bounding_box
def compute(self):
xyzmin = self._points.min(0)
xyzmax = self._points.max(0)
if self.regular_bounding_box:
#: adjust to obtain a minimum bounding box with all sides of equal length
margin = max(xyzmax - xyzmin) - (xyzmax - xyzmin)
xyzmin = xyzmin - margin / 2
xyzmax = xyzmax + margin / 2
for n, size in enumerate(self.sizes):
if size is None:
continue
margin = (((self._points.ptp(0)[n] // size) + 1) * size) - self._points.ptp(0)[n]
xyzmin[n] -= margin / 2
xyzmax[n] += margin / 2
self.x_y_z[n] = ((xyzmax[n] - xyzmin[n]) / size).astype(int)
self.xyzmin = xyzmin
self.xyzmax = xyzmax
segments = []
shape = []
for i in range(3):
# note the +1 in num
s, step = np.linspace(xyzmin[i], xyzmax[i], num=(self.x_y_z[i] + 1), retstep=True)
segments.append(s)
shape.append(step)
self.segments = segments
self.shape = shape
self.n_voxels = self.x_y_z[0] * self.x_y_z[1] * self.x_y_z[2]
self.id = "V({},{},{})".format(self.x_y_z, self.sizes, self.regular_bounding_box)
# find where each point lies in corresponding segmented axis
# -1 so index are 0-based; clip for edge cases
self.voxel_x = np.clip(np.searchsorted(self.segments[0], self._points[:, 0]) - 1, 0, self.x_y_z[0])
self.voxel_y = np.clip(np.searchsorted(self.segments[1], self._points[:, 1]) - 1, 0, self.x_y_z[1])
self.voxel_z = np.clip(np.searchsorted(self.segments[2], self._points[:, 2]) - 1, 0, self.x_y_z[2])
self.voxel_n = np.ravel_multi_index([self.voxel_x, self.voxel_y, self.voxel_z], self.x_y_z)
# compute center of each voxel
midsegments = [(self.segments[i][1:] + self.segments[i][:-1]) / 2 for i in range(3)]
self.voxel_centers = cartesian(midsegments).astype(np.float32)
def query(self, points):
"""ABC API. Query structure.
TODO Make query_voxelgrid an independent function, and add a light
save mode where only segments and x_y_z are saved.
"""
voxel_x = np.clip(np.searchsorted(
self.segments[0], points[:, 0]) - 1, 0, self.x_y_z[0])
voxel_y = np.clip(np.searchsorted(
self.segments[1], points[:, 1]) - 1, 0, self.x_y_z[1])
voxel_z = np.clip(np.searchsorted(
self.segments[2], points[:, 2]) - 1, 0, self.x_y_z[2])
voxel_n = np.ravel_multi_index([voxel_x, voxel_y, voxel_z], self.x_y_z)
return voxel_n
def get_feature_vector(self, mode="binary"):
"""Return a vector of size self.n_voxels. See mode options below.
Parameters
----------
mode: str in available modes. See Notes
Default "binary"
Returns
-------
feature_vector: [n_x, n_y, n_z] ndarray
See Notes.
Notes
-----
Available modes are:
binary
0 for empty voxels, 1 for occupied.
density
number of points inside voxel / total number of points.
TDF
Truncated Distance Function. Value between 0 and 1 indicating the distance
between the voxel's center and the closest point. 1 on the surface,
0 on voxels further than 2 * voxel side.
x_max, y_max, z_max
Maximum coordinate value of points inside each voxel.
x_mean, y_mean, z_mean
Mean coordinate value of points inside each voxel.
"""
vector = np.zeros(self.n_voxels)
if mode == "binary":
vector[np.unique(self.voxel_n)] = 1
elif mode == "density":
count = np.bincount(self.voxel_n)
vector[:len(count)] = count
vector /= len(self.voxel_n)
elif mode == "TDF":
# truncation = np.linalg.norm(self.shape)
kdt = cKDTree(self._points)
vector, i = kdt.query(self.voxel_centers, n_jobs=-1)
elif mode.endswith("_max"):
if not is_numba_avaliable:
raise ImportError("numba is required to compute {}".format(mode))
axis = {"x_max": 0, "y_max": 1, "z_max": 2}
vector = groupby_max(self._points, self.voxel_n, axis[mode], vector)
elif mode.endswith("_mean"):
if not is_numba_avaliable:
raise ImportError("numba is required to compute {}".format(mode))
axis = {"x_mean": 0, "y_mean": 1, "z_mean": 2}
voxel_sum = groupby_sum(self._points, self.voxel_n, axis[mode], np.zeros(self.n_voxels))
voxel_count = groupby_count(self._points, self.voxel_n, np.zeros(self.n_voxels))
            vector = np.nan_to_num(voxel_sum / voxel_count)
        return vector.reshape(self.x_y_z)
import os
import json
import matplotlib.pyplot as plt
import numpy as np
file = os.path.join("..", "InvolutionGAN", "logs", "mnist_igan_5.0",'loss.log')
with open(file, 'r') as f:
s = f.readline()
s = s.strip()
log = json.loads(s)
# print(len(log['lossG']))
# print(len(log['lossD']))
lossg, = plt.plot(np.array(log['lossg']))
import numpy as np
import pytest
from dnnv.nn.graph import OperationGraph
from dnnv.nn import operations
from dnnv.properties.expressions import Network
from dnnv.verifiers.common.reductions.iopolytope import *
from dnnv.verifiers.common.reductions.iopolytope import Variable
def setup_function():
Variable._count = 0
def test_init_merge():
input_op_0 = operations.Input((1, 5), np.dtype(np.float32))
add_op_0 = operations.Add(input_op_0, 1)
op_graph_0 = OperationGraph([add_op_0])
N0 = Network("N0").concretize(op_graph_0)
input_op_1 = operations.Input((1, 5), np.dtype(np.float32))
sub_op_1 = operations.Sub(input_op_1, 1)
op_graph_1 = OperationGraph([sub_op_1])
N1 = Network("N1").concretize(op_graph_1)
input_constraint = HalfspacePolytope()
output_constraint = HalfspacePolytope()
prop = IOPolytopeProperty([N0, N1], input_constraint, output_constraint)
assert len(prop.op_graph.output_operations) == 2
assert isinstance(prop.op_graph.output_operations[0], operations.Add)
assert isinstance(prop.op_graph.output_operations[1], operations.Sub)
assert len(prop.op_graph.input_details) == 1
def test_str():
input_op = operations.Input((1, 5), np.dtype(np.float32))
add_op = operations.Add(input_op, 1)
op_graph = OperationGraph([add_op])
N = Network("N").concretize(op_graph)
vi = Variable((1, 5))
input_constraint = HalfspacePolytope(vi)
input_constraint.update_constraint([vi], np.array([(0, 1)]), np.array([1.0]), 5.0)
vo = Variable((1, 5))
output_constraint = HalfspacePolytope(vo)
output_constraint.update_constraint([vo], np.array([(0, 0)]), np.array([2.0]), 12.0)
prop = IOPolytopeProperty([N], input_constraint, output_constraint)
assert str(prop) == (
"Property:\n"
" Networks:\n"
" [Network('N')]\n"
" Input Constraint:\n"
" 1.0 * x_0[(0, 1)] <= 5.0\n"
" Output Constraint:\n"
" 2.0 * x_1[(0, 0)] <= 12.0"
)
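# Editorial note: update_constraint(variables, indices, coeffs, b) encodes one
# half-space, sum_k coeffs[k] * variable[indices[k]] <= b, which is exactly
# what the rendered strings above ("1.0 * x_0[(0, 1)] <= 5.0") display.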
def test_validate_counter_example_true():
input_op = operations.Input((1, 2), np.dtype(np.float32))
matmul_op = operations.MatMul(input_op, np.array([[1.0], [1.0]], dtype=np.float32))
add_op = operations.Add(matmul_op, 1)
op_graph = OperationGraph([add_op])
N = Network("N").concretize(op_graph)
vi = Variable((1, 2))
input_constraint = HalfspacePolytope(vi)
variables = [vi, vi]
indices = np.array([(0, 0), (0, 1)])
coeffs = np.array([1.0, 1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0, -1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([1.0, -1.0])
b = np.array(4)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0, 1.0])
b = np.array(11)
input_constraint.update_constraint(variables, indices, coeffs, b)
vo = Variable((1, 1))
output_constraint = HalfspacePolytope(vo)
variables = [vo]
indices = np.array([(0, 0)])
coeffs = np.array([1.0])
b = np.array(2)
output_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b)
prop = IOPolytopeProperty([N], input_constraint, output_constraint)
x = np.array([[0.0, 0.0]]).astype(np.float32)
assert prop.validate_counter_example(x)[0]
x = np.array([[0.5, 0.5]]).astype(np.float32)
assert prop.validate_counter_example(x)[0]
x = np.array([[-1.0, 0.0]]).astype(np.float32)
assert prop.validate_counter_example(x)[0]
def test_validate_counter_example_false():
input_op = operations.Input((1, 2), np.dtype(np.float32))
matmul_op = operations.MatMul(input_op, np.array([[1.0], [1.0]], dtype=np.float32))
add_op = operations.Add(matmul_op, 1)
op_graph = OperationGraph([add_op])
N = Network("N").concretize(op_graph)
vi = Variable((1, 2))
input_constraint = HalfspacePolytope(vi)
variables = [vi, vi]
indices = np.array([(0, 0), (0, 1)])
coeffs = np.array([1.0, 1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0, -1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([1.0, -1.0])
b = np.array(4)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0, 1.0])
b = np.array(11)
input_constraint.update_constraint(variables, indices, coeffs, b)
vo = Variable((1, 1))
output_constraint = HalfspacePolytope(vo)
variables = [vo]
indices = np.array([(0, 0)])
coeffs = np.array([1.0])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b)
prop = IOPolytopeProperty([N], input_constraint, output_constraint)
x = np.array([[0.0, 110.0]]).astype(np.float32)
assert not prop.validate_counter_example(x)[0]
x = np.array([[1.0, 0.5]]).astype(np.float32)
assert not prop.validate_counter_example(x)[0]
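# Editorial note on the two cases above: the network computes y = x0 + x1 + 1.
# x = [0, 110] violates the input constraint x0 + x1 <= 2, and x = [1.0, 0.5]
# satisfies the input constraints but yields y = 2.5, outside the output box
# [-1, 1], so neither point is a valid counter-example.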
def test_suffixed_op_graph():
input_op = operations.Input((1, 2), np.dtype(np.float32))
matmul_op = operations.MatMul(input_op, np.array([[1.0], [1.0]], dtype=np.float32))
add_op = operations.Add(matmul_op, 1)
relu_op = operations.Relu(add_op)
op_graph = OperationGraph([relu_op])
N = Network("N").concretize(op_graph)
vi = Variable((1, 2))
input_constraint = HalfspacePolytope(vi)
variables = [vi, vi]
indices = np.array([(0, 0), (0, 1)])
coeffs = np.array([1.0, 1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0, -1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([1.0, -1.0])
b = np.array(4)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0, 1.0])
b = np.array(11)
input_constraint.update_constraint(variables, indices, coeffs, b)
vo = Variable((1, 1))
output_constraint = HalfspacePolytope(vo)
variables = [vo]
indices = np.array([(0, 0)])
coeffs = np.array([1.0])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b)
prop = IOPolytopeProperty([N], input_constraint, output_constraint)
suffixed_op_graph = prop.suffixed_op_graph()
x = np.array([[1.0, 0.5]]).astype(np.float32)
assert suffixed_op_graph(x).item() > 0
x = np.array([[0.0, 0.0]]).astype(np.float32)
assert suffixed_op_graph(x).item() <= 0
x = np.array([[0.25, -0.25]]).astype(np.float32)
assert suffixed_op_graph(x).item() <= 0
x = np.array([[-1.0, 0.0]]).astype(np.float32)
assert suffixed_op_graph(x).item() <= 0
def test_suffixed_op_graph_multiple_output_ops():
input_op = operations.Input((1, 2), np.dtype(np.float32))
matmul_op = operations.MatMul(input_op, np.array([[1.0], [1.0]], dtype=np.float32))
add_op = operations.Add(matmul_op, 1)
relu_op = operations.Relu(add_op)
tanh_op = operations.Tanh(add_op)
op_graph = OperationGraph([relu_op, tanh_op])
N = Network("N").concretize(op_graph)
vi = Variable((1, 2))
input_constraint = HalfspacePolytope(vi)
variables = [vi, vi]
indices = np.array([(0, 0), (0, 1)])
coeffs = np.array([1.0, 1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0, -1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([1.0, -1.0])
b = np.array(4)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0, 1.0])
b = np.array(11)
input_constraint.update_constraint(variables, indices, coeffs, b)
vo1 = Variable((1, 1))
vo2 = Variable((1, 1))
output_constraint = HalfspacePolytope(vo1)
output_constraint.add_variable(vo2)
variables = [vo1]
indices = np.array([(0, 0)])
coeffs = np.array([1.0])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b)
variables = [vo2]
indices = np.array([(0, 0)])
coeffs = np.array([0.5])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b, is_open=True)
coeffs = np.array([-0.5])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b, is_open=True)
prop = IOPolytopeProperty([N], input_constraint, output_constraint)
suffixed_op_graph = prop.suffixed_op_graph()
x = np.array([[1.0, 0.5]]).astype(np.float32)
assert suffixed_op_graph(x).item() > 0
x = np.array([[2.0, 1.0]]).astype(np.float32)
assert prop.validate_counter_example(x)
assert suffixed_op_graph(x).item() > 0
x = np.array([[0.0, 0.0]]).astype(np.float32)
assert suffixed_op_graph(x).item() <= 0
x = np.array([[0.25, -0.25]]).astype(np.float32)
assert suffixed_op_graph(x).item() <= 0
x = np.array([[-1.0, 0.0]]).astype(np.float32)
assert suffixed_op_graph(x).item() <= 0
def test_prefixed_and_suffixed_op_graph_hspoly_input_constraints():
input_op = operations.Input((1, 2), np.dtype(np.float32))
matmul_op = operations.MatMul(input_op, np.array([[1.0], [1.0]], dtype=np.float32))
add_op = operations.Add(matmul_op, 1)
relu_op = operations.Relu(add_op)
op_graph = OperationGraph([relu_op])
N = Network("N").concretize(op_graph)
vi = Variable((1, 2))
input_constraint = HalfspacePolytope(vi)
variables = [vi]
indices = np.array([(0, 0)])
coeffs = np.array([1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
input_constraint.update_constraint(variables, indices, -coeffs, b)
indices = np.array([(0, 1)])
input_constraint.update_constraint(variables, indices, coeffs, b)
input_constraint.update_constraint(variables, indices, -coeffs, b)
vo = Variable((1, 1))
output_constraint = HalfspacePolytope(vo)
variables = [vo]
indices = np.array([(0, 0)])
coeffs = np.array([1.0])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b)
prop = IOPolytopeProperty([N], input_constraint, output_constraint)
with pytest.raises(
ValueError,
match="HalfspacePolytope input constraints are not yet supported",
):
_ = prop.prefixed_and_suffixed_op_graph()
def test_prefixed_and_suffixed_op_graph():
input_op = operations.Input((1, 2), np.dtype(np.float32))
matmul_op = operations.MatMul(input_op, np.array([[1.0], [1.0]], dtype=np.float32))
add_op = operations.Add(matmul_op, 1)
relu_op = operations.Relu(add_op)
op_graph = OperationGraph([relu_op])
N = Network("N").concretize(op_graph)
vi = Variable((1, 2))
input_constraint = HyperRectangle(vi)
variables = [vi]
indices = np.array([(0, 0)])
coeffs = np.array([1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
input_constraint.update_constraint(variables, indices, -coeffs, b)
indices = np.array([(0, 1)])
input_constraint.update_constraint(variables, indices, coeffs, b)
input_constraint.update_constraint(variables, indices, -coeffs, b)
vo = Variable((1, 1))
output_constraint = HalfspacePolytope(vo)
variables = [vo]
indices = np.array([(0, 0)])
coeffs = np.array([1.0])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b)
prop = IOPolytopeProperty([N], input_constraint, output_constraint)
prefixed_and_suffixed_op_graph, (lbs, ubs) = prop.prefixed_and_suffixed_op_graph()
x = np.array([[1.0, 0.5]]).astype(np.float32)
assert prefixed_and_suffixed_op_graph(x).item() > 0
x = np.array([[1.0, 1.0]]).astype(np.float32)
assert prefixed_and_suffixed_op_graph(x).item() > 0
x = np.array([[0.0, 0.0]]).astype(np.float32)
assert prefixed_and_suffixed_op_graph(x).item() <= 0
x = np.array([[0.5, 0.5]]).astype(np.float32)
assert prefixed_and_suffixed_op_graph(x).item() <= 0
x = np.array([[-1.0, 0.0]]).astype(np.float32)
assert prefixed_and_suffixed_op_graph(x).item() <= 0
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal,
assert_no_warnings, assert_raises, assert_array_equal, suppress_warnings
)
# Test data
_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
[0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],
[np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],
[0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])
# Rows of _ndat with nans removed
_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]),
np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),
np.array([0.1042, -0.5954]),
np.array([0.1610, 0.1859, 0.3146])]
# Rows of _ndat with nans converted to ones
_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170],
[0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833],
[1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954],
[0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]])
# Rows of _ndat with nans converted to zeros
_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
[0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833],
[0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954],
[0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
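# Editorial sketch (not part of the original test file): the invariant the
# tests below rely on -- each row of _rdat is the matching row of _ndat with
# its NaNs compressed out:
for _row, _rrow in zip(_ndat, _rdat):
    assert np.array_equal(_row[~np.isnan(_row)], _rrow)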
class TestNanFunctions_MinMax(TestCase):
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalars
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(np.nan)))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
def test_masked(self):
mat = np.ma.fix_invalid(_ndat)
msk = mat._mask.copy()
for f in [np.nanmin]:
res = f(mat, axis=1)
tgt = f(_ndat, axis=1)
assert_equal(res, tgt)
assert_equal(mat._mask, msk)
assert_(not np.isinf(mat).any())
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
# check that rows of nan are dealt with for subclasses (#4628)
mat[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
and not np.isnan(res[2, 0]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat)
assert_(np.isscalar(res))
assert_(not np.isnan(res))
assert_(len(w) == 0)
class TestNanFunctions_ArgminArgmax(TestCase):
nanfuncs = [np.nanargmin, np.nanargmax]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_result_values(self):
for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):
for row in _ndat:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in")
ind = f(row)
val = row[ind]
# comparing with NaN is tricky as the result
# is always false except for NaN != NaN
assert_(not np.isnan(val))
assert_(not fcmp(val, row).any())
assert_(not np.equal(val, row[:ind]).any())
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
assert_raises(ValueError, f, mat, axis=axis)
assert_raises(ValueError, f, np.nan)
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
assert_raises(ValueError, f, mat, axis=axis)
for axis in [1]:
res = f(mat, axis=axis)
assert_equal(res, np.zeros(0))
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
class TestNanFunctions_IntTypes(TestCase):
int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
mat = np.array([127, 39, 93, 87, 46])
def integer_arrays(self):
for dtype in self.int_types:
yield self.mat.astype(dtype)
def test_nanmin(self):
tgt = np.min(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmin(mat), tgt)
def test_nanmax(self):
tgt = np.max(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmax(mat), tgt)
def test_nanargmin(self):
tgt = np.argmin(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanargmin(mat), tgt)
def test_nanargmax(self):
tgt = np.argmax(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanargmax(mat), tgt)
def test_nansum(self):
tgt = np.sum(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nansum(mat), tgt)
def test_nanprod(self):
tgt = np.prod(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanprod(mat), tgt)
def test_nancumsum(self):
tgt = np.cumsum(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nancumsum(mat), tgt)
def test_nancumprod(self):
tgt = np.cumprod(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nancumprod(mat), tgt)
def test_nanmean(self):
tgt = np.mean(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmean(mat), tgt)
def test_nanvar(self):
tgt = np.var(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanvar(mat), tgt)
tgt = np.var(self.mat, ddof=1)
for mat in self.integer_arrays():
assert_equal(np.nanvar(mat, ddof=1), tgt)
def test_nanstd(self):
tgt = np.std(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanstd(mat), tgt)
tgt = np.std(self.mat, ddof=1)
for mat in self.integer_arrays():
assert_equal(np.nanstd(mat, ddof=1), tgt)
class SharedNanFunctionsTestsMixin(object):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
with suppress_warnings() as sup:
if nf in {np.nanstd, np.nanvar} and c in 'FDG':
# Giving the warning is a small bug, see gh-8000
sup.filter(np.ComplexWarning)
tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_char(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
with suppress_warnings() as sup:
if nf in {np.nanstd, np.nanvar} and c in 'FDG':
# Giving the warning is a small bug, see gh-8000
sup.filter(np.ComplexWarning)
tgt = rf(mat, dtype=c, axis=1).dtype.type
res = nf(mat, dtype=c, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=c, axis=None).dtype.type
res = nf(mat, dtype=c, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt, "res %s, tgt %s" % (res, tgt))
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nansum, np.nanprod]
stdfuncs = [np.sum, np.prod]
def test_allnans(self):
# Check for FutureWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = np.nansum([np.nan]*3, axis=None)
assert_(res == 0, 'result is not 0')
assert_(len(w) == 0, 'warning raised')
# Check scalar
res = np.nansum(np.nan)
assert_(res == 0, 'result is not 0')
assert_(len(w) == 0, 'warning raised')
# Check there is no warning for not all-nan
np.nansum([0]*3, axis=None)
assert_(len(w) == 0, 'unwanted warning raised')
def test_empty(self):
for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]):
mat = np.zeros((0, 3))
tgt = [tgt_value]*3
res = f(mat, axis=0)
assert_equal(res, tgt)
tgt = []
res = f(mat, axis=1)
assert_equal(res, tgt)
tgt = tgt_value
res = f(mat, axis=None)
assert_equal(res, tgt)
class TestNanFunctions_CumSumProd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nancumsum, np.nancumprod]
stdfuncs = [np.cumsum, np.cumprod]
def test_allnans(self):
for f, tgt_value in zip(self.nanfuncs, [0, 1]):
# Unlike other nan-functions, sum/prod/cumsum/cumprod don't warn on all nan input
with assert_no_warnings():
res = f([np.nan]*3, axis=None)
tgt = tgt_value*np.ones((3))
assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((3))' % (tgt_value))
# Check scalar
res = f(np.nan)
tgt = tgt_value*np.ones((1))
assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((1))' % (tgt_value))
# Check there is no warning for not all-nan
f([0]*3, axis=None)
def test_empty(self):
for f, tgt_value in zip(self.nanfuncs, [0, 1]):
mat = np.zeros((0, 3))
tgt = tgt_value*np.ones((0, 3))
res = f(mat, axis=0)
assert_equal(res, tgt)
tgt = mat
res = f(mat, axis=1)
assert_equal(res, tgt)
tgt = np.zeros((0))
res = f(mat, axis=None)
assert_equal(res, tgt)
def test_keepdims(self):
for f, g in zip(self.nanfuncs, self.stdfuncs):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = f(mat, axis=axis, out=None)
res = g(mat, axis=axis, out=None)
assert_(res.ndim == tgt.ndim)
for f in self.nanfuncs:
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
rs = np.random.RandomState(0)
d[rs.rand(*d.shape) < 0.5] = np.nan
res = f(d, axis=None)
assert_equal(res.shape, (1155,))
for axis in np.arange(4):
res = f(d, axis=axis)
assert_equal(res.shape, (3, 5, 7, 11))
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
for axis in np.arange(2):
res = f(mat, axis=axis)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 3))
res = f(mat)
assert_(res.shape == (1, 3*3))
def test_result_values(self):
for axis in (-2, -1, 0, 1, None):
tgt = np.cumprod(_ndat_ones, axis=axis)
res = np.nancumprod(_ndat, axis=axis)
assert_almost_equal(res, tgt)
tgt = np.cumsum(_ndat_zeros,axis=axis)
res = np.nancumsum(_ndat, axis=axis)
assert_almost_equal(res, tgt)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.eye(3)
for axis in (-2, -1, 0, 1):
tgt = rf(mat, axis=axis)
res = nf(mat, axis=axis, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nanmean, np.nanvar, np.nanstd]
stdfuncs = [np.mean, np.var, np.std]
def test_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object_]:
assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)
def test_out_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object_]:
out = np.empty(_ndat.shape[0], dtype=dtype)
assert_raises(TypeError, f, _ndat, axis=1, out=out)
def test_ddof(self):
nanfuncs = [np.nanvar, np.nanstd]
stdfuncs = [np.var, np.std]
for nf, rf in zip(nanfuncs, stdfuncs):
for ddof in [0, 1]:
tgt = [rf(d, ddof=ddof) for d in _rdat]
res = nf(_ndat, axis=1, ddof=ddof)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
nanfuncs = [np.nanvar, np.nanstd]
stdfuncs = [np.var, np.std]
dsize = [len(d) for d in _rdat]
for nf, rf in zip(nanfuncs, stdfuncs):
for ddof in range(5):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
sup.filter(np.ComplexWarning)
tgt = [ddof >= d for d in dsize]
res = nf(_ndat, axis=1, ddof=ddof)
assert_equal(np.isnan(res), tgt)
if any(tgt):
assert_(len(sup.log) == 1)
else:
assert_(len(sup.log) == 0)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalar
assert_(np.isnan(f(np.nan)))
assert_(len(w) == 2)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
class TestNanFunctions_Median(TestCase):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
np.nanmedian(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = np.median(mat, axis=axis, out=None, overwrite_input=False)
res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False)
assert_(res.ndim == tgt.ndim)
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
w = w.astype(np.intp)
d[tuple(w)] = np.nan
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
res = np.nanmedian(d, axis=None, keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanmedian(d, axis=(0, 1), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 11))
res = np.nanmedian(d, axis=(0, 3), keepdims=True)
assert_equal(res.shape, (1, 5, 7, 1))
res = np.nanmedian(d, axis=(1,), keepdims=True)
assert_equal(res.shape, (3, 1, 7, 11))
res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
resout = np.zeros(3)
tgt = np.median(mat, axis=1)
res = np.nanmedian(nan_mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
# 0-d output:
resout = np.zeros(())
tgt = np.median(mat, axis=None)
res = np.nanmedian(nan_mat, axis=None, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
res = np.nanmedian(nan_mat, axis=(0, 1), out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_small_large(self):
# test the small and large code paths, current cutoff 400 elements
for s in [5, 20, 51, 200, 1000]:
d = np.random.randn(4, s)
# Randomly set some elements to NaN:
w = np.random.randint(0, d.size, size=d.size // 5)
d.ravel()[w] = np.nan
d[:,0] = 1. # ensure at least one good value
# use normal median without nans to compare
tgt = []
for x in d:
nonan = np.compress(~np.isnan(x), x)
tgt.append(np.median(nonan, overwrite_input=True))
assert_array_equal(np.nanmedian(d, axis=-1), tgt)
def test_result_values(self):
tgt = [np.median(d) for d in _rdat]
res = np.nanmedian(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for axis in [None, 0, 1]:
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
if axis is None:
assert_(len(sup.log) == 1)
else:
assert_(len(sup.log) == 3)
# Check scalar
assert_(np.isnan(np.nanmedian(np.nan)))
if axis is None:
assert_(len(sup.log) == 2)
else:
assert_(len(sup.log) == 4)
def test_empty(self):
mat = np.zeros((0, 3))
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(np.nanmedian(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
def test_scalar(self):
assert_(np.nanmedian(0.) == 0.)
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(np.AxisError, np.nanmedian, d, axis=-5)
assert_raises(np.AxisError, np.nanmedian, d, axis=(0, -5))
assert_raises(np.AxisError, np.nanmedian, d, axis=4)
assert_raises(np.AxisError, np.nanmedian, d, axis=(0, 4))
assert_raises(ValueError, np.nanmedian, d, axis=(1, 1))
def test_float_special(self):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
for inf in [np.inf, -np.inf]:
a = np.array([[inf, np.nan], [np.nan, np.nan]])
assert_equal(np.nanmedian(a, axis=0), [inf, np.nan])
assert_equal(np.nanmedian(a, axis=1), [inf, np.nan])
assert_equal(np.nanmedian(a), inf)
# minimum fill value check
a = np.array([[np.nan, np.nan, inf],
[np.nan, np.nan, inf]])
assert_equal(np.nanmedian(a), inf)
assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf])
assert_equal(np.nanmedian(a, axis=1), inf)
# no mask path
a = np.array([[inf, inf], [inf, inf]])
assert_equal(np.nanmedian(a, axis=1), inf)
a = np.array([[inf, 7, -inf, -9],
[-10, np.nan, np.nan, 5],
[4, np.nan, np.nan, inf]],
dtype=np.float32)
if inf > 0:
assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.])
assert_equal(np.nanmedian(a), 4.5)
else:
assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.])
assert_equal(np.nanmedian(a), -2.5)
"""
Group-wise function alignment using SRSF framework and Dynamic Programming
moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import matplotlib.pyplot as plt
import fdasrsf.utility_functions as uf
import fdasrsf.bayesian_functions as bf
import fdasrsf.fPCA as fpca
import fdasrsf.geometry as geo
from scipy.integrate import trapz, cumtrapz
from scipy.interpolate import interp1d
from scipy.linalg import svd, cholesky
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform, pdist
import GPy
from numpy.linalg import norm, inv
from numpy.random import rand, normal
from joblib import Parallel, delayed
from fdasrsf.fPLS import pls_svd
from tqdm import tqdm
import fdasrsf.plot_style as plot
import fpls_warp as fpls
import collections
class fdawarp:
"""
This class provides alignment methods for functional data using the SRVF framework
Usage: obj = fdawarp(f,t)
:param f: (M,N): matrix defining N functions of M samples
:param time: time vector of length M
:param fn: aligned functions
:param qn: aligned srvfs
:param q0: initial srvfs
:param fmean: function mean
:param mqn: mean srvf
:param gam: warping functions
:param psi: srvf of warping functions
:param stats: alignment statistics
:param qun: cost function
:param lambda: elasticity parameter
:param method: optimization method
:param gamI: inverse warping function
:param rsamps: random samples
:param fs: random aligned functions
:param gams: random warping functions
:param ft: random warped functions
:param qs: random aligned srvfs
:param type: alignment type
:param mcmc: mcmc output if bayesian
Author : <NAME> (JDT) <jdtuck AT sandia.gov>
Date : 15-Mar-2018
"""
def __init__(self, f, time):
"""
Construct an instance of the fdawarp class
:param f: numpy ndarray of shape (M,N) of N functions with M samples
:param time: vector of size M describing the sample points
"""
a = time.shape[0]
if f.shape[0] != a:
raise Exception('Columns of f and time must be equal')
self.f = f
self.time = time
self.rsamps = False
def srsf_align(self, method="mean", omethod="DP2", center=True,
smoothdata=False, MaxItr=20, parallel=False, lam=0.0,
cores=-1, grid_dim=7):
"""
This function aligns a collection of functions using the elastic
square-root slope (srsf) framework.
:param method: (string) calculate Karcher mean or median
(options = "mean" or "median") (default="mean")
:param omethod: optimization method (DP, DP2, RBFGS) (default = DP2)
:param center: center warping functions (default = T)
:param smoothdata: Smooth the data using a box filter (default = F)
:param MaxItr: Maximum number of iterations (default = 20)
:param parallel: run in parallel (default = F)
:param lam: controls the elasticity (default = 0)
:param cores: number of cores for parallel (default = -1 (all))
:param grid_dim: size of the grid, for the DP2 method only (default = 7)
:type lam: double
:type smoothdata: bool
Examples
>>> import tables
>>> fun=tables.open_file("../Data/simu_data.h5")
>>> f = fun.root.f[:]
>>> f = f.transpose()
>>> time = fun.root.time[:]
>>> obj = fs.fdawarp(f,time)
>>> obj.srsf_align()
"""
M = self.f.shape[0]
N = self.f.shape[1]
self.lam = lam
if M > 500:
parallel = True
elif N > 100:
parallel = True
eps = np.finfo(np.double).eps
f0 = self.f
self.method = omethod
methods = ["mean", "median"]
self.type = method
# 0 mean, 1-median
method = [i for i, x in enumerate(methods) if x == method]
if len(method) == 0:
method = 0
else:
method = method[0]
# Compute SRSF function from data
f, g, g2 = uf.gradient_spline(self.time, self.f, smoothdata)
q = g / np.sqrt(abs(g) + eps)
print("Initializing...")
mnq = q.mean(axis=1)
a = mnq.repeat(N)
d1 = a.reshape(M, N)
d = (q - d1) ** 2
dqq = np.sqrt(d.sum(axis=0))
min_ind = dqq.argmin()
mq = q[:, min_ind]
mf = f[:, min_ind]
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq, self.time,
q[:, n], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
gam = np.zeros((M,N))
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq,self.time,q[:,k],omethod,lam,grid_dim)
gamI = uf.SqrtMeanInverse(gam)
mf = np.interp((self.time[-1] - self.time[0]) * gamI + self.time[0], self.time, mf)
mq = uf.f_to_srsf(mf, self.time)
# Compute Karcher Mean
if method == 0:
print("Compute Karcher Mean of %d function in SRSF space..." % N)
if method == 1:
print("Compute Karcher Median of %d function in SRSF space..." % N)
ds = np.repeat(0.0, MaxItr + 2)
ds[0] = np.inf
qun = np.repeat(0.0, MaxItr + 1)
tmp = np.zeros((M, MaxItr + 2))
tmp[:, 0] = mq
mq = tmp
tmp = np.zeros((M, MaxItr+2))
tmp[:,0] = mf
mf = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = self.f
f = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = q
q = tmp
for r in range(0, MaxItr):
print("updating step: r=%d" % (r + 1))
if r == (MaxItr - 1):
print("maximal number of iterations is reached")
# Matching Step
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq[:, r],
self.time, q[:, n, 0], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq[:, r], self.time, q[:, k, 0],
omethod, lam, grid_dim)
gam_dev = np.zeros((M, N))
vtil = np.zeros((M,N))
dtil = np.zeros(N)
for k in range(0, N):
f[:, k, r + 1] = np.interp((self.time[-1] - self.time[0]) * gam[:, k]
+ self.time[0], self.time, f[:, k, 0])
q[:, k, r + 1] = uf.f_to_srsf(f[:, k, r + 1], self.time)
gam_dev[:, k] = np.gradient(gam[:, k], 1 / float(M - 1))
v = q[:, k, r + 1] - mq[:,r]
d = np.sqrt(trapz(v*v, self.time))
vtil[:,k] = v/d
dtil[k] = 1.0/d
mqt = mq[:, r]
a = mqt.repeat(N)
d1 = a.reshape(M, N)
d = (q[:, :, r + 1] - d1) ** 2
if method == 0:
d1 = sum(trapz(d, self.time, axis=0))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, self.time, axis=0))
ds_tmp = d1 + lam * d2
ds[r + 1] = ds_tmp
# Minimization Step
# compute the mean of the matched function
qtemp = q[:, :, r + 1]
ftemp = f[:, :, r + 1]
mq[:, r + 1] = qtemp.mean(axis=1)
mf[:, r + 1] = ftemp.mean(axis=1)
qun[r] = norm(mq[:, r + 1] - mq[:, r]) / norm(mq[:, r])
if method == 1:
d1 = np.sqrt(sum(trapz(d, self.time, axis=0)))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, self.time, axis=0))
ds_tmp = d1 + lam * d2
ds[r + 1] = ds_tmp
# Minimization Step
# compute the mean of the matched function
stp = .3
vbar = vtil.sum(axis=1)*(1/dtil.sum())
qtemp = q[:, :, r + 1]
ftemp = f[:, :, r + 1]
mq[:, r + 1] = mq[:,r] + stp*vbar
tmp = np.zeros(M)
tmp[1:] = cumtrapz(mq[:, r + 1] * np.abs(mq[:, r + 1]), self.time)
mf[:, r + 1] = np.median(f0[1, :])+tmp
qun[r] = norm(mq[:, r + 1] - mq[:, r]) / norm(mq[:, r])
if qun[r] < 1e-2 or r >= MaxItr:
break
# Last Step with centering of gam
if center:
r += 1
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq[:, r], self.time,
q[:, n, 0], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq[:, r], self.time, q[:, k, 0], omethod,
lam, grid_dim)
gam_dev = np.zeros((M, N))
for k in range(0, N):
gam_dev[:, k] = np.gradient(gam[:, k], 1 / float(M - 1))
gamI = uf.SqrtMeanInverse(gam)
gamI_dev = np.gradient(gamI, 1 / float(M - 1))
time0 = (self.time[-1] - self.time[0]) * gamI + self.time[0]
mq[:, r + 1] = np.interp(time0, self.time, mq[:, r]) * np.sqrt(gamI_dev)
for k in range(0, N):
q[:, k, r + 1] = np.interp(time0, self.time, q[:, k, r]) * np.sqrt(gamI_dev)
f[:, k, r + 1] = np.interp(time0, self.time, f[:, k, r])
gam[:, k] = np.interp(time0, self.time, gam[:, k])
else:
gamI = uf.SqrtMeanInverse(gam)
gamI_dev = np.gradient(gamI, 1 / float(M - 1))
# Aligned data & stats
self.fn = f[:, :, r + 1]
self.qn = q[:, :, r + 1]
self.q0 = q[:, :, 0]
mean_f0 = f0.mean(axis=1)
std_f0 = f0.std(axis=1)
mean_fn = self.fn.mean(axis=1)
std_fn = self.fn.std(axis=1)
self.gam = gam
self.mqn = mq[:, r + 1]
tmp = np.zeros(M)
tmp[1:] = cumtrapz(self.mqn * np.abs(self.mqn), self.time)
self.fmean = np.mean(f0[1, :]) + tmp
fgam = np.zeros((M, N))
for k in range(0, N):
time0 = (self.time[-1] - self.time[0]) * gam[:, k] + self.time[0]
fgam[:, k] = np.interp(time0, self.time, self.fmean)
var_fgam = fgam.var(axis=1)
self.orig_var = trapz(std_f0 ** 2, self.time)
self.amp_var = trapz(std_fn ** 2, self.time)
self.phase_var = trapz(var_fgam, self.time)
return
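# Editorial sketch (hedged; not part of fdasrsf): the SRSF transform used
# throughout this class, q(t) = f'(t) / sqrt(|f'(t)|), written out with raw
# numpy for a toy function. uf.f_to_srsf is the library routine; this is
# only an illustration of the formula.
#   t = np.linspace(0, 1, 101)
#   f = np.sin(2 * np.pi * t)
#   g = np.gradient(f, t)
#   q = g / np.sqrt(np.abs(g) + np.finfo(np.double).eps)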
def plot(self):
"""
plot plot functional alignment results
Usage: obj.plot()
"""
M = self.f.shape[0]
plot.f_plot(self.time, self.f, title="f Original Data")
fig, ax = plot.f_plot(np.arange(0, M) / float(M - 1), self.gam,
title="Warping Functions")
ax.set_aspect('equal')
plot.f_plot(self.time, self.fn, title="Warped Data")
mean_f0 = self.f.mean(axis=1)
std_f0 = self.f.std(axis=1)
mean_fn = self.fn.mean(axis=1)
std_fn = self.fn.std(axis=1)
tmp = np.array([mean_f0, mean_f0 + std_f0, mean_f0 - std_f0])
tmp = tmp.transpose()
plot.f_plot(self.time, tmp, title=r"Original Data: Mean $\pm$ STD")
tmp = np.array([mean_fn, mean_fn + std_fn, mean_fn - std_fn])
tmp = tmp.transpose()
plot.f_plot(self.time, tmp, title=r"Warped Data: Mean $\pm$ STD")
plot.f_plot(self.time, self.fmean, title="$f_{mean}$")
plt.show()
return
def gauss_model(self, n=1, sort_samples=False):
"""
This function models the functional data using a Gaussian model
extracted from the principal components of the srvfs
:param n: number of random samples
:param sort_samples: sort samples (default = T)
:type n: integer
:type sort_samples: bool
"""
fn = self.fn
time = self.time
qn = self.qn
gam = self.gam
# Parameters
eps = np.finfo(np.double).eps
binsize = np.diff(time)
binsize = binsize.mean()
M = time.size
# compute mean and covariance in q-domain
mq_new = qn.mean(axis=1)
mididx = int(np.round(time.shape[0] / 2))
m_new = np.sign(fn[mididx, :]) * np.sqrt(np.abs(fn[mididx, :]))
mqn = np.append(mq_new, m_new.mean())
qn2 = np.vstack((qn, m_new))
C = np.cov(qn2)
q_s = np.random.multivariate_normal(mqn, C, n)
q_s = q_s.transpose()
# compute the correspondence to the original function domain
fs = np.zeros((M, n))
for k in range(0, n):
fs[:, k] = uf.cumtrapzmid(time, q_s[0:M, k] * np.abs(q_s[0:M, k]),
np.sign(q_s[M, k]) * (q_s[M, k] ** 2),
mididx)
fbar = fn.mean(axis=1)
fsbar = fs.mean(axis=1)
err = np.transpose(np.tile(fbar-fsbar, (n,1)))
fs += err
# random warping generation
rgam = uf.randomGamma(gam, n)
gams = np.zeros((M, n))
for k in range(0, n):
gams[:, k] = uf.invertGamma(rgam[:, k])
# sort functions and warping
if sort_samples:
mx = fs.max(axis=0)
seq1 = mx.argsort()
# compute the psi-function
fy = np.gradient(rgam, binsize)
psi = fy / np.sqrt(abs(fy) + eps)
ip = np.zeros(n)
arclen = np.zeros(n)  # renamed from 'len' to avoid shadowing the builtin
for i in range(0, n):
tmp = np.ones(M)
ip[i] = tmp.dot(psi[:, i] / M)
arclen[i] = np.arccos(tmp.dot(psi[:, i] / M))
seq2 = arclen.argsort()
# combine x-variability and y-variability
ft = np.zeros((M, n))
for k in range(0, n):
ft[:, k] = np.interp(gams[:, seq2[k]], np.arange(0, M) /
np.double(M - 1), fs[:, seq1[k]])
tmp = np.isnan(ft[:, k])
while tmp.any():
rgam2 = uf.randomGamma(gam, 1)
ft[:, k] = np.interp(gams[:, seq2[k]], np.arange(0, M) /
np.double(M - 1), uf.invertGamma(rgam2))
else:
# combine x-variability and y-variability
ft = np.zeros((M, n))
for k in range(0, n):
ft[:, k] = np.interp(gams[:, k], np.arange(0, M) /
np.double(M - 1), fs[:, k])
tmp = np.isnan(ft[:, k])
while tmp.any():
rgam2 = uf.randomGamma(gam, 1)
ft[:, k] = np.interp(gams[:, k], np.arange(0, M) /
np.double(M - 1), uf.invertGamma(rgam2))
self.rsamps = True
self.fs = fs
self.gams = rgam
self.ft = ft
self.qs = q_s[0:M,:]
return
def joint_gauss_model(self, n=1, no=3):
"""
This function models the functional data using a joint Gaussian model
extracted from the principal components of the srsfs
:param n: number of random samples
:param no: number of principal components (default = 3)
:type n: integer
:type no: integer
"""
# Parameters
fn = self.fn
time = self.time
qn = self.qn
gam = self.gam
M = time.size
# Perform PCA
jfpca = fpca.fdajpca(self)
jfpca.calc_fpca(no=no)
s = jfpca.latent
U = jfpca.U
C = jfpca.C
mu_psi = jfpca.mu_psi
# compute mean and covariance
mq_new = qn.mean(axis=1)
mididx = jfpca.id
m_new = np.sign(fn[mididx, :]) * np.sqrt(np.abs(fn[mididx, :]))
mqn = np.append(mq_new, m_new.mean())
# generate random samples
vals = np.random.multivariate_normal(np.zeros(s.shape), np.diag(s), n)
tmp = np.matmul(U, np.transpose(vals))
qhat = np.tile(mqn.T,(n,1)).T + tmp[0:M+1,:]
tmp = np.matmul(U, np.transpose(vals)/C)
vechat = tmp[(M+1):,:]
psihat = np.zeros((M,n))
gamhat = np.zeros((M,n))
for ii in range(n):
psihat[:,ii] = geo.exp_map(mu_psi,vechat[:,ii])
gam_tmp = cumtrapz(psihat[:,ii]**2,np.linspace(0,1,M),initial=0.0)
gamhat[:,ii] = (gam_tmp - gam_tmp.min())/(gam_tmp.max()-gam_tmp.min())
ft = np.zeros((M,n))
fhat = np.zeros((M,n))
for ii in range(n):
fhat[:,ii] = uf.cumtrapzmid(time, qhat[0:M,ii]*np.fabs(qhat[0:M,ii]), np.sign(qhat[M,ii])*(qhat[M,ii]*qhat[M,ii]), mididx)
ft[:,ii] = uf.warp_f_gamma(np.linspace(0,1,M),fhat[:,ii],gamhat[:,ii])
self.rsamps = True
self.fs = fhat
self.gams = gamhat
self.ft = ft
self.qs = qhat[0:M,:]
return
def multiple_align_functions(self, mu, omethod="DP2", smoothdata=False,
parallel=False, lam=0.0, cores=-1, grid_dim=7):
"""
This function aligns a collection of functions using the elastic square-root
slope (srsf) framework.
Usage: obj.multiple_align_functions(mu)
obj.multiple_align_functions(lambda)
obj.multiple_align_functions(lambda, ...)
:param mu: vector of function to align to
:param omethod: optimization method (DP, DP2, RBFGS) (default = DP2)
:param smoothdata: Smooth the data using a box filter (default = F)
:param parallel: run in parallel (default = F)
:param lam: controls the elasticity (default = 0)
:param cores: number of cores for parallel (default = -1 (all))
:param grid_dim: size of the grid, for the DP2 method only (default = 7)
:type lam: double
:type smoothdata: bool
"""
M = self.f.shape[0]
N = self.f.shape[1]
self.lam = lam
if M > 500:
parallel = True
elif N > 100:
parallel = True
eps = np.finfo(np.double).eps
self.method = omethod
self.type = "multiple"
# Compute SRSF function from data
f, g, g2 = uf.gradient_spline(self.time, self.f, smoothdata)
q = g / np.sqrt(abs(g) + eps)
mq = uf.f_to_srsf(mu, self.time)
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq, self.time,
q[:, n], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
gam = np.zeros((M,N))
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq,self.time,q[:,k],omethod,lam,grid_dim)
self.gamI = uf.SqrtMeanInverse(gam)
fn = np.zeros((M,N))
qn = np.zeros((M,N))
for k in range(0, N):
fn[:, k] = np.interp((self.time[-1] - self.time[0]) * gam[:, k]
+ self.time[0], self.time, f[:, k])
qn[:, k] = uf.f_to_srsf(f[:, k], self.time)
# Aligned data & stats
self.fn = fn
self.qn = qn
self.q0 = q
mean_f0 = f.mean(axis=1)
std_f0 = f.std(axis=1)
mean_fn = self.fn.mean(axis=1)
std_fn = self.fn.std(axis=1)
self.gam = gam
self.mqn = mq
self.fmean = mu
fgam = np.zeros((M, N))
for k in range(0, N):
time0 = (self.time[-1] - self.time[0]) * gam[:, k] + self.time[0]
fgam[:, k] = np.interp(time0, self.time, self.fmean)
var_fgam = fgam.var(axis=1)
self.orig_var = trapz(std_f0 ** 2, self.time)
self.amp_var = trapz(std_fn ** 2, self.time)
self.phase_var = trapz(var_fgam, self.time)
return
def pairwise_align_functions(f1, f2, time, omethod="DP2", lam=0, grid_dim=7):
"""
This function aligns f2 to f1 using the elastic square-root
slope (srsf) framework.
Usage: out = pairwise_align_functions(f1, f2, time)
out = pairwise_align_functions(f1, f2, time, omethod, lam, grid_dim)
:param f1: vector defining M samples of function 1
:param f2: vector defining M samples of function 2
:param time: time vector of length M
:param omethod: optimization method (DP, DP2, RBFGS) (default = DP2)
:param lam: controls the elasticity (default = 0)
:param grid_dim: size of the grid, for the DP2 method only (default = 7)
:rtype list containing
:return f2n: aligned f2
:return gam: warping function
:return q2n: aligned q2 (srsf)
"""
q1 = uf.f_to_srsf(f1, time)
q2 = uf.f_to_srsf(f2, time)
gam = uf.optimum_reparam(q1, time, q2, omethod, lam, grid_dim)
f2n = uf.warp_f_gamma(time, f2 , gam)
q2n = uf.f_to_srsf(f2n, time)
return (f2n, gam, q2n)
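# Editorial usage sketch (hedged; synthetic data, not from the package docs):
#   t = np.linspace(0, 1, 101)
#   f1 = np.sin(2 * np.pi * t)
#   f2 = np.sin(2 * np.pi * t ** 1.5)        # a warped copy of f1
#   f2n, gam, q2n = pairwise_align_functions(f1, f2, t)
#   # f2n should now be close to f1, and gam should approximate t ** (2/3)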
def pairwise_align_bayes(f1i, f2i, time, mcmcopts=None):
"""
This function aligns two functions using Bayesian framework. It will align
f2 to f1. It is based on mapping warping functions to a hypersphere, and a
subsequent exponential mapping to a tangent space. In the tangent space,
the Z-mixture pCN algorithm is used to explore both local and global
structure in the posterior distribution.
The Z-mixture pCN algorithm uses a mixture distribution for the proposal
distribution, controlled by input parameter zpcn. The zpcn$betas must be
between 0 and 1, and are the coefficients of the mixture components, with
larger coefficients corresponding to larger shifts in parameter space. The
zpcn["probs"] give the probability of each shift size.
Usage: out = pairwise_align_bayes(f1i, f2i, time)
out = pairwise_align_bayes(f1i, f2i, time, mcmcopts)
:param f1i: vector defining M samples of function 1
:param f2i: vector defining M samples of function 2
:param time: time vector of length M
:param mcmcopts: dict of mcmc parameters
:type mcmcopts: dict
default mcmc options:
tmp = {"betas":np.array([0.5,0.5,0.005,0.0001]),"probs":np.array([0.1,0.1,0.7,0.1])}
mcmcopts = {"iter":2*(10**4) ,"burnin":np.minimum(5*(10**3),2*(10**4)//2),
"alpha0":0.1, "beta0":0.1,"zpcn":tmp,"propvar":1,
"initcoef":np.repeat(0,20), "npoints":200, "extrainfo":True}
:rtype collection containing
:return f2_warped: aligned f2
:return gamma: warping function
:return g_coef: final g_coef
:return psi: final psi
:return sigma1: final sigma
if extrainfo
:return accept: accept of psi samples
:return betas_ind
:return logl: log likelihood
:return gamma_mat: posterior gammas
:return gamma_stats: posterior gamma stats
:return xdist: phase distance posterior
:return ydist: amplitude distance posterior
"""
if mcmcopts is None:
tmp = {"betas":np.array([0.5,0.5,0.005,0.0001]),"probs":np.array([0.1,0.1,0.7,0.1])}
mcmcopts = {"iter":2*(10**4) ,"burnin":np.minimum(5*(10**3),2*(10**4)//2),"alpha0":0.1,
"beta0":0.1,"zpcn":tmp,"propvar":1,
"initcoef":np.repeat(0,20), "npoints":200, "extrainfo":True}
if f1i.shape[0] != f2i.shape[0]:
raise Exception('Length of f1 and f2 must be equal')
if f1i.shape[0] != time.shape[0]:
raise Exception('Length of f1 and time must be equal')
if mcmcopts["zpcn"]["betas"].shape[0] != mcmcopts["zpcn"]["probs"].shape[0]:
raise Exception('In zpcn, betas must equal length of probs')
if np.mod(mcmcopts["initcoef"].shape[0], 2) != 0:
raise Exception('Length of mcmcopts.initcoef must be even')
# Number of sig figs to report in gamma_mat
SIG_GAM = 13
iter = mcmcopts["iter"]
# parameter settings
pw_sim_global_burnin = mcmcopts["burnin"]
valid_index = np.arange(pw_sim_global_burnin-1,iter)
pw_sim_global_Mg = mcmcopts["initcoef"].shape[0]//2
g_coef_ini = mcmcopts["initcoef"]
numSimPoints = mcmcopts["npoints"]
pw_sim_global_domain_par = np.linspace(0,1,numSimPoints)
g_basis = uf.basis_fourier(pw_sim_global_domain_par, pw_sim_global_Mg, 1)
sigma1_ini = 1
zpcn = mcmcopts["zpcn"]
pw_sim_global_sigma_g = mcmcopts["propvar"]
def propose_g_coef(g_coef_curr):
pCN_beta = zpcn["betas"]
pCN_prob = zpcn["probs"]
probm = np.insert(np.cumsum(pCN_prob),0,0)
z = np.random.rand()
result = {"prop":g_coef_curr,"ind":1}
for i in range (0,pCN_beta.shape[0]):
if z <= probm[i+1] and z > probm[i]:
g_coef_new = normal(0, pw_sim_global_sigma_g / np.repeat(np.arange(1,pw_sim_global_Mg+1),2))
result["prop"] = np.sqrt(1-pCN_beta[i]**2) * g_coef_curr + pCN_beta[i] * g_coef_new
result["ind"] = i
return result
# normalize time to [0,1]
time = (time - time.min())/(time.max()-time.min())
timet = np.linspace(0,1,numSimPoints)
f1 = uf.f_predictfunction(f1i,timet,0)
f2 = uf.f_predictfunction(f2i,timet,0)
# srsf transformation
q1 = uf.f_to_srsf(f1,timet)
q1i = uf.f_to_srsf(f1i,time)
q2 = uf.f_to_srsf(f2,timet)
tmp = uf.f_exp1(uf.f_basistofunction(g_basis["x"],0,g_coef_ini,g_basis))
if tmp.min() < 0:
raise Exception("Invalid initial value of g")
# result vectors
g_coef = np.zeros((iter,g_coef_ini.shape[0]))
sigma1 = np.zeros(iter)
logl = np.zeros(iter)
SSE = np.zeros(iter)
accept = np.zeros(iter, dtype=bool)
accept_betas = np.zeros(iter)
# init
g_coef_curr = g_coef_ini
sigma1_curr = sigma1_ini
SSE_curr = bf.f_SSEg_pw(uf.f_basistofunction(g_basis["x"],0,g_coef_ini,g_basis),q1,q2)
logl_curr = bf.f_logl_pw(uf.f_basistofunction(g_basis["x"],0,g_coef_ini,g_basis),q1,q2,sigma1_ini**2,SSE_curr)
g_coef[0,:] = g_coef_ini
sigma1[0] = sigma1_ini
SSE[0] = SSE_curr
logl[0] = logl_curr
# update the chain for iter-1 times
for m in tqdm(range(1,iter)):
# update g
g_coef_curr, tmp, SSE_curr, accepti, zpcnInd = bf.f_updateg_pw(g_coef_curr, g_basis, sigma1_curr**2, q1, q2, SSE_curr, propose_g_coef)
# update sigma1
newshape = q1.shape[0]/2 + mcmcopts["alpha0"]
newscale = 1/2 * SSE_curr + mcmcopts["beta0"]
sigma1_curr = np.sqrt(1/np.random.gamma(newshape,1/newscale))
logl_curr = bf.f_logl_pw(uf.f_basistofunction(g_basis["x"],0,g_coef_curr,g_basis), q1, q2, sigma1_curr**2, SSE_curr)
# save updates to results
g_coef[m,:] = g_coef_curr
sigma1[m] = sigma1_curr
SSE[m] = SSE_curr
if mcmcopts["extrainfo"]:
logl[m] = logl_curr
accept[m] = accepti
accept_betas[m] = zpcnInd
# calculate posterior mean of psi
pw_sim_est_psi_matrix = np.zeros((numSimPoints,valid_index.shape[0]))
for k in range(0,valid_index.shape[0]):
g_temp = uf.f_basistofunction(g_basis["x"],0,g_coef[valid_index[k],:],g_basis)
psi_temp = uf.f_exp1(g_temp)
pw_sim_est_psi_matrix[:,k] = psi_temp
result_posterior_psi_simDomain = uf.f_psimean(pw_sim_global_domain_par, pw_sim_est_psi_matrix)
# resample to same number of points as the input f1 and f2
interp = interp1d(np.linspace(0,1,result_posterior_psi_simDomain.shape[0]), result_posterior_psi_simDomain, fill_value="extrapolate")
result_posterior_psi = interp(np.linspace(0,1,f1i.shape[0]))
# transform posterior mean of psi to gamma
result_posterior_gamma = uf.f_phiinv(result_posterior_psi)
result_posterior_gamma = uf.norm_gam(result_posterior_gamma)
# warped f2
f2_warped = uf.warp_f_gamma(time, f2i, result_posterior_gamma)
if mcmcopts["extrainfo"]:
M,N = pw_sim_est_psi_matrix.shape
gamma_mat = np.zeros((time.shape[0],N))
one_v = np.ones(M)
Dx = np.zeros(N)
Dy = np.zeros(N)  # separate array; aliasing Dx here would overwrite the phase distances
for ii in range(0,N):
interp = interp1d(np.linspace(0,1,result_posterior_psi_simDomain.shape[0]), pw_sim_est_psi_matrix[:,ii], fill_value="extrapolate")
result_i = interp(time)
tmp = uf.f_phiinv(result_i)
gamma_mat[:,ii] = uf.norm_gam(tmp)
v, theta = geo.inv_exp_map(one_v,pw_sim_est_psi_matrix[:,ii])
Dx[ii] = np.sqrt(trapz(v**2,pw_sim_global_domain_par))
q2warp = uf.warp_q_gamma(pw_sim_global_domain_par,q2,gamma_mat[:,ii])
Dy[ii] = np.sqrt(trapz((q1i-q2warp)**2,time))
gamma_stats = uf.statsFun(gamma_mat)
results_o = collections.namedtuple('align_bayes', ['f2_warped', 'gamma','g_coef', 'psi', 'sigma1', 'accept', 'betas_ind', 'logl', 'gamma_mat', 'gamma_stats', 'xdist', 'ydist'])
out = results_o(f2_warped, result_posterior_gamma, g_coef, result_posterior_psi, sigma1, accept[1:], accept_betas[1:], logl, gamma_mat, gamma_stats, Dx, Dy)
return(out)
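# Editorial sketch (hedged): overriding the Z-mixture pCN proposal described
# in the docstring above -- betas are mixture step sizes in (0, 1), probs
# their selection probabilities (values here are illustrative only):
#   zpcn = {"betas": np.array([0.5, 0.05, 0.005]),
#           "probs": np.array([0.2, 0.6, 0.2])}
#   opts = {"iter": 2 * 10**4, "burnin": 5 * 10**3, "alpha0": 0.1,
#           "beta0": 0.1, "zpcn": zpcn, "propvar": 1,
#           "initcoef": np.repeat(0, 20), "npoints": 200, "extrainfo": True}
#   out = pairwise_align_bayes(f1, f2, time, mcmcopts=opts)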
def pairwise_align_bayes_infHMC(y1i, y2i, time, mcmcopts=None):
"""
This function aligns two functions using Bayesian framework. It uses a
hierarchical Bayesian framework assuming measurement error. It will
align f2 to f1. It is based on mapping warping functions to a hypersphere,
and a subsequent exponential mapping to a tangent space. In the tangent space,
the \infty-HMC algorithm is used to explore both local and global
structure in the posterior distribution.
Usage: out = pairwise_align_bayes_infHMC(f1i, f2i, time)
out = pairwise_align_bayes_infHMC(f1i, f2i, time, mcmcopts)
:param y1i: vector defining M samples of function 1
:param y2i: vector defining M samples of function 2
:param time: time vector of length M
:param mcmcopts: dict of mcmc parameters
:type mcmcopts: dict
default mcmc options:
mcmcopts = {"iter":1*(10**4), "nchains":4, "vpriorvar":1,
"burnin":np.minimum(5*(10**3),2*(10**4)//2),
"alpha0":0.1, "beta0":0.1, "alpha":1, "beta":1,
"h":0.01, "L":4, "f1propvar":0.0001, "f2propvar":0.0001,
"L1propvar":0.3, "L2propvar":0.3, "npoints":200, "thin":1,
"sampfreq":1, "initcoef":np.repeat(0,20), "nbasis":10,
"basis":'fourier', "extrainfo":True}
Basis can be 'fourier' or 'legendre'
:rtype collection containing
:return f2_warped: aligned f2
:return gamma: warping function
:return v_coef: final v_coef
:return psi: final psi
:return sigma1: final sigma
if extrainfo
:return theta_accept: accept of psi samples
:return f2_accept: accept of f2 samples
:return SSE: SSE
:return gamma_mat: posterior gammas
:return gamma_stats: posterior gamma stats
:return xdist: phase distance posterior
:return ydist: amplitude distance posterior
<NAME>, <NAME>, and <NAME>. “Multimodal Bayesian Registration of Noisy Functions using Hamiltonian Monte Carlo”, Computational Statistics and Data Analysis, accepted, 2021.
"""
if mcmcopts is None:
mcmcopts = {"iter":1*(10**4), "nchains":4 , "vpriorvar":1,
"burnin":np.minimum(5*(10**3),2*(10**4)//2),
"alpha0":0.1, "beta0":0.1, "alpha":1, "beta":1,
"h":0.01, "L":4, "f1propvar":0.0001, "f2propvar":0.0001,
"L1propvar":0.3, "L2propvar":0.3, "npoints":200, "thin":1,
"sampfreq":1, "initcoef":np.repeat(0,20), "nbasis":10,
"basis":'fourier', "extrainfo":True}
if y1i.shape[0] != y2i.shape[0]:
raise Exception('Length of f1 and f2 must be equal')
if y1i.shape[0] != time.shape[0]:
raise Exception('Length of f1 and time must be equal')
if np.mod(mcmcopts["initcoef"].shape[0], 2) != 0:
raise Exception('Length of mcmcopts.initcoef must be even')
if np.mod(mcmcopts["nbasis"], 2) != 0:
raise Exception('Length of mcmcopts.nbasis must be even')
# set up random start points for more than 1 chain
random_starts = np.zeros((mcmcopts["initcoef"].shape[0], mcmcopts["nchains"]))
if mcmcopts["nchains"] > 1:
for i in range(0, mcmcopts["nchains"]):
randcoef = -1 + (2)*rand(mcmcopts["initcoef"].shape[0])
random_starts[:, i] = randcoef
isparallel = True
if mcmcopts["nchains"] == 1:
isparallel = False
if isparallel:
mcmcopts_p = []
for i in range(0, mcmcopts["nchains"]):
mcmcopts["initcoef"] = random_starts[:, i]
mcmcopts_p.append(mcmcopts)
# run chains
if isparallel:
chains = Parallel(n_jobs=-1)(delayed(run_mcmc)(y1i, y2i, time,
mcmcopts_p[n]) for n in range(mcmcopts["nchains"]))
else:
chains = []
chains1 = run_mcmc(y1i, y2i, time, mcmcopts)
chains.append(chains1)
# combine outputs
Nsamples = chains[0]['f1'].shape[0]
M = chains[0]['f1'].shape[1]
f1 = np.zeros((Nsamples*mcmcopts["nchains"], M))
f2 = np.zeros((Nsamples*mcmcopts["nchains"], M))
gamma = np.zeros((M, mcmcopts["nchains"]))
v_coef = np.zeros((Nsamples*mcmcopts["nchains"], chains[0]['v_coef'].shape[1]))
psi = np.zeros((M, Nsamples*mcmcopts["nchains"]))
sigma = np.zeros(Nsamples*mcmcopts["nchains"])
sigma1 = np.zeros(Nsamples*mcmcopts["nchains"])
sigma2 = np.zeros(Nsamples*mcmcopts["nchains"])
s1 = np.zeros(Nsamples*mcmcopts["nchains"])
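# --- Usage sketch (illustrative; the snippet above is truncated) ---
# A minimal call of pairwise_align_bayes_infHMC, assuming y1 and y2 are
# 1-D numpy arrays sampled on a common time grid. The names below are
# hypothetical example data, not part of the library.
# time = np.linspace(0, 1, 101)
# y1 = np.sin(2 * np.pi * time)
# y2 = np.sin(2 * np.pi * time ** 1.5)
# out = pairwise_align_bayes_infHMC(y1, y2, time)
# out.f2_warped then holds y2 aligned to y1.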
from hackernews import HackerNews
import json
import numpy as np
import unicodedata
class article:
url = ""
title = ""
article_id = 0
article_vector = None
mod_weight = 0.1
def __init__(self, url, title, article_id):
self.url = url
self.title = title
self.article_id = article_id
# create 1000 dimensional row vector, with entries between 0 and 1
random_vector = np.random.rand(1, 1000)
# normalize vector
vec_sum = np.sum(random_vector)
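# Completion sketch (assumption): the snippet is truncated here. Following
# the "normalize vector" comment above, the constructor would plausibly
# finish with something like:
# self.article_vector = random_vector / vec_sum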
# Standard imports
import copy
import itertools
# Scientific computing imports
import numpy
import matplotlib.pyplot as plt
import networkx
import pandas
import seaborn
class Person(object):
"""
Person class, which encapsulates the entire behavior of a person.
"""
def __init__(self, model, person_id, is_infected=False, condom_budget=1.0, prob_hookup=0.5):
"""
Constructor for Person class. By default,
* not infected
* will always buy condoms
* will hookup 50% of the time
Note that we must "link" the Person to their "parent" Model object.
"""
# Set model link and ID
self.model = model
self.person_id = person_id
# Set Person parameters.
self.is_infected = is_infected
self.condom_budget = condom_budget
self.prob_hookup = prob_hookup
def decide_condom(self):
"""
Decide if we will use a condom.
"""
if self.condom_budget >= (self.model.condom_cost - self.model.condom_subsidy):
return True
else:
return False
def decide_hookup(self):
"""
Decide if we want to hookup with a potential partner.
"""
if numpy.random.random() <= self.prob_hookup:
return True
else:
return False
def get_position(self):
"""
Return position, calling through model.
"""
return self.model.get_person_position(self.person_id)
def get_neighbors(self):
"""
Return neighbors, calling through model.
"""
return self.model.get_person_neighbors(self.person_id)
def __repr__(self):
'''
Return string representation.
'''
skip_none = True
repr_string = type(self).__name__ + " ["
except_list = "model"
elements = [e for e in dir(self) if str(e) not in except_list]
for e in elements:
# Make sure we only display "public" fields; skip anything private (_*), that is a method/function, or that is a module.
if not e.startswith("_") and eval('type(self.{0}).__name__'.format(e)) not in ['DataFrame', 'function', 'method', 'builtin_function_or_method', 'module', 'instancemethod']:
value = eval("self." + e)
if value != None and skip_none == True:
repr_string += "{0}={1}, ".format(e, value)
# Clean up trailing space and comma.
return repr_string.strip(" ").strip(",") + "]"
class Model(object):
"""
Model class, which encapsulates the entire behavior of a single "run" in our HIV ABM.
"""
def __init__(self, grid_size, num_people, min_subsidy=0.0, max_subsidy=1.0,
min_condom_budget=0.0, max_condom_budget=2.0,
condom_cost=1.0, min_prob_hookup=0.0, max_prob_hookup=1.0,
prob_transmit=0.9, prob_transmit_condom=0.1):
"""
Class constructor.
"""
# Set our model parameters; this is long but simple!
self.grid_size = grid_size
self.num_people = num_people
self.min_subsidy = min_subsidy
self.max_subsidy = max_subsidy
self.min_condom_budget = min_condom_budget
self.max_condom_budget = max_condom_budget
self.condom_cost = condom_cost
self.min_prob_hookup = min_prob_hookup
self.max_prob_hookup = max_prob_hookup
self.prob_transmit = prob_transmit
self.prob_transmit_condom = prob_transmit_condom
# Set our state variables
self.t = 0
self.space = numpy.array((0,0))
self.condom_subsidy = 0.0
self.people = []
self.num_interactions = 0
self.num_interactions_condoms = 0
self.num_infected = 0
# Setup our history variables.
self.history_space = []
self.history_space_infected = []
self.history_interactions = []
self.history_num_infected = []
self.history_num_interactions = []
self.history_num_interactions_condoms = []
# Call our setup methods to initialize space, people, and institution.
self.setup_space()
self.setup_people()
self.setup_institution()
def setup_space(self):
"""
Method to setup our space.
"""
# Initialize the space with NaN's
self.space = numpy.full((self.grid_size, self.grid_size), numpy.nan)
def setup_people(self):
"""
Method to setup our people.
"""
# First, begin by creating all agents without placing them.
for i in xrange(self.num_people):
self.people.append(Person(model=self,
person_id=i,
is_infected=False,
condom_budget=numpy.random.uniform(self.min_condom_budget, self.max_condom_budget),
prob_hookup=numpy.random.uniform(self.min_prob_hookup, self.max_prob_hookup)))
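# Note (assumption): the snippet is truncated here. Based on the comment
# "First, begin by creating all agents without placing them.", the method
# would plausibly continue by placing each Person at a random empty cell of
# self.space, followed by the setup_institution() method referenced in
# __init__.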
""" Defines the BarPlot class.
"""
from __future__ import with_statement
import logging
from numpy import array, compress, column_stack, invert, isnan, transpose, zeros
from traits.api import Any, Bool, Enum, Float, Instance, Property, \
Range, Tuple, cached_property, on_trait_change
from enable.api import black_color_trait
from kiva.constants import FILL_STROKE
# Local relative imports
from .abstract_plot_renderer import AbstractPlotRenderer
from .abstract_mapper import AbstractMapper
from .array_data_source import ArrayDataSource
from .base import reverse_map_1d
logger = logging.getLogger(__name__)
# TODO: make child of BaseXYPlot
class BarPlot(AbstractPlotRenderer):
"""
A renderer for bar charts.
"""
#: The data source to use for the index coordinate.
index = Instance(ArrayDataSource)
#: The data source to use as value points.
value = Instance(ArrayDataSource)
#: The data source to use as "starting" values for bars (along value axis).
#: For instance, if the values are [10, 20] and starting_value
#: is [3, 7], BarPlot will plot two bars, one between 3 and 10, and
#: one between 7 and 20
starting_value = Instance(ArrayDataSource)
#: Labels for the indices.
index_mapper = Instance(AbstractMapper)
#: Labels for the values.
value_mapper = Instance(AbstractMapper)
#: The orientation of the index axis.
orientation = Enum("h", "v")
#: The direction of the index axis with respect to the graphics context's
#: direction.
index_direction = Enum("normal", "flipped")
#: The direction of the value axis with respect to the graphics context's
#: direction.
value_direction = Enum("normal", "flipped")
#: Type of width used for bars:
#:
#: 'data'
#: The width is in the units along the x-dimension of the data space.
#: 'screen'
#: The width uses a fixed width of pixels.
bar_width_type = Enum("data", "screen")
#: Width of the bars, in data or screen space (determined by
#: **bar_width_type**).
bar_width = Float(10)
#: Round on rectangle dimensions? This is not strictly an "antialias", but
#: it has the same effect through exact pixel drawing.
antialias = Bool(True)
#: Width of the border of the bars.
line_width = Float(1.0)
#: Color of the border of the bars.
line_color = black_color_trait
#: Color to fill the bars.
fill_color = black_color_trait
#: The RGBA tuple for rendering lines. It is always a tuple of length 4.
#: It has the same RGB values as line_color_, and its alpha value is the
#: alpha value of self.line_color multiplied by self.alpha.
effective_line_color = Property(Tuple, depends_on=['line_color', 'alpha'])
#: The RGBA tuple for rendering the fill. It is always a tuple of length 4.
#: It has the same RGB values as fill_color_, and its alpha value is the
#: alpha value of self.fill_color multiplied by self.alpha.
effective_fill_color = Property(Tuple, depends_on=['fill_color', 'alpha'])
#: Overall alpha value of the image. Ranges from 0.0 for transparent to 1.0
alpha = Range(0.0, 1.0, 1.0)
#use_draw_order = False
# Convenience properties that correspond to either index_mapper or
# value_mapper, depending on the orientation of the plot.
#: Corresponds to either **index_mapper** or **value_mapper**, depending on
#: the orientation of the plot.
x_mapper = Property
#: Corresponds to either **value_mapper** or **index_mapper**, depending on
#: the orientation of the plot.
y_mapper = Property
#: Corresponds to either **index_direction** or **value_direction**,
#: depending on the orientation of the plot.
x_direction = Property
#: Corresponds to either **value_direction** or **index_direction**,
#: depending on the orientation of the plot
y_direction = Property
#: Convenience property for accessing the index data range.
index_range = Property
#: Convenience property for accessing the value data range.
value_range = Property
#------------------------------------------------------------------------
# Private traits
#------------------------------------------------------------------------
# Indicates whether or not the data cache is valid
_cache_valid = Bool(False)
# Cached data values from the datasources. If **bar_width_type** is "data",
# then this is an Nx4 array of (bar_left, bar_right, start, end) for a
# bar plot in normal orientation. If **bar_width_type** is "screen", then
# this is an Nx3 array of (bar_center, start, end).
_cached_data_pts = Any
#------------------------------------------------------------------------
# AbstractPlotRenderer interface
#------------------------------------------------------------------------
def __init__(self, *args, **kw):
# These Traits depend on others, so we'll defer setting them until
# after the HasTraits initialization has been completed.
later_list = ['index_direction', 'value_direction']
postponed = {}
for name in later_list:
if name in kw:
postponed[name] = kw.pop(name)
super(BarPlot, self).__init__(*args, **kw)
# Set any keyword Traits that were postponed.
self.trait_set(**postponed)
def map_screen(self, data_array):
""" Maps an array of data points into screen space and returns it as
an array.
Implements the AbstractPlotRenderer interface.
"""
# data_array is Nx2 array
if len(data_array) == 0:
return []
x_ary, y_ary = transpose(data_array)
sx = self.index_mapper.map_screen(x_ary)
sy = self.value_mapper.map_screen(y_ary)
if self.orientation == "h":
return transpose(array((sx,sy)))
else:
return transpose(array((sy,sx)))
def map_data(self, screen_pt):
""" Maps a screen space point into the "index" space of the plot.
Implements the AbstractPlotRenderer interface.
"""
if self.orientation == "h":
screen_coord = screen_pt[0]
else:
screen_coord = screen_pt[1]
return self.index_mapper.map_data(screen_coord)
def map_index(self, screen_pt, threshold=2.0, outside_returns_none=True,
index_only=False):
""" Maps a screen space point to an index into the plot's index array(s).
Implements the AbstractPlotRenderer interface.
"""
data_pt = self.map_data(screen_pt)
if ((data_pt < self.index_mapper.range.low) or \
(data_pt > self.index_mapper.range.high)) and outside_returns_none:
return None
index_data = self.index.get_data()
value_data = self.value.get_data()
if len(value_data) == 0 or len(index_data) == 0:
return None
try:
ndx = reverse_map_1d(index_data, data_pt, self.index.sort_order)
except IndexError:
return None
x = index_data[ndx]
y = value_data[ndx]
result = self.map_screen(array([[x,y]]))
if result is None:
return None
sx, sy = result[0]
if index_only and ((screen_pt[0]-sx) < threshold):
return ndx
elif ((screen_pt[0]-sx)**2 + (screen_pt[1]-sy)**2 < threshold*threshold):
return ndx
else:
return None
#------------------------------------------------------------------------
# PlotComponent interface
#------------------------------------------------------------------------
def _gather_points(self):
""" Collects data points that are within the range of the plot, and
caches them in **_cached_data_pts**.
"""
# Bail out before dereferencing missing data sources.
if not self.index or not self.value:
return
index, index_mask = self.index.get_data_mask()
value, value_mask = self.value.get_data_mask()
if len(index) == 0 or len(value) == 0 or len(index) != len(value):
logger.warning(
"Chaco: using empty dataset; index_len=%d, value_len=%d."
% (len(index), len(value)))
self._cached_data_pts = array([])
self._cache_valid = True
return
# TODO: Until we code up a better handling of value-based culling that
# takes into account starting_value and dataspace bar widths, just use
# the index culling for now.
# value_range_mask = self.value_mapper.range.mask_data(value)
# nan_mask = invert(isnan(index_mask)) & invert(isnan(value_mask))
# point_mask = index_mask & value_mask & nan_mask & \
# index_range_mask & value_range_mask
index_range_mask = self.index_mapper.range.mask_data(index)
nan_mask = invert(isnan(index_mask))
point_mask = index_mask & nan_mask & index_range_mask
if self.starting_value is None:
starting_values = zeros(len(index))
else:
starting_values = self.starting_value.get_data()
if self.bar_width_type == "data":
half_width = self.bar_width / 2.0
points = column_stack((index-half_width, index+half_width,
starting_values, value))
else:
points = column_stack((index, starting_values, value))
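# Completion sketch (assumption): the snippet is truncated here. Judging by
# the imported `compress` and the `_cached_data_pts` comments above, the
# method would plausibly finish by caching the masked points, e.g.:
# self._cached_data_pts = compress(point_mask, points, axis=0)
# self._cache_valid = True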
"""
2D Disc models
==============
Classes: Rosenfeld2d, General2d, Velocity, Intensity, Cube, Tools
"""
#TODO in show(): Perhaps use text labels on line profiles to distinguish profiles for more than 2 cubes.
#TODO in make_model(): Find a smart way to detect and pass only the coords needed by a prop attribute.
#TODO in run_mcmc(): Enable an arg to allow the user see the position of parameter walkers every 'arg' steps.
#TODO in General2d: Implement irregular grids (see e.g. meshio from nschloe on github) for the disc grid.
#TODO in General2d: Compute props in the interpolated grid (not in the original grid) to avoid interpolation of props and save time.
#TODO in General2d: Allow the lower surface to have independent intensity and line width parametrisations.
#TODO in General2d: Implement pressure support term
#TODO in make_model(): Allow for warped emitting surfaces, check notes for ideas as to how to solve for multiple intersections between l.o.s and emission surface.
#TODO in __main__(): show intro message when python -m disc2d
#TODO in run_mcmc(): use get() methods instead of allowing the user to use self obj attributes.
#TODO in make_model(): Allow R_disc to be a free parameter.
#TODO in make_model(): Enable 3D velocities too when subpixel algorithm is used
#TODO in v1.0: migrate to astropy units
from __future__ import print_function
from ..utils import constants as sfc
from ..utils import units as sfu
from astropy.convolution import Gaussian2DKernel, convolve
from scipy.interpolate import griddata, interp1d
from scipy.special import ellipk, ellipe
from scipy.optimize import curve_fit
from scipy.integrate import quad
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from matplotlib import ticker
import numpy as np
import matplotlib
import itertools
import warnings
import numbers
import pprint
import copy
import time
import sys
import os
from multiprocessing import Pool
os.environ["OMP_NUM_THREADS"] = "1"
try:
import termtables
found_termtables = True
except ImportError:
print ("\n*** For nicer outputs we recommend installing 'termtables' by typing in terminal: pip install termtables ***")
found_termtables = False
#warnings.filterwarnings("error")
__all__ = ['Cube', 'Tools', 'Intensity', 'Velocity', 'General2d', 'Rosenfeld2d']
path_file = os.path.dirname(os.path.realpath(__file__))+'/'
"""
matplotlib.rcParams['font.family'] = 'monospace'
matplotlib.rcParams['font.weight'] = 'normal'
matplotlib.rcParams['lines.linewidth'] = 1.5
matplotlib.rcParams['axes.linewidth'] = 3.0
matplotlib.rcParams['xtick.major.width']=1.6
matplotlib.rcParams['ytick.major.width']=1.6
matplotlib.rc('font', size=MEDIUM_SIZE) # controls default text sizes
matplotlib.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of axes title
matplotlib.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of x and y labels
matplotlib.rc('xtick', labelsize=MEDIUM_SIZE-2) # fontsize of y tick labels
matplotlib.rc('ytick', labelsize=MEDIUM_SIZE-2) # fontsize of x tick labels
matplotlib.rc('legend', fontsize=SMALL_SIZE-1) # legend fontsize
matplotlib.rc('figure', titlesize=BIGGER_SIZE) # fontsize of figure title
params = {'xtick.major.size': 6.5,
'ytick.major.size': 6.5
}
matplotlib.rcParams.update(params)
"""
SMALL_SIZE = 10
MEDIUM_SIZE = 15
BIGGER_SIZE = 22
hypot_func = lambda x,y: np.sqrt(x**2 + y**2) # Slightly faster than np.hypot < np.linalg.norm < scipy distance. Checked precision up to au**2 orders and seemed ok.
class InputError(Exception):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
def __str__(self):
return '%s --> %s'%(self.expression, self.message)
class Tools:
@staticmethod
def _rotate_sky_plane(x, y, ang):
xy = np.array([x,y])
cos_ang = np.cos(ang)
sin_ang = np.sin(ang)
rot = np.array([[cos_ang, -sin_ang],
[sin_ang, cos_ang]])
return np.dot(rot, xy)
@staticmethod
def _rotate_sky_plane3d(x, y, z, ang, axis='z'):
xyz = np.array([x,y,z])
cos_ang = np.cos(ang)
sin_ang = np.sin(ang)
if axis == 'x':
rot = np.array([[1, 0, 0],
[0, cos_ang, -sin_ang],
[0, sin_ang, cos_ang]])
if axis == 'y':
rot = np.array([[cos_ang, 0, -sin_ang],
[0, 1, 0],
[sin_ang, 0, cos_ang]])
if axis == 'z':
rot = np.array([[cos_ang, -sin_ang , 0],
[sin_ang, cos_ang, 0],
[0, 0, 1]])
return np.dot(rot, xyz)
@staticmethod
def _project_on_skyplane(x, y, z, cos_incl, sin_incl):
x_pro = x
y_pro = y * cos_incl - z * sin_incl
z_pro = y * sin_incl + z * cos_incl
return x_pro, y_pro, z_pro
@staticmethod
def get_sky_from_disc_coords(R, az, z, incl, PA):
xp = R*np.cos(az)
yp = R*np.sin(az)
zp = z
xp, yp, zp = Tools._project_on_skyplane(xp, yp, zp, np.cos(incl), np.sin(incl))
xp, yp = Tools._rotate_sky_plane(xp, yp, PA)
return xp, yp, zp
@staticmethod #should be a bound method, self.grid is constant except for z_upper, z_lower
def _compute_prop(grid, prop_funcs, prop_kwargs):
n_funcs = len(prop_funcs)
props = [{} for i in range(n_funcs)]
for side in ['upper', 'lower']:
x, y, z, R, phi, R_1d, z_1d = grid[side]
coord = {'x': x, 'y': y, 'z': z, 'phi': phi, 'R': R, 'R_1d': R_1d, 'z_1d': z_1d}
for i in range(n_funcs): props[i][side] = prop_funcs[i](coord, **prop_kwargs[i])
return props
@staticmethod
def _progress_bar(percent=0, width=50):
left = width * percent // 100
right = width - left
"""
print('\r[', '#' * left, ' ' * right, ']',
f' {percent:.0f}%',
sep='', end='', flush=True)
"""
print('\r[', '#' * left, ' ' * right, ']', ' %.0f%%'%percent, sep='', end='') #compatible with python2 docs
sys.stdout.flush()
@staticmethod
def _break_line(init='', border='*', middle='=', end='\n', width=100):
print('\r', init, border, middle * width, border, sep='', end=end)
@staticmethod
def _print_logo(filename=path_file+'logo.txt'):
logo = open(filename, 'r')
print(logo.read())
logo.close()
@staticmethod
def _get_beam_from(beam, dpix=None, distance=None, frac_pixels=1.0):
"""
beam must be str pointing to fits file to extract beam from header or radio_beam Beam object.
If radio_beam Beam instance is provided, pixel size (in SI units) will be extracted from grid obj. Distance (in pc) must be provided.
#frac_pixels: number of averaged pixels on the data (useful to reduce computing time)
"""
from radio_beam import Beam
from astropy.io import fits
from astropy import units as u
sigma2fwhm = np.sqrt(8 * np.log(2))
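# Note (assumption): the snippet is truncated here. Given the imports and
# the docstring above, the method would plausibly continue by reading the
# beam from the FITS header (or using the radio_beam Beam directly) and
# converting its FWHM from angular units to pixels via dpix, distance and
# frac_pixels, using the sigma-to-FWHM factor sqrt(8*ln(2)) computed above.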
''' All DUT alignment functions in space and time are listed here plus additional alignment check functions'''
from __future__ import division
import logging
import sys
import os
from collections import Iterable
import math
import tables as tb
import numpy as np
import scipy
from matplotlib.backends.backend_pdf import PdfPages
from tqdm import tqdm
from beam_telescope_analysis.telescope.telescope import Telescope
from beam_telescope_analysis.tools import analysis_utils
from beam_telescope_analysis.tools import plot_utils
from beam_telescope_analysis.tools import geometry_utils
from beam_telescope_analysis.tools import data_selection
from beam_telescope_analysis.track_analysis import find_tracks, fit_tracks, line_fit_3d, _fit_tracks_kalman_loop
from beam_telescope_analysis.result_analysis import calculate_residuals, histogram_track_angle, get_angles
from beam_telescope_analysis.tools.storage_utils import save_arguments
default_alignment_parameters = ["translation_x", "translation_y", "translation_z", "rotation_alpha", "rotation_beta", "rotation_gamma"]
default_cluster_shapes = [1, 3, 5, 13, 14, 7, 11, 15]
kfa_alignment_descr = np.dtype([('translation_x', np.float64),
('translation_y', np.float64),
('translation_z', np.float64),
('rotation_alpha', np.float64),
('rotation_beta', np.float64),
('rotation_gamma', np.float64),
('translation_x_err', np.float64),
('translation_y_err', np.float64),
('translation_z_err', np.float64),
('rotation_alpha_err', np.float64),
('rotation_beta_err', np.float64),
('rotation_gamma_err', np.float64),
('translation_x_delta', np.float64),
('translation_y_delta', np.float64),
('translation_z_delta', np.float64),
('rotation_alpha_delta', np.float64),
('rotation_beta_delta', np.float64),
('rotation_gamma_delta', np.float64),
('annealing_factor', np.float64)])
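# Example (illustrative, not part of the module): a results table with one
# row per alignment iteration can be allocated from this structured dtype;
# all fields start at zero and can be addressed by name.
# kfa_table = np.zeros(10, dtype=kfa_alignment_descr)
# kfa_table[0]['translation_x'] = 12.5 # um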
@save_arguments
def apply_alignment(telescope_configuration, input_file, output_file=None, local_to_global=True, align_to_beam=False, chunk_size=1000000):
'''Convert local to global coordinates and vice versa.
Note:
-----
This function cannot be easily made faster with multiprocessing since the computation function (apply_alignment_to_chunk) does not
contribute significantly to the runtime (< 20 %), but the copy overhead for non-shared memory needed for multiprocessing is higher.
Also the hard drive IO can be limiting (30 Mb/s read, 20 Mb/s write to the same disk)
Parameters
----------
telescope_configuration : string
Filename of the telescope configuration file.
input_file : string
Filename of the input file (merged or tracks file).
output_file : string
Filename of the output file with the converted coordinates (merged or tracks file).
local_to_global : bool
If True, convert from local to global coordinates.
align_to_beam : bool
If True, use telescope alignment to align to the beam (beam along z axis).
chunk_size : uint
Chunk size of the data when reading from file.
Returns
-------
output_file : string
Filename of the output file with new coordinates.
'''
telescope = Telescope(telescope_configuration)
n_duts = len(telescope)
logging.info('=== Apply alignment to %d DUTs ===', n_duts)
if output_file is None:
output_file = os.path.splitext(input_file)[0] + ('_global_coordinates.h5' if local_to_global else '_local_coordinates.h5')
def convert_data(dut, dut_index, node, conv, data):
if isinstance(dut, Telescope):
data['x_dut_%d' % dut_index], data['y_dut_%d' % dut_index], data['z_dut_%d' % dut_index] = conv(
x=data['x_dut_%d' % dut_index],
y=data['y_dut_%d' % dut_index],
z=data['z_dut_%d' % dut_index],
translation_x=dut.translation_x,
translation_y=dut.translation_y,
translation_z=dut.translation_z,
rotation_alpha=dut.rotation_alpha,
rotation_beta=dut.rotation_beta,
rotation_gamma=dut.rotation_gamma)
else:
data['x_dut_%d' % dut_index], data['y_dut_%d' % dut_index], data['z_dut_%d' % dut_index] = conv(
x=data['x_dut_%d' % dut_index],
y=data['y_dut_%d' % dut_index],
z=data['z_dut_%d' % dut_index])
if "Tracks" in node.name:
format_strings = ['offset_{dimension}_dut_{dut_index}']
if "DUT%d" % dut_index in node.name:
format_strings.extend(['offset_{dimension}'])
for format_string in format_strings:
if format_string.format(dimension='x', dut_index=dut_index) in node.dtype.names:
data[format_string.format(dimension='x', dut_index=dut_index)], data[format_string.format(dimension='y', dut_index=dut_index)], data[format_string.format(dimension='z', dut_index=dut_index)] = conv(
x=data[format_string.format(dimension='x', dut_index=dut_index)],
y=data[format_string.format(dimension='y', dut_index=dut_index)],
z=data[format_string.format(dimension='z', dut_index=dut_index)],
translation_x=dut.translation_x,
translation_y=dut.translation_y,
translation_z=dut.translation_z,
rotation_alpha=dut.rotation_alpha,
rotation_beta=dut.rotation_beta,
rotation_gamma=dut.rotation_gamma)
format_strings = ['slope_{dimension}_dut_{dut_index}']
if "DUT%d" % dut_index in node.name:
format_strings.extend(['slope_{dimension}'])
for format_string in format_strings:
if format_string.format(dimension='x', dut_index=dut_index) in node.dtype.names:
data[format_string.format(dimension='x', dut_index=dut_index)], data[format_string.format(dimension='y', dut_index=dut_index)], data[format_string.format(dimension='z', dut_index=dut_index)] = conv(
x=data[format_string.format(dimension='x', dut_index=dut_index)],
y=data[format_string.format(dimension='y', dut_index=dut_index)],
z=data[format_string.format(dimension='z', dut_index=dut_index)],
# no translation for the slopes
translation_x=0.0,
translation_y=0.0,
translation_z=0.0,
rotation_alpha=dut.rotation_alpha,
rotation_beta=dut.rotation_beta,
rotation_gamma=dut.rotation_gamma)
format_strings = ['{dimension}_err_dut_{dut_index}']
for format_string in format_strings:
if format_string.format(dimension='x', dut_index=dut_index) in node.dtype.names:
data[format_string.format(dimension='x', dut_index=dut_index)], data[format_string.format(dimension='y', dut_index=dut_index)], data[format_string.format(dimension='z', dut_index=dut_index)] = np.abs(conv(
x=data[format_string.format(dimension='x', dut_index=dut_index)],
y=data[format_string.format(dimension='y', dut_index=dut_index)],
z=data[format_string.format(dimension='z', dut_index=dut_index)],
# no translation for the errors
translation_x=0.0,
translation_y=0.0,
translation_z=0.0,
rotation_alpha=dut.rotation_alpha,
rotation_beta=dut.rotation_beta,
rotation_gamma=dut.rotation_gamma))
# Loop over the hits of all DUTs of all hit tables in chunks and apply the alignment
with tb.open_file(input_file, mode='r') as in_file_h5:
with tb.open_file(output_file, mode='w') as out_file_h5:
for node in in_file_h5.root: # Loop over potential hit tables in data file
logging.info('== Apply alignment to node %s ==', node.name)
hits_aligned_table = out_file_h5.create_table(
where=out_file_h5.root,
name=node.name,
description=node.dtype,
title=node.title,
filters=tb.Filters(
complib='blosc',
complevel=5,
fletcher32=False))
pbar = tqdm(total=node.shape[0], ncols=80)
for data_chunk, index in analysis_utils.data_aligned_at_events(node, chunk_size=chunk_size): # Loop over the hits
for dut_index, dut in enumerate(telescope): # Loop over the DUTs
if local_to_global:
conv = dut.local_to_global_position
else:
conv = dut.global_to_local_position
if align_to_beam and not local_to_global:
convert_data(dut=telescope, dut_index=dut_index, node=node, conv=conv, data=data_chunk)
convert_data(dut=dut, dut_index=dut_index, node=node, conv=conv, data=data_chunk)
if align_to_beam and local_to_global:
convert_data(dut=telescope, dut_index=dut_index, node=node, conv=conv, data=data_chunk)
hits_aligned_table.append(data_chunk)
pbar.update(data_chunk.shape[0])
pbar.close()
return output_file
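# Usage sketch (illustrative): converting a tracks file from local to global
# coordinates with the function above. The file names are hypothetical.
# apply_alignment(
#     telescope_configuration='telescope_aligned.yaml',
#     input_file='Tracks.h5',
#     output_file='Tracks_global.h5',
#     local_to_global=True,
#     align_to_beam=True)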
def prealign(telescope_configuration, input_correlation_file, output_telescope_configuration=None, select_duts=None, select_reference_dut=0, reduce_background=True, use_location=False, plot=True):
'''Deduce a pre-alignment from the correlations, by fitting the correlations with a straight line (gives offset, slope, but no tilt angles).
The user can define cuts on the fit error and straight line offset in an interactive way.
Parameters
----------
telescope_configuration : string
Filename of the telescope configuration file.
input_correlation_file : string
Filename of the input correlation file.
output_telescope_configuration : string
Filename of the output telescope configuration file.
select_duts : iterable
List of duts for which the prealignment is done. If None, prealignment is done for all duts.
select_reference_dut : uint
DUT index of the reference plane. Default is DUT 0.
reduce_background : bool
If True, use correlation histograms with reduced background (by applying SVD method to the correlation matrix).
plot : bool
If True, create additional output plots.
'''
telescope = Telescope(telescope_configuration)
n_duts = len(telescope)
logging.info('=== Pre-alignment of %d DUTs ===' % n_duts)
if output_telescope_configuration is None:
output_telescope_configuration = os.path.splitext(telescope_configuration)[0] + '_prealigned.yaml'
elif output_telescope_configuration == telescope_configuration:
raise ValueError('Output telescope configuration file must be different from input telescope configuration file.')
# remove reference DUT from list of all DUTs
if select_duts is None:
select_duts = list(set(range(n_duts)) - set([select_reference_dut]))
else:
select_duts = list(set(select_duts) - set([select_reference_dut]))
if plot is True:
output_pdf = PdfPages(os.path.splitext(input_correlation_file)[0] + '_prealigned.pdf', keep_empty=False)
else:
output_pdf = None
with tb.open_file(input_correlation_file, mode="r") as in_file_h5:
# loop over DUTs for pre-alignment
for actual_dut_index in select_duts:
actual_dut = telescope[actual_dut_index]
logging.info("== Pre-aligning %s ==" % actual_dut.name)
x_global_pixel, y_global_pixel, z_global_pixel = [], [], []
for column in range(1, actual_dut.n_columns + 1):
global_positions = actual_dut.index_to_global_position(
column=[column] * actual_dut.n_rows,
row=range(1, actual_dut.n_rows + 1))
x_global_pixel = np.hstack([x_global_pixel, global_positions[0]])
y_global_pixel = np.hstack([y_global_pixel, global_positions[1]])
z_global_pixel = np.hstack([z_global_pixel, global_positions[2]])
# calculate rotation matrix for later rotation corrections
rotation_alpha = actual_dut.rotation_alpha
rotation_beta = actual_dut.rotation_beta
rotation_gamma = actual_dut.rotation_gamma
R = geometry_utils.rotation_matrix(
alpha=rotation_alpha,
beta=rotation_beta,
gamma=rotation_gamma)
select = None
# loop over x- and y-axis
for x_direction in [True, False]:
if reduce_background:
node = in_file_h5.get_node(in_file_h5.root, 'Correlation_%s_%d_%d_reduced_background' % ('x' if x_direction else 'y', select_reference_dut, actual_dut_index))
else:
node = in_file_h5.get_node(in_file_h5.root, 'Correlation_%s_%d_%d' % ('x' if x_direction else 'y', select_reference_dut, actual_dut_index))
dut_name = actual_dut.name
ref_name = telescope[select_reference_dut].name
pixel_size = actual_dut.column_size if x_direction else actual_dut.row_size
logging.info('Pre-aligning data from %s', node.name)
bin_size = node.attrs.resolution
ref_hist_extent = node.attrs.ref_hist_extent
ref_hist_size = (ref_hist_extent[1] - ref_hist_extent[0])
dut_hist_extent = node.attrs.dut_hist_extent
dut_hist_size = (dut_hist_extent[1] - dut_hist_extent[0])
# retrieve data
data = node[:]
# Calculate the positions on the x axis
dut_pos = np.linspace(start=dut_hist_extent[0] + bin_size / 2.0, stop=dut_hist_extent[1] - bin_size / 2.0, num=data.shape[0], endpoint=True)
# calculate maximum per column
max_select = np.argmax(data, axis=1)
hough_data = np.zeros_like(data)
hough_data[np.arange(data.shape[0]), max_select] = 1
# transpose for correct angle
hough_data = hough_data.T
accumulator, theta, rho, theta_edges, rho_edges = analysis_utils.hough_transform(hough_data, theta_res=0.1, rho_res=1.0, return_edges=True)
def largest_indices(ary, n):
''' Returns the n largest indices from a numpy array.
https://stackoverflow.com/questions/6910641/how-to-get-indices-of-n-maximum-values-in-a-numpy-array
'''
flat = ary.flatten()
indices = np.argpartition(flat, -n)[-n:]
indices = indices[np.argsort(-flat[indices])]
return np.unravel_index(indices, ary.shape)
# finding correlation
# check for non-zero values to improve speed
count_nonzero = np.count_nonzero(accumulator)
indices = np.vstack(largest_indices(accumulator, count_nonzero)).T
for index in indices:
rho_idx, th_idx = index[0], index[1]
rho_val, theta_val = rho[rho_idx], theta[th_idx]
slope_idx, offset_idx = -np.cos(theta_val) / np.sin(theta_val), rho_val / np.sin(theta_val)
slope = slope_idx
offset = offset_idx * bin_size + ref_hist_extent[0] + 0.5 * bin_size
# check for proper slope
if np.isclose(slope, 1.0, rtol=0.0, atol=0.1) or np.isclose(slope, -1.0, rtol=0.0, atol=0.1):
break
else:
raise RuntimeError('Cannot find %s correlation between %s and %s' % ("X" if x_direction else "Y", telescope[select_reference_dut].name, actual_dut.name))
# offset in the center of the pixel matrix
offset_center = offset + slope * (0.5 * dut_hist_size - 0.5 * bin_size)
# calculate offset for local frame
offset_plot = offset - slope * dut_pos[0]
# find locations where the max. correlation is close to the expected value
x_list = find_inliers(
x=dut_pos[max_select != 0],
y=(max_select[max_select != 0] * bin_size - ref_hist_size / 2.0 + bin_size / 2.0),
m=slope,
c=offset_plot,
threshold=pixel_size * np.sqrt(12) * 2)
# 1-dimensional clustering of calculated locations
kernel = scipy.stats.gaussian_kde(x_list)
densities = kernel(dut_pos)
max_density = np.max(densities)
# calculate indices where value is close to max. density
indices = np.where(densities > max_density * 0.5)
# get locations from indices
x_list = dut_pos[indices]
# calculate range where correlation exists
dut_pos_limit = [np.min(x_list), np.max(x_list)]
plot_utils.plot_hough(
dut_pos=dut_pos,
data=hough_data,
accumulator=accumulator,
offset=offset_plot,
slope=slope,
dut_pos_limit=dut_pos_limit,
theta_edges=theta_edges,
rho_edges=rho_edges,
ref_hist_extent=ref_hist_extent,
dut_hist_extent=dut_hist_extent,
ref_name=ref_name,
dut_name=dut_name,
x_direction=x_direction,
reduce_background=reduce_background,
output_pdf=output_pdf)
if select is None:
select = np.ones_like(x_global_pixel, dtype=bool)
if x_direction:
select &= (x_global_pixel >= dut_pos_limit[0]) & (x_global_pixel <= dut_pos_limit[1])
if slope < 0.0:
R = np.linalg.multi_dot([geometry_utils.rotation_matrix_y(beta=np.pi), R])
translation_x = offset_center
else:
select &= (y_global_pixel >= dut_pos_limit[0]) & (y_global_pixel <= dut_pos_limit[1])
if slope < 0.0:
R = np.linalg.multi_dot([geometry_utils.rotation_matrix_x(alpha=np.pi), R])
translation_y = offset_center
# Setting new parameters
# Only use new limits if they are narrower
# Convert from global to local coordinates
local_coordinates = actual_dut.global_to_local_position(
x=x_global_pixel[select],
y=y_global_pixel[select],
z=z_global_pixel[select])
if actual_dut.x_limit is None:
actual_dut.x_limit = (min(local_coordinates[0]), max(local_coordinates[0]))
else:
actual_dut.x_limit = (max((min(local_coordinates[0]), actual_dut.x_limit[0])), min((max(local_coordinates[0]), actual_dut.x_limit[1])))
if actual_dut.y_limit is None:
actual_dut.y_limit = (min(local_coordinates[1]), max(local_coordinates[1]))
else:
actual_dut.y_limit = (max((min(local_coordinates[1]), actual_dut.y_limit[0])), min((max(local_coordinates[1]), actual_dut.y_limit[1])))
# Setting geometry
actual_dut.translation_x = translation_x
actual_dut.translation_y = translation_y
rotation_alpha, rotation_beta, rotation_gamma = geometry_utils.euler_angles(R=R)
actual_dut.rotation_alpha = rotation_alpha
actual_dut.rotation_beta = rotation_beta
actual_dut.rotation_gamma = rotation_gamma
telescope.save_configuration(configuration_file=output_telescope_configuration)
if output_pdf is not None:
output_pdf.close()
return output_telescope_configuration
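# Usage sketch (illustrative): pre-aligning all DUTs against DUT 0 from a
# correlation file. The file names are hypothetical.
# prealign(
#     telescope_configuration='telescope.yaml',
#     input_correlation_file='Correlation.h5',
#     select_reference_dut=0,
#     reduce_background=True)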
def find_inliers(x, y, m, c, threshold=1.0):
''' Find inliers.
Parameters
----------
x : list
X coordinates.
y : list
Y coordinates.
m : float
Slope of the reference line.
c : float
Y-intercept of the reference line.
threshold : float
Maximum distance of the data points for inlier selection.
Returns
-------
x_list : array
X coordinates of inliers.
'''
# calculate distance to reference hit
dist = np.abs(m * x + c - y)
sel = dist < threshold
return x[sel]
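# Example (illustrative): points near y = 2x + 1 with one outlier; only the
# points within the threshold distance of the line are returned.
# x = np.array([0.0, 1.0, 2.0, 3.0])
# y = np.array([1.0, 3.0, 5.0, 100.0])
# find_inliers(x, y, m=2.0, c=1.0, threshold=1.0) # -> array([0., 1., 2.])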
def find_line_model(points):
""" find a line model for the given points
:param points selected points for model fitting
:return line model
"""
# [WARNING] vertical and horizontal lines should be treated differently
# here we just add a small epsilon to avoid division by zero
# find a line model for these points
m = (points[1, 1] - points[0, 1]) / (points[1, 0] - points[0, 0] + sys.float_info.epsilon) # slope (gradient) of the line
c = points[1, 1] - m * points[1, 0] # y-intercept of the line
return m, c
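# Example (illustrative): two points (0, 1) and (1, 3) give slope m = 2 and
# intercept c = 1 (up to the epsilon guard in the denominator).
# find_line_model(np.array([[0.0, 1.0], [1.0, 3.0]])) # -> (~2.0, ~1.0)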
def find_intercept_point(m, c, x0, y0):
""" find an intercept point of the line model with
a normal from point (x0,y0) to it
:param m slope of the line model
:param c y-intercept of the line model
:param x0 point's x coordinate
:param y0 point's y coordinate
:return intercept point
"""
# intersection point with the model
x = (x0 + m * y0 - m * c) / (1 + m**2)
y = (m * x0 + (m**2) * y0 - (m**2) * c) / (1 + m**2) + c
return x, y
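# Example (illustrative): projecting the point (0, 2) onto the line y = x
# (m = 1, c = 0) gives the foot of the normal at (1, 1).
# find_intercept_point(m=1.0, c=0.0, x0=0.0, y0=2.0) # -> (1.0, 1.0)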
def find_ransac(x, y, iterations=100, threshold=1.0, ratio=0.5):
''' RANSAC implementation
Note
----
Implementation from <NAME>,
https://salzis.wordpress.com/2014/06/10/robust-linear-model-estimation-using-ransac-python-implementation/
Parameters
----------
x : list
X coordinates.
y : list
Y coordinates.
iterations : int
Maximum number of iterations.
threshold : float
Maximum distance of the data points for inlier selection.
ratio : float
Break condition for inliers.
Returns
-------
model_ratio : float
Ratio of inliers to the total number of samples.
model_m : float
Slope.
model_c : float
Offset.
model_x_list : array
X coordinates of inliers.
model_y_list : array
Y coordinates of inliers.
'''
data = np.column_stack((x, y))
n_samples = x.shape[0]
model_ratio = 0.0
model_m = 0.0
model_c = 0.0
# initialize outputs so empty arrays are returned if no model is found
model_x_list = np.array([])
model_y_list = np.array([])
# perform RANSAC iterations
for it in range(iterations):
all_indices = np.arange(n_samples)
np.random.shuffle(all_indices)
indices_1 = all_indices[:2] # pick up two random points
indices_2 = all_indices[2:]
maybe_points = data[indices_1, :]
test_points = data[indices_2, :]
# find a line model for these points
m, c = find_line_model(maybe_points)
x_list = []
y_list = []
num = 0
# find orthogonal lines to the model for all testing points
for ind in range(test_points.shape[0]):
x0 = test_points[ind, 0]
y0 = test_points[ind, 1]
# find an intercept point of the model with a normal from point (x0,y0)
x1, y1 = find_intercept_point(m, c, x0, y0)
# distance from point to the model
dist = math.sqrt((x1 - x0)**2 + (y1 - y0)**2)
# check whether it's an inlier or not
if dist < threshold:
x_list.append(x0)
y_list.append(y0)
num += 1
# in case a new model is better - cache it
if num / float(n_samples) > model_ratio:
model_ratio = num / float(n_samples)
model_m = m
model_c = c
model_x_list = np.array(x_list)
model_y_list = np.array(y_list)
# we are done in case we have enough inliers
if num > n_samples * ratio:
break
return model_ratio, model_m, model_c, model_x_list, model_y_list
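# Usage sketch (illustrative): fitting a line through noisy correlation
# points with the RANSAC helper above. The data below are hypothetical.
# x = np.arange(100, dtype=np.float64)
# y = 0.98 * x + 5.0 + np.random.normal(0.0, 0.5, 100)
# ratio, m, c, x_in, y_in = find_ransac(x, y, iterations=100, threshold=2.0)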
def align(telescope_configuration, input_merged_file, output_telescope_configuration=None, select_duts=None, alignment_parameters=None, select_telescope_duts=None, select_extrapolation_duts=None, select_fit_duts=None, select_hit_duts=None, max_iterations=3, max_events=None, fit_method='fit', beam_energy=None, particle_mass=None, scattering_planes=None, track_chi2=10.0, cluster_shapes=None, quality_distances=(250.0, 250.0), isolation_distances=(500.0, 500.0), use_limits=True, plot=True, chunk_size=1000000):
''' This function does an alignment of the DUTs and sets translation and rotation values for all DUTs.
The reference DUT defines the global coordinate system position at 0, 0, 0 and should be well in the beam and not heavily rotated.
To solve the chicken-and-egg problem that a good DUT alignment needs hits belonging to one track, but good track finding needs a good DUT alignment, this
function works only on already prealigned hits belonging to one track. Thus this function can be called only after track finding.
These steps are done
1. Take the found tracks and revert the pre-alignment
2. Take the track hits belonging to one track and fit tracks for all DUTs
3. Calculate the residuals for each DUT
4. Deduce rotations from the residuals and apply them to the hits
5. Deduce the translation of each plane
6. Store and apply the new alignment
repeat step 3 - 6 until the total residual does not decrease (RMS_total = sqrt(RMS_x_1^2 + RMS_y_1^2 + RMS_x_2^2 + RMS_y_2^2 + ...))
Parameters
----------
telescope_configuration : string
Filename of the telescope configuration file.
input_merged_file : string
Filename of the input merged file.
output_telescope_configuration : string
Filename of the output telescope configuration file.
select_duts : iterable or iterable of iterable
The combination of DUTs that are aligned at once. One should always align the high-resolution planes first.
E.g. for a telescope (first and last 3 planes) with 2 devices in the center (3, 4):
select_duts=[[0, 1, 2, 5, 6, 7], # align the telescope planes first
[4], # align first DUT
[3]] # align second DUT
alignment_parameters : list of lists of strings
The list of alignment parameters for each align_dut. Valid parameters:
- translation_x: horizontal axis
- translation_y: vertical axis
- translation_z: beam axis
- rotation_alpha: rotation around x-axis
- rotation_beta: rotation around y-axis
- rotation_gamma: rotation around z-axis (beam axis)
If None, all parameters will be selected.
select_telescope_duts : iterable
The given DUTs will be used to align the telescope along the z-axis.
Usually the coordinates of these DUTs are well specified.
At least 2 DUTs need to be specified. The z-position of the selected DUTs will not be changed by default.
select_extrapolation_duts : list
The given DUTs will be used for track extrapolation for improving track finding efficiency.
In some rare cases, removing DUTs with a coarse resolution might improve track finding efficiency.
If None, select all DUTs.
If list is empty or has a single entry, disable extrapolation (at least 2 DUTs are required for extrapolation to work).
select_fit_duts : iterable or iterable of iterable
Defines for each select_duts combination which devices to use in the track fit.
E.g. To use only the telescope planes (first and last 3 planes) but not the 2 center devices
select_fit_duts=[0, 1, 2, 5, 6, 7]
select_hit_duts : iterable or iterable of iterable
Defines for each select_duts combination which devices must have a hit to use the track for fitting. The hit
does not have to be used in the fit itself! This is useful for time reference planes.
E.g. To use telescope planes (first and last 3 planes) + time reference plane (3)
select_hit_duts = [0, 1, 2, 4, 5, 6, 7]
max_iterations : uint
Maximum number of iterations of calc residuals, apply rotation refit loop until constant result is expected.
Usually the procedure converges rather fast (< 5 iterations).
Non-telescope DUTs usually require 2 iterations.
max_events: uint
Randomly select max_events for alignment. If None, use all events, which might slow down the alignment.
fit_method : string
Available methods are 'kalman', which uses a Kalman Filter for track calculation, and 'fit', which uses a simple
straight line fit for track calculation.
beam_energy : float
Energy of the beam in MeV, e.g., 2500.0 MeV for ELSA beam. Only used for the Kalman Filter.
particle_mass : float
Mass of the particle in MeV, e.g., 0.511 MeV for electrons. Only used for the Kalman Filter.
scattering_planes : list or dict
Specifies additional scattering planes in case of DUTs which are not used or additional material in the way of the tracks.
The list must contain dictionaries containing the following keys:
material_budget: material budget of the scattering plane
translation_x/translation_y/translation_z: x/y/z position of the plane (in um)
rotation_alpha/rotation_beta/rotation_gamma: alpha/beta/gamma angle of scattering plane (in radians)
The material budget is defined as the thickness divided by the radiation length.
If scattering_planes is None, no scattering plane will be added.
track_chi2 : float or list
Setting the limit on the track chi^2. If None or 0.0, no cut will be applied.
A smaller value reduces the number of tracks for the alignment.
A large value increases the number of tracks but at the cost of alignment efficiency because of potentially bad tracks.
A good start value is 5.0 to 10.0 for high energy beams and 15.0 to 50.0 for low energy beams.
cluster_shapes : iterable or iterable of iterables
List of cluster shapes (unsigned integer) for each DUT. Only the selected cluster shapes will be used for the alignment.
Cluster shapes have impact on precision of the alignment. Larger clusters and certain cluster shapes can have a significant uncertainty for the hit position.
If None, use default cluster shapes [1, 3, 5, 13, 14, 7, 11, 15], i.e. 1x1, 2x1, 1x2, 3-pixel cluster, 4-pixel cluster. If empty list, all cluster sizes will be used.
The cluster shape can be calculated with the help of beam_telescope_analysis.tools.analysis_utils.calculate_cluster_array/calculate_cluster_shape.
quality_distances : 2-tuple or list of 2-tuples
X and y distance (in um) for each DUT to calculate the quality flag. The selected track and corresponding hit
must have a smaller distance to have the quality flag to be set to 1.
The purpose of quality_distances is to find good tracks for the alignment.
A good start value is 1-2x the pixel pitch for large pixels and high-energy beams and 5-10x the pixel pitch for small pixels and low-energy beams.
A too small value will remove good tracks, a too large value will allow bad tracks to contribute to the alignment.
If None, set distance to infinite.
isolation_distances : 2-tuple or list of 2-tuples
X and y distance (in um) for each DUT to calculate the isolated track/hit flag. Any other occurrence of tracks or hits from the same event
within this distance will prevent the flag from being set.
The purpose of isolation_distances is to find good tracks for the alignment and to remove tracks that could be potentially fake tracks (noisy detector / high beam density). Hits and tracks which are too close to each other should be removed.
The value given by isolation_distances should be larger than the quality_distances value to be effective.
A too small value will remove almost no tracks, a too large value will remove good tracks.
If None, set distance to 0.
use_limits : bool
If True, use column and row limits from pre-alignment for selecting the data.
plot : bool
If True, create additional output plots.
chunk_size : uint
Chunk size of the data when reading from file.
'''
telescope = Telescope(telescope_configuration)
n_duts = len(telescope)
logging.info('=== Alignment of %d DUTs ===' % len(set(np.unique(np.hstack(np.array(select_duts))).tolist())))
# Create list with combinations of DUTs to align
if select_duts is None: # If None: align all DUTs
select_duts = list(range(n_duts))
# Check for value errors
if not isinstance(select_duts, Iterable):
raise ValueError("Parameter select_duts is not an iterable.")
elif not select_duts: # empty iterable
raise ValueError("Parameter select_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), select_duts)):
select_duts = [select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_duts)):
raise ValueError("Not all items in parameter select_duts are iterable.")
# Finally check length of all iterables in iterable
for dut in select_duts:
if not dut: # check the length of the items
raise ValueError("Item in parameter select_duts has length 0.")
# Check if some DUTs will not be aligned
non_select_duts = set(range(n_duts)) - set(np.unique(np.hstack(np.array(select_duts))).tolist())
if non_select_duts:
logging.info('These DUTs will not be aligned: %s' % ", ".join(telescope[dut_index].name for dut_index in non_select_duts))
# Create list
if alignment_parameters is None:
alignment_parameters = [[None] * len(duts) for duts in select_duts]
# Check for value errors
if not isinstance(alignment_parameters, Iterable):
raise ValueError("Parameter alignment_parameters is not an iterable.")
elif not alignment_parameters: # empty iterable
raise ValueError("Parameter alignment_parameters has no items.")
# Finally check length of all arrays
if len(alignment_parameters) != len(select_duts): # empty iterable
raise ValueError("Parameter alignment_parameters has the wrong length.")
for index, alignment_parameter in enumerate(alignment_parameters):
if alignment_parameter is None:
alignment_parameters[index] = [None] * len(select_duts[index])
if len(alignment_parameters[index]) != len(select_duts[index]): # check the length of the items
raise ValueError("Item in parameter alignment_parameter has the wrong length.")
# Create track, hit selection
if select_hit_duts is None: # If None: use all DUTs
select_hit_duts = []
# copy each item
for duts in select_duts:
select_hit_duts.append(duts[:]) # require a hit for each fit DUT
# Check iterable and length
if not isinstance(select_hit_duts, Iterable):
raise ValueError("Parameter select_hit_duts is not an iterable.")
elif not select_hit_duts: # empty iterable
raise ValueError("Parameter select_hit_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), select_hit_duts)):
select_hit_duts = [select_hit_duts[:] for _ in select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_hit_duts)):
raise ValueError("Not all items in parameter select_hit_duts are iterable.")
# Finally check length of all arrays
if len(select_hit_duts) != len(select_duts): # empty iterable
raise ValueError("Parameter select_hit_duts has the wrong length.")
for hit_dut in select_hit_duts:
if len(hit_dut) < 2: # check the length of the items
raise ValueError("Item in parameter select_hit_duts has length < 2.")
# Create track, hit selection
if select_fit_duts is None: # If None: use all DUTs
select_fit_duts = []
# copy each item from select_hit_duts
for hit_duts in select_hit_duts:
select_fit_duts.append(hit_duts[:]) # require a hit for each fit DUT
# Check iterable and length
if not isinstance(select_fit_duts, Iterable):
raise ValueError("Parameter select_fit_duts is not an iterable.")
elif not select_fit_duts: # empty iterable
raise ValueError("Parameter select_fit_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), select_fit_duts)):
select_fit_duts = [select_fit_duts[:] for _ in select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_fit_duts)):
raise ValueError("Not all items in parameter select_fit_duts are iterable.")
# Finally check length of all arrays
if len(select_fit_duts) != len(select_duts): # empty iterable
raise ValueError("Parameter select_fit_duts has the wrong length.")
for index, fit_dut in enumerate(select_fit_duts):
if len(fit_dut) < 2: # check the length of the items
raise ValueError("Item in parameter select_fit_duts has length < 2.")
if set(fit_dut) - set(select_hit_duts[index]): # fit DUTs are required to have a hit
raise ValueError("DUT in select_fit_duts is not in select_hit_duts.")
# Create chi2 array
if not isinstance(track_chi2, Iterable):
track_chi2 = [track_chi2] * len(select_duts)
# Finally check length
if len(track_chi2) != len(select_duts):
raise ValueError("Parameter track_chi2 has the wrong length.")
# expand dimensions
# Check iterable and length for each item
for index, chi2 in enumerate(track_chi2):
# Check if non-iterable
if not isinstance(chi2, Iterable):
track_chi2[index] = [chi2] * len(select_duts[index])
# again check for consistency
for index, chi2 in enumerate(track_chi2):
# Check iterable and length
if not isinstance(chi2, Iterable):
raise ValueError("Item in parameter track_chi2 is not an iterable.")
if len(chi2) != len(select_duts[index]): # empty iterable
raise ValueError("Item in parameter track_chi2 has the wrong length.")
# Create cluster shape selection
if cluster_shapes is None: # If None: set default value for all DUTs
cluster_shapes = [cluster_shapes] * len(select_duts)
# Check iterable and length
if not isinstance(cluster_shapes, Iterable):
raise ValueError("Parameter cluster_shapes is not an iterable.")
# elif not cluster_shapes: # empty iterable
# raise ValueError("Parameter cluster_shapes has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable) and val is not None, cluster_shapes)):
cluster_shapes = [cluster_shapes[:] for _ in select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable) or val is None, cluster_shapes)):
raise ValueError("Not all items in parameter cluster_shapes are iterable or None.")
# Finally check length of all arrays
if len(cluster_shapes) != len(select_duts): # empty iterable
raise ValueError("Parameter cluster_shapes has the wrong length.")
# expand dimensions
# Check iterable and length for each item
for index, shapes in enumerate(cluster_shapes):
# Check if only non-iterable in iterable
if shapes is None:
cluster_shapes[index] = [shapes] * len(select_duts[index])
elif all(map(lambda val: not isinstance(val, Iterable) and val is not None, shapes)):
cluster_shapes[index] = [shapes[:] for _ in select_duts[index]]
# again check for consistency
for index, shapes in enumerate(cluster_shapes):
# Check iterable and length
if not isinstance(shapes, Iterable):
raise ValueError("Item in parameter cluster_shapes is not an iterable.")
elif not shapes: # empty iterable
raise ValueError("Item in parameter cluster_shapes has no items.")
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable) or val is None, shapes)):
raise ValueError("Not all items of item in cluster_shapes are iterable or None.")
if len(shapes) != len(select_duts[index]): # empty iterable
raise ValueError("Item in parameter cluster_shapes has the wrong length.")
# Create quality distance
if isinstance(quality_distances, tuple) or quality_distances is None:
quality_distances = [quality_distances] * n_duts
# Check iterable and length
if not isinstance(quality_distances, Iterable):
raise ValueError("Parameter quality_distances is not an iterable.")
elif not quality_distances: # empty iterable
raise ValueError("Parameter quality_distances has no items.")
# Finally check length of all arrays
if len(quality_distances) != n_duts: # empty iterable
raise ValueError("Parameter quality_distances has the wrong length.")
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable) or val is None, quality_distances)):
raise ValueError("Not all items in parameter quality_distances are iterable or None.")
# Finally check length of all arrays
for distance in quality_distances:
if distance is not None and len(distance) != 2: # check the length of the items
raise ValueError("Item in parameter quality_distances has length != 2.")
# Create reject quality distance
if isinstance(isolation_distances, tuple) or isolation_distances is None:
isolation_distances = [isolation_distances] * n_duts
# Check iterable and length
if not isinstance(isolation_distances, Iterable):
raise ValueError("Parameter isolation_distances is no iterable.")
elif not isolation_distances: # empty iterable
raise ValueError("Parameter isolation_distances has no items.")
# Finally check length of all arrays
if len(isolation_distances) != n_duts: # empty iterable
raise ValueError("Parameter isolation_distances has the wrong length.")
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable) or val is None, isolation_distances)):
raise ValueError("Not all items in Parameter isolation_distances are iterable or None.")
# Finally check length of all arrays
for distance in isolation_distances:
if distance is not None and len(distance) != 2: # check the length of the items
raise ValueError("Item in parameter isolation_distances has length != 2.")
if not isinstance(max_iterations, Iterable):
max_iterations = [max_iterations] * len(select_duts)
# Finally check length of all arrays
if len(max_iterations) != len(select_duts): # empty iterable
raise ValueError("Parameter max_iterations has the wrong length.")
if not isinstance(max_events, Iterable):
max_events = [max_events] * len(select_duts)
# Finally check length
if len(max_events) != len(select_duts):
raise ValueError("Parameter max_events has the wrong length.")
if output_telescope_configuration is None:
if 'prealigned' in telescope_configuration:
output_telescope_configuration = telescope_configuration.replace('prealigned', 'aligned')
else:
output_telescope_configuration = os.path.splitext(telescope_configuration)[0] + '_aligned.yaml'
elif output_telescope_configuration == telescope_configuration:
raise ValueError('Output telescope configuration file must be different from input telescope configuration file.')
if os.path.isfile(output_telescope_configuration):
logging.info('Output telescope configuration file already exists. Keeping telescope configuration file.')
aligned_telescope = Telescope(configuration_file=output_telescope_configuration)
# For the case where not all DUTs are aligned,
# only revert the alignment for the DUTs that will be aligned.
for align_duts in select_duts:
for dut in align_duts:
aligned_telescope[dut] = telescope[dut]
aligned_telescope.save_configuration()
else:
telescope.save_configuration(configuration_file=output_telescope_configuration)
prealigned_track_candidates_file = os.path.splitext(input_merged_file)[0] + '_track_candidates_prealigned_tmp.h5'
# clean up remaining files
if os.path.isfile(prealigned_track_candidates_file):
os.remove(prealigned_track_candidates_file)
for index, align_duts in enumerate(select_duts):
# Find pre-aligned tracks for the 1st step of the alignment.
# This file can be used for different sets of alignment DUTs,
# so keep the file and remove later.
if not os.path.isfile(prealigned_track_candidates_file):
logging.info('= Alignment step 1: Finding pre-aligned tracks =')
find_tracks(
telescope_configuration=telescope_configuration,
input_merged_file=input_merged_file,
output_track_candidates_file=prealigned_track_candidates_file,
select_extrapolation_duts=select_extrapolation_duts,
align_to_beam=True,
max_events=None)
logging.info('== Aligning %d DUTs: %s ==', len(align_duts), ", ".join(telescope[dut_index].name for dut_index in align_duts))
_duts_alignment(
output_telescope_configuration=output_telescope_configuration, # aligned configuration
merged_file=input_merged_file,
prealigned_track_candidates_file=prealigned_track_candidates_file,
align_duts=align_duts,
alignment_parameters=alignment_parameters[index],
select_telescope_duts=select_telescope_duts,
select_extrapolation_duts=select_extrapolation_duts,
select_fit_duts=select_fit_duts[index],
select_hit_duts=select_hit_duts[index],
max_iterations=max_iterations[index],
max_events=max_events[index],
fit_method=fit_method,
beam_energy=beam_energy,
particle_mass=particle_mass,
scattering_planes=scattering_planes,
track_chi2=track_chi2[index],
cluster_shapes=cluster_shapes[index],
quality_distances=quality_distances,
isolation_distances=isolation_distances,
use_limits=use_limits,
plot=plot,
chunk_size=chunk_size)
if os.path.isfile(prealigned_track_candidates_file):
os.remove(prealigned_track_candidates_file)
return output_telescope_configuration
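# --- Editor's note: minimal usage sketch (not part of the original module). ---
# The file names and DUT indices below are assumptions for illustration only,
# and the enclosing alignment function above is assumed to be exposed as
# align() in this module.
def _example_align_usage():
    # Align the six telescope planes first, then each device under test,
    # following the select_duts convention documented in this module.
    return align(
        telescope_configuration='telescope_prealigned.yaml',  # hypothetical file
        input_merged_file='run_42_merged.h5',  # hypothetical file
        select_duts=[[0, 1, 2, 5, 6, 7], [3], [4]],
        select_telescope_duts=[0, 1, 2, 5, 6, 7],
        select_fit_duts=[[0, 1, 2, 5, 6, 7]] * 3,
        select_hit_duts=[[0, 1, 2, 5, 6, 7]] * 3,
        max_iterations=[5, 2, 2],
        beam_energy=2500.0,  # MeV
        particle_mass=0.511)  # MeV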
def align_kalman(telescope_configuration, input_merged_file, output_telescope_configuration=None, output_alignment_file=None, select_duts=None, alignment_parameters=None, select_telescope_duts=None, select_extrapolation_duts=None, select_fit_duts=None, select_hit_duts=None, max_events=None, beam_energy=None, particle_mass=None, scattering_planes=None, track_chi2=10.0, cluster_shapes=None, annealing_factor=10000, annealing_tracks=5000, max_tracks=10000, alignment_parameters_errors=None, use_limits=True, plot=True, chunk_size=1000):
''' This function does an alignment of the DUTs and sets translation and rotation values for all DUTs.
The reference DUT defines the global coordinate system position at 0, 0, 0 and should be well in the beam and not heavily rotated.
    To solve the chicken-and-egg problem (a good DUT alignment needs hits belonging to one track, but good track finding needs a good DUT alignment),
    this function works only on already pre-aligned hits belonging to one track. Thus this function can only be called after track finding.
These steps are done
1. Take the found tracks and revert the pre-alignment
2. Take the track hits belonging to one track and fit tracks for all DUTs
3. Calculate the residuals for each DUT
4. Deduce rotations from the residuals and apply them to the hits
5. Deduce the translation of each plane
6. Store and apply the new alignment
repeat step 3 - 6 until the total residual does not decrease (RMS_total = sqrt(RMS_x_1^2 + RMS_y_1^2 + RMS_x_2^2 + RMS_y_2^2 + ...))
Parameters
----------
telescope_configuration : string
Filename of the telescope configuration file.
input_merged_file : string
Filename of the input merged file.
output_telescope_configuration : string
Filename of the output telescope configuration file.
select_duts : iterable or iterable of iterable
        The combination of DUTs that are aligned at once. One should always align the high-resolution planes first.
        E.g., for a telescope (first and last 3 planes) with 2 devices in the center (3, 4):
select_duts=[[0, 1, 2, 5, 6, 7], # align the telescope planes first
[4], # align first DUT
[3]] # align second DUT
alignment_parameters : list of lists of strings
The list of alignment parameters for each align_dut. Valid parameters:
- translation_x: horizontal axis
- translation_y: vertical axis
- translation_z: beam axis
- rotation_alpha: rotation around x-axis
- rotation_beta: rotation around y-axis
- rotation_gamma: rotation around z-axis (beam axis)
        If None, all parameters will be selected.
select_telescope_duts : iterable
The given DUTs will be used to align the telescope along the z-axis.
Usually the coordinates of these DUTs are well specified.
At least 2 DUTs need to be specified. The z-position of the selected DUTs will not be changed by default.
select_extrapolation_duts : list
The given DUTs will be used for track extrapolation for improving track finding efficiency.
In some rare cases, removing DUTs with a coarse resolution might improve track finding efficiency.
If None, select all DUTs.
If list is empty or has a single entry, disable extrapolation (at least 2 DUTs are required for extrapolation to work).
select_fit_duts : iterable or iterable of iterable
        Defines for each select_duts combination which devices to use in the track fit.
E.g. To use only the telescope planes (first and last 3 planes) but not the 2 center devices
select_fit_duts=[0, 1, 2, 5, 6, 7]
select_hit_duts : iterable or iterable of iterable
        Defines for each select_duts combination which devices must have a hit to use the track for fitting. The hit
does not have to be used in the fit itself! This is useful for time reference planes.
E.g. To use telescope planes (first and last 3 planes) + time reference plane (3)
select_hit_duts = [0, 1, 2, 4, 5, 6, 7]
    max_events : uint
        Randomly select max_events for alignment. If None, use all events, which might slow down the alignment.
    annealing_factor : float
        Initial scaling factor for the hit covariance matrix (deterministic annealing), so that early, potentially misaligned tracks pull less on the alignment.
    annealing_tracks : uint
        Approximate number of processed tracks over which the annealing factor decays.
    max_tracks : uint or list
        Stop the alignment of a DUT set once this many tracks have been processed per DUT.
beam_energy : float
Energy of the beam in MeV, e.g., 2500.0 MeV for ELSA beam. Only used for the Kalman Filter.
particle_mass : float
Mass of the particle in MeV, e.g., 0.511 MeV for electrons. Only used for the Kalman Filter.
scattering_planes : list or dict
Specifies additional scattering planes in case of DUTs which are not used or additional material in the way of the tracks.
The list must contain dictionaries containing the following keys:
material_budget: material budget of the scattering plane
translation_x/translation_y/translation_z: x/y/z position of the plane (in um)
rotation_alpha/rotation_beta/rotation_gamma: alpha/beta/gamma angle of scattering plane (in radians)
        The material budget is defined as the thickness divided by the radiation length.
If scattering_planes is None, no scattering plane will be added.
track_chi2 : float or list
Setting the limit on the track chi^2. If None or 0.0, no cut will be applied.
A smaller value reduces the number of tracks for the alignment.
        A large value increases the number of tracks but at the cost of alignment efficiency because of potentially bad tracks.
A good start value is 5.0 to 10.0 for high energy beams and 15.0 to 50.0 for low energy beams.
cluster_shapes : iterable or iterable of iterables
List of cluster shapes (unsigned integer) for each DUT. Only the selected cluster shapes will be used for the alignment.
Cluster shapes have impact on precision of the alignment. Larger clusters and certain cluster shapes can have a significant uncertainty for the hit position.
If None, use default cluster shapes [1, 3, 5, 13, 14, 7, 11, 15], i.e. 1x1, 2x1, 1x2, 3-pixel cluster, 4-pixel cluster. If empty list, all cluster sizes will be used.
The cluster shape can be calculated with the help of beam_telescope_analysis.tools.analysis_utils.calculate_cluster_array/calculate_cluster_shape.
quality_distances : 2-tuple or list of 2-tuples
        X and y distance (in um) for each DUT to calculate the quality flag. The selected track and corresponding hit
        must be closer than this distance for the quality flag to be set to 1.
        The purpose of quality_distances is to find good tracks for the alignment.
        A good start value is 1-2x the pixel pitch for large pixels and high-energy beams and 5-10x the pixel pitch for small pixels and low-energy beams.
        Too small a value will remove good tracks; too large a value will allow bad tracks to contribute to the alignment.
        If None, set distance to infinite.
    isolation_distances : 2-tuple or list of 2-tuples
        X and y distance (in um) for each DUT to calculate the isolated track/hit flag. Any other occurrence of tracks or hits from the same event
        within this distance will prevent the flag from being set.
        The purpose of isolation_distances is to remove tracks from the alignment that could potentially be fake tracks (noisy detector / high beam density);
        hits and tracks which are too close to each other are rejected.
        The value given by isolation_distances should be larger than the quality_distances value to be effective.
        Too small a value will remove almost no tracks; too large a value will remove good tracks.
        If None, set distance to 0.
use_limits : bool
If True, use column and row limits from pre-alignment for selecting the data.
plot : bool
If True, create additional output plots.
chunk_size : uint
Chunk size of the data when reading from file.
'''
telescope = Telescope(telescope_configuration)
n_duts = len(telescope)
    # Create list with combinations of DUTs to align
    if select_duts is None:  # If None: align all DUTs
        select_duts = list(range(n_duts))
    logging.info('=== Alignment of %d DUTs ===' % len(set(np.unique(np.hstack(select_duts)).tolist())))
# Check for value errors
if not isinstance(select_duts, Iterable):
raise ValueError("Parameter select_duts is not an iterable.")
elif not select_duts: # empty iterable
raise ValueError("Parameter select_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), select_duts)):
select_duts = [select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_duts)):
raise ValueError("Not all items in parameter select_duts are iterable.")
# Finally check length of all iterables in iterable
for dut in select_duts:
if not dut: # check the length of the items
raise ValueError("Item in parameter select_duts has length 0.")
# Check if some DUTs will not be aligned
    non_select_duts = set(range(n_duts)) - set(np.unique(np.hstack(select_duts)).tolist())
if non_select_duts:
logging.info('These DUTs will not be aligned: %s' % ", ".join(telescope[dut_index].name for dut_index in non_select_duts))
# Create list
if alignment_parameters is None:
alignment_parameters = [[None] * len(duts) for duts in select_duts]
# Check for value errors
if not isinstance(alignment_parameters, Iterable):
raise ValueError("Parameter alignment_parameters is not an iterable.")
elif not alignment_parameters: # empty iterable
raise ValueError("Parameter alignment_parameters has no items.")
# Finally check length of all arrays
if len(alignment_parameters) != len(select_duts): # empty iterable
raise ValueError("Parameter alignment_parameters has the wrong length.")
# for index, alignment_parameter in enumerate(alignment_parameters):
# if alignment_parameter is None:
# alignment_parameters[index] = [None] * len(select_duts[index])
# if len(alignment_parameters[index]) != len(select_duts[index]): # check the length of the items
# raise ValueError("Item in parameter alignment_parameter has the wrong length.")
# Create track, hit selection
if select_hit_duts is None: # If None: use all DUTs
select_hit_duts = []
# copy each item
for duts in select_duts:
select_hit_duts.append(duts[:]) # require a hit for each fit DUT
# Check iterable and length
if not isinstance(select_hit_duts, Iterable):
raise ValueError("Parameter select_hit_duts is not an iterable.")
elif not select_hit_duts: # empty iterable
raise ValueError("Parameter select_hit_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), select_hit_duts)):
select_hit_duts = [select_hit_duts[:] for _ in select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_hit_duts)):
raise ValueError("Not all items in parameter select_hit_duts are iterable.")
# Finally check length of all arrays
if len(select_hit_duts) != len(select_duts): # empty iterable
raise ValueError("Parameter select_hit_duts has the wrong length.")
for hit_dut in select_hit_duts:
if len(hit_dut) < 2: # check the length of the items
raise ValueError("Item in parameter select_hit_duts has length < 2.")
# Create track, hit selection
if select_fit_duts is None: # If None: use all DUTs
select_fit_duts = []
# copy each item from select_hit_duts
for hit_duts in select_hit_duts:
select_fit_duts.append(hit_duts[:]) # require a hit for each fit DUT
# Check iterable and length
if not isinstance(select_fit_duts, Iterable):
raise ValueError("Parameter select_fit_duts is not an iterable.")
elif not select_fit_duts: # empty iterable
raise ValueError("Parameter select_fit_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), select_fit_duts)):
select_fit_duts = [select_fit_duts[:] for _ in select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_fit_duts)):
raise ValueError("Not all items in parameter select_fit_duts are iterable.")
# Finally check length of all arrays
if len(select_fit_duts) != len(select_duts): # empty iterable
raise ValueError("Parameter select_fit_duts has the wrong length.")
for index, fit_dut in enumerate(select_fit_duts):
if len(fit_dut) < 2: # check the length of the items
raise ValueError("Item in parameter select_fit_duts has length < 2.")
if set(fit_dut) - set(select_hit_duts[index]): # fit DUTs are required to have a hit
raise ValueError("DUT in select_fit_duts is not in select_hit_duts.")
# Create cluster shape selection
if cluster_shapes is None: # If None: set default value for all DUTs
cluster_shapes = [cluster_shapes] * len(select_duts)
# Check iterable and length
if not isinstance(cluster_shapes, Iterable):
raise ValueError("Parameter cluster_shapes is not an iterable.")
# elif not cluster_shapes: # empty iterable
# raise ValueError("Parameter cluster_shapes has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable) and val is not None, cluster_shapes)):
cluster_shapes = [cluster_shapes[:] for _ in select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable) or val is None, cluster_shapes)):
raise ValueError("Not all items in parameter cluster_shapes are iterable or None.")
# Finally check length of all arrays
if len(cluster_shapes) != len(select_duts): # empty iterable
raise ValueError("Parameter cluster_shapes has the wrong length.")
# expand dimensions
# Check iterable and length for each item
for index, shapes in enumerate(cluster_shapes):
# Check if only non-iterable in iterable
if shapes is None:
cluster_shapes[index] = [shapes] * len(select_duts[index])
elif all(map(lambda val: not isinstance(val, Iterable) and val is not None, shapes)):
cluster_shapes[index] = [shapes[:] for _ in select_duts[index]]
# again check for consistency
for index, shapes in enumerate(cluster_shapes):
# Check iterable and length
if not isinstance(shapes, Iterable):
raise ValueError("Item in parameter cluster_shapes is not an iterable.")
elif not shapes: # empty iterable
raise ValueError("Item in parameter cluster_shapes has no items.")
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable) or val is None, shapes)):
raise ValueError("Not all items of item in cluster_shapes are iterable or None.")
if len(shapes) != len(select_duts[index]): # empty iterable
raise ValueError("Item in parameter cluster_shapes has the wrong length.")
if not isinstance(max_events, Iterable):
max_events = [max_events] * len(select_duts)
# Finally check length
if len(max_events) != len(select_duts):
raise ValueError("Parameter max_events has the wrong length.")
if not isinstance(max_tracks, Iterable):
max_tracks = [max_tracks] * len(select_duts)
# Finally check length
if len(max_tracks) != len(select_duts):
raise ValueError("Parameter max_tracks has the wrong length.")
if output_telescope_configuration is None:
if 'prealigned' in telescope_configuration:
output_telescope_configuration = telescope_configuration.replace('prealigned', 'aligned_kalman')
else:
output_telescope_configuration = os.path.splitext(telescope_configuration)[0] + '_aligned_kalman.yaml'
elif output_telescope_configuration == telescope_configuration:
raise ValueError('Output telescope configuration file must be different from input telescope configuration file.')
if os.path.isfile(output_telescope_configuration):
logging.info('Output telescope configuration file already exists. Keeping telescope configuration file.')
aligned_telescope = Telescope(configuration_file=output_telescope_configuration)
# For the case where not all DUTs are aligned,
# only revert the alignment for the DUTs that will be aligned.
for align_duts in select_duts:
for dut in align_duts:
aligned_telescope[dut] = telescope[dut]
aligned_telescope.save_configuration()
else:
telescope.save_configuration(configuration_file=output_telescope_configuration)
if output_alignment_file is None:
output_alignment_file = os.path.splitext(input_merged_file)[0] + '_KFA_alignment.h5'
else:
output_alignment_file = output_alignment_file
for index, align_duts in enumerate(select_duts):
# Find pre-aligned tracks for the 1st step of the alignment.
# This file can be used for different sets of alignment DUTs,
# so keep the file and remove later.
prealigned_track_candidates_file = os.path.splitext(input_merged_file)[0] + '_track_candidates_prealigned_%i_tmp.h5' % index
find_tracks(
telescope_configuration=telescope_configuration,
input_merged_file=input_merged_file,
output_track_candidates_file=prealigned_track_candidates_file,
select_extrapolation_duts=select_extrapolation_duts,
align_to_beam=True,
max_events=max_events[index])
logging.info('== Aligning %d DUTs: %s ==', len(align_duts), ", ".join(telescope[dut_index].name for dut_index in align_duts))
_duts_alignment_kalman(
telescope_configuration=output_telescope_configuration, # aligned configuration
output_alignment_file=output_alignment_file,
input_track_candidates_file=prealigned_track_candidates_file,
select_duts=align_duts,
alignment_parameters=alignment_parameters[index],
select_telescope_duts=select_telescope_duts,
select_fit_duts=select_fit_duts[index],
select_hit_duts=select_hit_duts[index],
beam_energy=beam_energy,
particle_mass=particle_mass,
scattering_planes=scattering_planes,
track_chi2=track_chi2[index],
annealing_factor=annealing_factor,
annealing_tracks=annealing_tracks,
max_tracks=max_tracks[index],
alignment_parameters_errors=alignment_parameters_errors,
use_limits=use_limits,
plot=plot,
chunk_size=chunk_size,
iteration_index=index)
return output_telescope_configuration
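# --- Editor's note (illustration only, not part of the original module). ---
# align_kalman() inflates the hit covariance by a deterministic-annealing
# factor so that the first, still badly aligned tracks pull less on the
# alignment. The exact schedule lives in _calculate_annealing() (defined
# elsewhere in this package); the sketch below is an assumption showing one
# common choice: an exponential decay from roughly `annealing_factor` down
# to 1 over about `annealing_tracks` processed tracks.
def _example_annealing_schedule(k, annealing_factor=10000.0, annealing_tracks=5000.0):
    import numpy as np
    return 1.0 + annealing_factor * np.exp(-float(k) / annealing_tracks)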
def _duts_alignment(output_telescope_configuration, merged_file, align_duts, prealigned_track_candidates_file, alignment_parameters, select_telescope_duts, select_extrapolation_duts, select_fit_duts, select_hit_duts, max_iterations, max_events, fit_method, beam_energy, particle_mass, scattering_planes, track_chi2, cluster_shapes, quality_distances, isolation_distances, use_limits, plot=True, chunk_size=100000): # Called for each list of DUTs to align
alignment_duts = "_".join(str(dut) for dut in align_duts)
aligned_telescope = Telescope(configuration_file=output_telescope_configuration)
output_track_candidates_file = None
iteration_steps = range(max_iterations)
for iteration_step in iteration_steps:
# aligning telescope DUTs to the beam axis (z-axis)
if set(align_duts) & set(select_telescope_duts):
align_telescope(
telescope_configuration=output_telescope_configuration,
select_telescope_duts=list(set(align_duts) & set(select_telescope_duts)))
actual_align_duts = align_duts
actual_fit_duts = select_fit_duts
        # require hits in each DUT that will be aligned
actual_hit_duts = [list(set(select_hit_duts) | set([dut_index])) for dut_index in actual_align_duts]
actual_quality_duts = actual_hit_duts
fit_quality_distances = np.zeros_like(quality_distances)
for index, item in enumerate(quality_distances):
if index in align_duts:
fit_quality_distances[index, 0] = np.linspace(item[0] * 1.08447**max_iterations, item[0], max_iterations)[iteration_step]
fit_quality_distances[index, 1] = np.linspace(item[1] * 1.08447**max_iterations, item[1], max_iterations)[iteration_step]
else:
fit_quality_distances[index, 0] = item[0]
fit_quality_distances[index, 1] = item[1]
fit_quality_distances = fit_quality_distances.tolist()
if iteration_step > 0:
logging.info('= Alignment step 1 - iteration %d: Finding tracks for %d DUTs =', iteration_step, len(align_duts))
# remove temporary file
if output_track_candidates_file is not None:
os.remove(output_track_candidates_file)
output_track_candidates_file = os.path.splitext(merged_file)[0] + '_track_candidates_aligned_duts_%s_tmp_%d.h5' % (alignment_duts, iteration_step)
find_tracks(
telescope_configuration=output_telescope_configuration,
input_merged_file=merged_file,
output_track_candidates_file=output_track_candidates_file,
select_extrapolation_duts=select_extrapolation_duts,
align_to_beam=True,
max_events=max_events)
# The quality flag of the actual align DUT depends on the alignment calculated
# in the previous iteration, therefore this step has to be done every time
logging.info('= Alignment step 2 - iteration %d: Fitting tracks for %d DUTs =', iteration_step, len(align_duts))
output_tracks_file = os.path.splitext(merged_file)[0] + '_tracks_aligned_duts_%s_tmp_%d.h5' % (alignment_duts, iteration_step)
fit_tracks(
telescope_configuration=output_telescope_configuration,
input_track_candidates_file=prealigned_track_candidates_file if iteration_step == 0 else output_track_candidates_file,
output_tracks_file=output_tracks_file,
max_events=None if iteration_step > 0 else max_events,
select_duts=actual_align_duts,
select_fit_duts=actual_fit_duts,
select_hit_duts=actual_hit_duts,
exclude_dut_hit=False, # for biased residuals
select_align_duts=actual_align_duts, # correct residual offset for align DUTs
method=fit_method,
beam_energy=beam_energy,
particle_mass=particle_mass,
scattering_planes=scattering_planes,
quality_distances=quality_distances,
isolation_distances=isolation_distances,
use_limits=use_limits,
plot=plot,
chunk_size=chunk_size)
logging.info('= Alignment step 3a - iteration %d: Selecting tracks for %d DUTs =', iteration_step, len(align_duts))
output_selected_tracks_file = os.path.splitext(merged_file)[0] + '_tracks_aligned_selected_tracks_duts_%s_tmp_%d.h5' % (alignment_duts, iteration_step)
# generate query for select_tracks
# generate default selection of cluster shapes: 1x1, 2x1, 1x2, 3-pixel cluster, 4-pixel cluster
for index, shapes in enumerate(cluster_shapes):
if shapes is None:
cluster_shapes[index] = default_cluster_shapes
query_string = [((('(track_chi_red < %f)' % track_chi2[index]) if track_chi2[index] else '') + (' & ' if (track_chi2[index] and cluster_shapes[index]) else '') + (('(' + ' | '.join([('(cluster_shape_dut_{0} == %d)' % cluster_shape) for cluster_shape in cluster_shapes[index]]).format(dut_index) + ')') if cluster_shapes[index] else '')) for index, dut_index in enumerate(actual_align_duts)]
data_selection.select_tracks(
telescope_configuration=output_telescope_configuration,
input_tracks_file=output_tracks_file,
output_tracks_file=output_selected_tracks_file,
select_duts=actual_align_duts,
select_hit_duts=actual_hit_duts,
select_quality_duts=actual_quality_duts,
select_isolated_track_duts=actual_quality_duts,
select_isolated_hit_duts=actual_quality_duts,
query=query_string,
max_events=None,
chunk_size=chunk_size)
# if fit DUTs were aligned, update telescope alignment
if set(align_duts) & set(select_fit_duts):
logging.info('= Alignment step 3b - iteration %d: Aligning telescope =', iteration_step)
output_track_angles_file = os.path.splitext(merged_file)[0] + '_tracks_angles_aligned_selected_tracks_duts_%s_tmp_%d.h5' % (alignment_duts, iteration_step)
histogram_track_angle(
telescope_configuration=output_telescope_configuration,
input_tracks_file=output_selected_tracks_file,
output_track_angle_file=output_track_angles_file,
select_duts=actual_align_duts,
n_bins=100,
plot=plot)
# Read and store beam angle to improve track finding
if (set(align_duts) & set(select_fit_duts)):
with tb.open_file(output_track_angles_file, mode="r") as in_file_h5:
if not np.isnan(in_file_h5.root.Global_alpha_track_angle_hist.attrs.mean) and not np.isnan(in_file_h5.root.Global_beta_track_angle_hist.attrs.mean):
aligned_telescope = Telescope(configuration_file=output_telescope_configuration)
aligned_telescope.rotation_alpha = in_file_h5.root.Global_alpha_track_angle_hist.attrs.mean
aligned_telescope.rotation_beta = in_file_h5.root.Global_beta_track_angle_hist.attrs.mean
aligned_telescope.save_configuration()
else:
logging.warning("Cannot read track angle histograms, track finding might be spoiled")
os.remove(output_track_angles_file)
if plot:
logging.info('= Alignment step 3c - iteration %d: Calculating residuals =', iteration_step)
output_residuals_file = os.path.splitext(merged_file)[0] + '_residuals_aligned_selected_tracks_%s_tmp_%d.h5' % (alignment_duts, iteration_step)
calculate_residuals(
telescope_configuration=output_telescope_configuration,
input_tracks_file=output_selected_tracks_file,
output_residuals_file=output_residuals_file,
select_duts=actual_align_duts,
use_limits=use_limits,
plot=True,
chunk_size=chunk_size)
os.remove(output_residuals_file)
logging.info('= Alignment step 4 - iteration %d: Calculating transformation matrix for %d DUTs =', iteration_step, len(align_duts))
calculate_transformation(
telescope_configuration=output_telescope_configuration,
input_tracks_file=output_selected_tracks_file,
select_duts=actual_align_duts,
select_alignment_parameters=[(["translation_x", "translation_y", "rotation_alpha", "rotation_beta", "rotation_gamma"] if (dut_index in select_telescope_duts and (alignment_parameters is None or alignment_parameters[i] is None)) else (default_alignment_parameters if (alignment_parameters is None or alignment_parameters[i] is None) else alignment_parameters[i])) for i, dut_index in enumerate(actual_align_duts)],
use_limits=use_limits,
max_iterations=100,
chunk_size=chunk_size)
# Delete temporary files
os.remove(output_tracks_file)
os.remove(output_selected_tracks_file)
# Delete temporary files
if output_track_candidates_file is not None:
os.remove(output_track_candidates_file)
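# --- Editor's note (illustration only, not part of the original module). ---
# _duts_alignment() tightens the quality distance of the aligned DUTs on every
# iteration: the cut starts at quality_distance * 1.08447**max_iterations and
# shrinks linearly to the user-supplied value, so early iterations accept
# looser track-hit matches. A standalone demonstration of that schedule:
def _example_quality_distance_schedule(quality_distance=250.0, max_iterations=5):
    import numpy as np
    # One entry per iteration, from the loosest cut down to the final value (in um).
    return np.linspace(quality_distance * 1.08447 ** max_iterations,
                       quality_distance, max_iterations)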
def _duts_alignment_kalman(telescope_configuration, output_alignment_file, input_track_candidates_file, alignment_parameters, select_telescope_duts, select_duts=None, select_hit_duts=None, select_fit_duts=None, min_track_hits=None, beam_energy=2500, particle_mass=0.511, scattering_planes=None, track_chi2=25.0, use_limits=True, iteration_index=0, exclude_dut_hit=False, annealing_factor=10000, annealing_tracks=5000, max_tracks=10000, alignment_parameters_errors=None, plot=True, chunk_size=1000):
    '''Perform the Kalman-Filter-based alignment for the selected DUTs and store the updated alignment values.
Parameters
----------
telescope_configuration : string
Filename of the telescope configuration file.
input_track_candidates_file : string
Filename of the input track candidate file.
    output_alignment_file : string
        Filename of the output alignment file.
max_events : uint
Maximum number of randomly chosen events. If None, all events are taken.
select_duts : list
Specify the fit DUTs for which tracks will be fitted and a track array will be generated.
        If None, all DUTs are selected.
select_hit_duts : list or list of lists
Specifying DUTs that are required to have a hit for each selected DUT.
If None, no DUT is required to have a hit.
select_fit_duts : list or list of lists
Specifying DUTs that are used for the track fit for each selected DUT.
If None, all DUTs are used for the track fit.
Note: This parameter needs to be set correctly. Usually not all available DUTs should be used for track fitting.
The list usually only contains DUTs, which are part of the telescope.
min_track_hits : uint or list
Minimum number of track hits for each selected DUT.
If None or list item is None, the minimum number of track hits is the length of select_fit_duts.
exclude_dut_hit : bool or list
Decide whether or not to use hits in the actual fit DUT for track fitting (for unconstrained residuals).
If False (default), use all DUTs as specified in select_fit_duts and use them for track fitting if hits are available (potentially constrained residuals).
        If True, do not use hits from the actual fit DUT for track fitting, even if specified in select_fit_duts (unconstrained residuals).
beam_energy : float
Energy of the beam in MeV, e.g., 2500.0 MeV for ELSA beam. Only used for the Kalman Filter.
particle_mass : float
Mass of the particle in MeV, e.g., 0.511 MeV for electrons. Only used for the Kalman Filter.
scattering_planes : list of Dut objects
Specifies additional scattering planes in case of DUTs which are not used or additional material in the way of the tracks.
Scattering planes must contain the following attributes:
name: name of the scattering plane
material_budget: material budget of the scattering plane
translation_x/translation_y/translation_z: x/y/z position of the plane (in um)
rotation_alpha/rotation_beta/rotation_gamma: alpha/beta/gamma angle of scattering plane (in radians)
        The material budget is defined as the thickness divided by the radiation length.
If scattering_planes is None, no scattering plane will be added. Only available when using the Kalman Filter.
See the example on how to create scattering planes in the example script folder.
quality_distances : 2-tuple or list of 2-tuples
        X and y distance (in um) for each DUT to calculate the quality flag. The selected track and corresponding hit
        must be closer than this distance for the quality flag to be set to 1.
        If None, set distance to infinite.
    isolation_distances : 2-tuple or list of 2-tuples
        X and y distance (in um) for each DUT to calculate the isolated track/hit flag. Any other occurrence of tracks or hits from the same event
        within this distance will prevent the flag from being set.
        If None, set distance to 0.
use_limits : bool
If True, use column and row limits from pre-alignment for selecting the data.
keep_data : bool
Keep all track candidates in data and add track info only to fitted tracks. Necessary for purity calculations.
full_track_info : bool
If True, the track vector and position of all DUTs is appended to track table in order to get the full track information.
If False, only the track vector and position of the actual fit DUT is appended to track table.
chunk_size : uint
Chunk size of the data when reading from file.
'''
def _store_alignment_data(alignment_values, n_tracks_processed, chi2s, chi2s_probs, deviation_cuts):
''' Helper function to write alignment data to output file.
'''
# Do not forget to save configuration to .yaml file.
telescope.save_configuration()
# Store alignment results in file
for dut_index, _ in enumerate(telescope):
try: # Check if table exists already, then append data
alignment_table = out_file_h5.get_node('/Alignment_DUT%i' % dut_index)
except tb.NoSuchNodeError: # Table does not exist, thus create new
alignment_table = out_file_h5.create_table(
where=out_file_h5.root,
name='Alignment_DUT%i' % dut_index,
description=alignment_values[dut_index].dtype,
title='Alignment_DUT%i' % dut_index,
filters=tb.Filters(
complib='blosc',
complevel=5,
fletcher32=False))
alignment_table.append(alignment_values[dut_index])
alignment_table.attrs.deviation_cuts = deviation_cuts
alignment_table.attrs.n_tracks_processed = n_tracks_processed[dut_index]
alignment_table.flush()
# Store chi2 values
try: # Check if table exists already, then append data
out_chi2s = out_file_h5.get_node('/TrackChi2')
out_chi2s_probs = out_file_h5.get_node('/TrackpValue')
except tb.NoSuchNodeError: # Table does not exist, thus create new
out_chi2s = out_file_h5.create_earray(
where=out_file_h5.root,
name='TrackChi2',
title='Track Chi2',
atom=tb.Atom.from_dtype(chi2s.dtype),
shape=(0,),
filters=tb.Filters(
complib='blosc',
complevel=5,
fletcher32=False))
out_chi2s_probs = out_file_h5.create_earray(
where=out_file_h5.root,
name='TrackpValue',
title='Track pValue',
atom=tb.Atom.from_dtype(chi2s.dtype),
shape=(0,),
filters=tb.Filters(
complib='blosc',
complevel=5,
fletcher32=False))
out_chi2s.append(chi2s)
out_chi2s.flush()
out_chi2s_probs.append(chi2s_probs)
out_chi2s_probs.flush()
out_chi2s.attrs.max_track_chi2 = track_chi2
def _alignment_loop(actual_align_state, actual_align_cov, initial_rotation_matrix, initial_position_vector):
        ''' Helper function which loops over track chunks and performs the alignment.
'''
# Init progressbar
n_tracks = in_file_h5.root.TrackCandidates.shape[0]
pbar = tqdm(total=n_tracks, ncols=80)
# Number of processed tracks for every DUT
        n_tracks_processed = np.zeros(shape=(len(telescope)), dtype=np.int64)
# Number of tracks fulfilling hit requirement
total_n_tracks_valid_hits = 0
# Maximum allowed relative change for each alignment parameter. Can be adjusted.
deviation_cuts = [0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
alpha = np.zeros(shape=len(telescope), dtype=np.float64) # annealing factor
# Loop in chunks over tracks. After each chunk, alignment values are stored.
for track_candidates_chunk, index_chunk in analysis_utils.data_aligned_at_events(in_file_h5.root.TrackCandidates, chunk_size=10000):
# Select only tracks for which hit requirement is fulfilled
            track_candidates_chunk_valid_hits = track_candidates_chunk[(track_candidates_chunk['hit_flag'] & dut_hit_mask) == dut_hit_mask]
total_n_tracks_valid_hits_chunk = track_candidates_chunk_valid_hits.shape[0]
total_n_tracks_valid_hits += total_n_tracks_valid_hits_chunk
# Per chunk variables
chi2s = np.zeros(shape=(total_n_tracks_valid_hits_chunk), dtype=np.float64) # track chi2s
chi2s_probs = np.zeros(shape=(total_n_tracks_valid_hits_chunk), dtype=np.float64) # track pvalues
alignment_values = np.full(shape=(len(telescope), total_n_tracks_valid_hits_chunk), dtype=kfa_alignment_descr, fill_value=np.nan) # alignment values
# Loop over tracks in chunk
for track_index, track in enumerate(track_candidates_chunk_valid_hits):
track_hits = np.full((1, n_duts, 6), fill_value=np.nan, dtype=np.float64)
# Compute aligned position and apply the alignment
for dut_index, dut in enumerate(telescope):
# Get local track hits
track_hits[:, dut_index, 0] = track['x_dut_%s' % dut_index]
track_hits[:, dut_index, 1] = track['y_dut_%s' % dut_index]
track_hits[:, dut_index, 2] = track['z_dut_%s' % dut_index]
track_hits[:, dut_index, 3] = track['x_err_dut_%s' % dut_index]
track_hits[:, dut_index, 4] = track['y_err_dut_%s' % dut_index]
track_hits[:, dut_index, 5] = track['z_err_dut_%s' % dut_index]
# Calculate new alignment (takes initial alignment and actual *change* of parameters)
new_rotation_matrix, new_position_vector = _update_alignment(initial_rotation_matrix[dut_index], initial_position_vector[dut_index], actual_align_state[dut_index])
# Get euler angles from rotation matrix
alpha_average, beta_average, gamma_average = geometry_utils.euler_angles(R=new_rotation_matrix)
# Set new alignment to DUT
dut._translation_x = float(new_position_vector[0])
dut._translation_y = float(new_position_vector[1])
dut._translation_z = float(new_position_vector[2])
dut._rotation_alpha = float(alpha_average)
dut._rotation_beta = float(beta_average)
dut._rotation_gamma = float(gamma_average)
alignment_values[dut_index, track_index]['translation_x'] = dut.translation_x
alignment_values[dut_index, track_index]['translation_y'] = dut.translation_y
alignment_values[dut_index, track_index]['translation_z'] = dut.translation_z
alignment_values[dut_index, track_index]['rotation_alpha'] = dut.rotation_alpha
alignment_values[dut_index, track_index]['rotation_beta'] = dut.rotation_beta
alignment_values[dut_index, track_index]['rotation_gamma'] = dut.rotation_gamma
C = actual_align_cov[dut_index]
alignment_values[dut_index, track_index]['translation_x_err'] = np.sqrt(C[0, 0])
alignment_values[dut_index, track_index]['translation_y_err'] = np.sqrt(C[1, 1])
alignment_values[dut_index, track_index]['translation_z_err'] = np.sqrt(C[2, 2])
alignment_values[dut_index, track_index]['rotation_alpha_err'] = np.sqrt(C[3, 3])
alignment_values[dut_index, track_index]['rotation_beta_err'] = np.sqrt(C[4, 4])
alignment_values[dut_index, track_index]['rotation_gamma_err'] = np.sqrt(C[5, 5])
# Calculate deterministic annealing (scaling factor for covariance matrix) in order to take into account misalignment
alpha[dut_index] = _calculate_annealing(k=n_tracks_processed[dut_index], annealing_factor=annealing_factor, annealing_tracks=annealing_tracks)
# Store annealing factor
alignment_values[dut_index, track_index]['annealing_factor'] = alpha[dut_index]
# Run Kalman Filter
try:
offsets, slopes, chi2s_reg, chi2s_red, chi2s_prob, x_err, y_err, cov, cov_obs, obs_mat = _fit_tracks_kalman_loop(track_hits, telescope, fit_duts, beam_energy, particle_mass, scattering_planes, alpha)
except Exception as e:
                    logging.warning('Track fitting failed: %s', e)
continue
# Store chi2 and pvalue
chi2s[track_index] = chi2s_red
chi2s_probs[track_index] = chi2s_prob
# Data quality check I: Check chi2 of track
if chi2s_red > track_chi2:
continue
# Actual track states
p0 = np.column_stack((offsets[0, :, 0], offsets[0, :, 1],
slopes[0, :, 0], slopes[0, :, 1]))
# Covariance matrix (x, y, dx, dy) of track estimates
C0 = cov[0, :, :, :]
# Covariance matrix (x, y, dx, dy) of observations
V = cov_obs[0, :, :, :]
# Measurement matrix
H = obs_mat[0, :, :, :]
# Actual alignment parameters and its covariance
a0 = actual_align_state.copy()
E0 = actual_align_cov.copy()
# Updated alignment parameters and its covariance
E1 = np.zeros_like(E0)
a1 = np.zeros_like(a0)
# Update all alignables
actual_align_state, actual_align_cov, alignment_values, n_tracks_processed = _update_alignment_parameters(
telescope, H, V, C0, p0, a0, E0, track_hits, a1, E1,
alignment_values, deviation_cuts,
actual_align_state, actual_align_cov, n_tracks_processed, track_index)
# Reached number of max. specified tracks. Stop alignment
if n_tracks_processed.min() > max_tracks:
pbar.update(track_index)
pbar.write('Processed {0} tracks (per DUT) out of {1} tracks'.format(n_tracks_processed, total_n_tracks_valid_hits))
pbar.close()
logging.info('Maximum number of tracks reached! Stopping alignment...')
# Store alignment data
_store_alignment_data(alignment_values[:, :track_index + 1], n_tracks_processed, chi2s[:track_index + 1], chi2s_probs[:track_index + 1], deviation_cuts)
return
pbar.update(track_candidates_chunk.shape[0])
pbar.write('Processed {0} tracks (per DUT) out of {1} tracks'.format(n_tracks_processed, total_n_tracks_valid_hits))
# Store alignment data
_store_alignment_data(alignment_values, n_tracks_processed, chi2s, chi2s_probs, deviation_cuts)
pbar.close()
telescope = Telescope(telescope_configuration)
n_duts = len(telescope)
if iteration_index == 0: # clean up before starting alignment. In case different sets of DUTs are aligned after each other only clean up once.
if os.path.exists(output_alignment_file):
os.remove(output_alignment_file)
    logging.info('=== Alignment step 2: Fitting tracks of %d DUTs ===' % n_duts)
if not beam_energy:
raise ValueError('Beam energy not given (in MeV).')
if not particle_mass:
raise ValueError('Particle mass not given (in MeV).')
if select_duts is None:
select_duts = list(range(n_duts)) # standard setting: fit tracks for all DUTs
elif not isinstance(select_duts, Iterable):
select_duts = [select_duts]
# Check for duplicates
if len(select_duts) != len(set(select_duts)):
raise ValueError("Found douplicate in select_duts.")
# Check if any iterable in iterable
if any(map(lambda val: isinstance(val, Iterable), select_duts)):
raise ValueError("Item in parameter select_duts is iterable.")
# Create track, hit selection
if select_fit_duts is None: # If None: use all DUTs
select_fit_duts = list(range(n_duts))
# # copy each item
# for hit_duts in select_hit_duts:
# select_fit_duts.append(hit_duts[:]) # require a hit for each fit DUT
# Check iterable and length
if not isinstance(select_fit_duts, Iterable):
raise ValueError("Parameter select_fit_duts is not an iterable.")
elif not select_fit_duts: # empty iterable
raise ValueError("Parameter select_fit_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable) and val is not None, select_fit_duts)):
select_fit_duts = [select_fit_duts[:] for _ in select_duts]
# if None use all DUTs
for index, item in enumerate(select_fit_duts):
if item is None:
select_fit_duts[index] = list(range(n_duts))
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_fit_duts)):
raise ValueError("Not all items in parameter select_fit_duts are iterable.")
# Finally check length of all arrays
if len(select_fit_duts) != len(select_duts): # empty iterable
raise ValueError("Parameter select_fit_duts has the wrong length.")
for index, fit_dut in enumerate(select_fit_duts):
if len(fit_dut) < 2: # check the length of the items
raise ValueError("Item in parameter select_fit_duts has length < 2.")
# Create track, hit selection
if select_hit_duts is None: # If None, require no hit
# select_hit_duts = list(range(n_duts))
select_hit_duts = []
# Check iterable and length
if not isinstance(select_hit_duts, Iterable):
raise ValueError("Parameter select_hit_duts is not an iterable.")
# elif not select_hit_duts: # empty iterable
# raise ValueError("Parameter select_hit_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable) and val is not None, select_hit_duts)):
select_hit_duts = [select_hit_duts[:] for _ in select_duts]
# If None, require no hit
for index, item in enumerate(select_hit_duts):
if item is None:
select_hit_duts[index] = []
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_hit_duts)):
raise ValueError("Not all items in parameter select_hit_duts are iterable.")
# Finally check length of all arrays
if len(select_hit_duts) != len(select_duts): # empty iterable
raise ValueError("Parameter select_hit_duts has the wrong length.")
# Check iterable and length
if not isinstance(exclude_dut_hit, Iterable):
exclude_dut_hit = [exclude_dut_hit] * len(select_duts)
elif not exclude_dut_hit: # empty iterable
raise ValueError("Parameter exclude_dut_hit has no items.")
# Finally check length of all array
if len(exclude_dut_hit) != len(select_duts): # empty iterable
raise ValueError("Parameter exclude_dut_hit has the wrong length.")
# Check if only bools in iterable
if not all(map(lambda val: isinstance(val, (bool,)), exclude_dut_hit)):
raise ValueError("Not all items in parameter exclude_dut_hit are boolean.")
# Check iterable and length
if not isinstance(min_track_hits, Iterable):
min_track_hits = [min_track_hits] * len(select_duts)
# Finally check length of all arrays
if len(min_track_hits) != len(select_duts): # empty iterable
raise ValueError("Parameter min_track_hits has the wrong length.")
fitted_duts = []
with tb.open_file(input_track_candidates_file, mode='r') as in_file_h5:
with tb.open_file(output_alignment_file, mode='a') as out_file_h5:
for fit_dut_index, actual_fit_dut in enumerate(select_duts): # Loop over the DUTs where tracks shall be fitted for
# Test whether other DUTs have identical tracks
# if yes, save some CPU time and fit only once.
# This following list contains all DUT indices that will be fitted
# during this step of the loop.
if actual_fit_dut in fitted_duts:
continue
# calculate all DUTs with identical tracks to save processing time
actual_fit_duts = []
for curr_fit_dut_index, curr_fit_dut in enumerate(select_duts):
if (curr_fit_dut == actual_fit_dut or
(((exclude_dut_hit[curr_fit_dut_index] is False and exclude_dut_hit[fit_dut_index] is False and set(select_fit_duts[curr_fit_dut_index]) == set(select_fit_duts[fit_dut_index])) or
(exclude_dut_hit[curr_fit_dut_index] is False and exclude_dut_hit[fit_dut_index] is True and set(select_fit_duts[curr_fit_dut_index]) == (set(select_fit_duts[fit_dut_index]) - set([actual_fit_dut]))) or
(exclude_dut_hit[curr_fit_dut_index] is True and exclude_dut_hit[fit_dut_index] is False and (set(select_fit_duts[curr_fit_dut_index]) - set([curr_fit_dut])) == set(select_fit_duts[fit_dut_index])) or
(exclude_dut_hit[curr_fit_dut_index] is True and exclude_dut_hit[fit_dut_index] is True and (set(select_fit_duts[curr_fit_dut_index]) - set([curr_fit_dut])) == (set(select_fit_duts[fit_dut_index]) - set([actual_fit_dut])))) and
set(select_hit_duts[curr_fit_dut_index]) == set(select_hit_duts[fit_dut_index]) and
min_track_hits[curr_fit_dut_index] == min_track_hits[fit_dut_index])):
actual_fit_duts.append(curr_fit_dut)
# continue with fitting
logging.info('== Fit tracks for %s ==', ', '.join([telescope[curr_dut].name for curr_dut in actual_fit_duts]))
# select hit DUTs based on input parameters
# hit DUTs are always enforced
hit_duts = select_hit_duts[fit_dut_index]
dut_hit_mask = 0 # DUTs required to have hits
for dut_index in hit_duts:
dut_hit_mask |= ((1 << dut_index))
logging.info('Require hits in %d DUTs for track selection: %s', len(hit_duts), ', '.join([telescope[curr_dut].name for curr_dut in hit_duts]))
# select fit DUTs based on input parameters
# exclude actual DUTs from fit DUTs if exclude_dut_hit parameter is set (for, e.g., unbiased residuals)
fit_duts = list(set(select_fit_duts[fit_dut_index]) - set([actual_fit_dut])) if exclude_dut_hit[fit_dut_index] else select_fit_duts[fit_dut_index]
if min_track_hits[fit_dut_index] is None:
actual_min_track_hits = len(fit_duts)
else:
actual_min_track_hits = min_track_hits[fit_dut_index]
if actual_min_track_hits < 2:
                    raise ValueError('The number of required hits is smaller than 2. Cannot fit tracks for %s.' % telescope[actual_fit_dut].name)
dut_fit_mask = 0 # DUTs to be used for the fit
for dut_index in fit_duts:
dut_fit_mask |= ((1 << dut_index))
if actual_min_track_hits > len(fit_duts):
raise RuntimeError("min_track_hits for DUT%d is larger than the number of fit DUTs" % (actual_fit_dut,))
logging.info('Require at least %d hits in %d DUTs for track selection: %s', actual_min_track_hits, len(fit_duts), ', '.join([telescope[curr_dut].name for curr_dut in fit_duts]))
if scattering_planes is not None:
logging.info('Adding the following scattering planes: %s', ', '.join([scp.name for scp in scattering_planes]))
# Actual *change* of alignment parameters and covariance
actual_align_state = np.zeros(shape=(len(telescope), 6), dtype=np.float64) # No change at beginning
actual_align_cov = np.zeros(shape=(len(telescope), 6, 6), dtype=np.float64)
# Calculate initial alignment
initial_rotation_matrix, initial_position_vector, actual_align_cov = _calculate_initial_alignment(telescope, select_duts, select_telescope_duts, alignment_parameters, actual_align_cov, alignment_parameters_errors)
# Loop over tracks in chunks and perform alignment.
_alignment_loop(actual_align_state, actual_align_cov, initial_rotation_matrix, initial_position_vector)
fitted_duts.extend(actual_fit_duts)
output_pdf_file = output_alignment_file[:-3] + '.pdf'
# Plot alignment result
plot_utils.plot_kf_alignment(output_alignment_file, telescope, output_pdf_file)
# Delete tmp track candidates file
os.remove(input_track_candidates_file)
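# --- Editor's note (illustration only, not part of the original module). ---
# Track selection above encodes "DUT i has a hit" as bit i of hit_flag and
# keeps a track only if every bit of dut_hit_mask is set:
#     (hit_flag & dut_hit_mask) == dut_hit_mask
# A standalone demonstration of that selection:
def _example_hit_mask_selection():
    import numpy as np
    hit_duts = [0, 1, 2]  # DUTs required to have hits
    dut_hit_mask = 0
    for dut_index in hit_duts:
        dut_hit_mask |= (1 << dut_index)  # -> 0b111
    hit_flags = np.array([0b111, 0b101, 0b1111, 0b011])
    # Keeps 0b111 and 0b1111; drops tracks missing a hit in DUT 1 or DUT 2.
    return hit_flags[(hit_flags & dut_hit_mask) == dut_hit_mask]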
def align_telescope(telescope_configuration, select_telescope_duts, reference_dut=None):
telescope = Telescope(telescope_configuration)
logging.info('= Beam-alignment of the telescope =')
logging.info('Use %d DUTs for beam-alignment: %s', len(select_telescope_duts), ', '.join([telescope[index].name for index in select_telescope_duts]))
telescope_duts_positions = np.full((len(select_telescope_duts), 3), fill_value=np.nan, dtype=np.float64)
for index, dut_index in enumerate(select_telescope_duts):
telescope_duts_positions[index, 0] = telescope[dut_index].translation_x
telescope_duts_positions[index, 1] = telescope[dut_index].translation_y
telescope_duts_positions[index, 2] = telescope[dut_index].translation_z
# the x and y translation for the reference DUT will be set to 0
if reference_dut is not None:
first_telescope_dut_index = reference_dut
else:
# calculate reference DUT, use DUT with the smallest z position
first_telescope_dut_index = select_telescope_duts[np.argmin(telescope_duts_positions[:, 2])]
offset, slope = line_fit_3d(positions=telescope_duts_positions)
first_telescope_dut = telescope[first_telescope_dut_index]
logging.info('Reference DUT for beam-alignment: %s', first_telescope_dut.name)
first_dut_translation_x = first_telescope_dut.translation_x
first_dut_translation_y = first_telescope_dut.translation_y
first_telescope_dut_intersection = geometry_utils.get_line_intersections_with_dut(
line_origins=offset[np.newaxis, :],
line_directions=slope[np.newaxis, :],
translation_x=first_telescope_dut.translation_x,
translation_y=first_telescope_dut.translation_y,
translation_z=first_telescope_dut.translation_z,
rotation_alpha=first_telescope_dut.rotation_alpha,
rotation_beta=first_telescope_dut.rotation_beta,
rotation_gamma=first_telescope_dut.rotation_gamma)
for actual_dut in telescope:
dut_intersection = geometry_utils.get_line_intersections_with_dut(
line_origins=offset[np.newaxis, :],
line_directions=slope[np.newaxis, :],
translation_x=actual_dut.translation_x,
translation_y=actual_dut.translation_y,
translation_z=actual_dut.translation_z,
rotation_alpha=actual_dut.rotation_alpha,
rotation_beta=actual_dut.rotation_beta,
rotation_gamma=actual_dut.rotation_gamma)
actual_dut.translation_x -= (dut_intersection[0, 0] - first_telescope_dut_intersection[0, 0] + first_dut_translation_x)
actual_dut.translation_y -= (dut_intersection[0, 1] - first_telescope_dut_intersection[0, 1] + first_dut_translation_y)
# set telescope alpha/beta rotation for a better beam alignment and track finding improvement
# this is compensating the previously made changes to the DUT coordinates
total_angles, alpha_angles, beta_angles = get_angles(
slopes=slope[np.newaxis, :],
xz_plane_normal=np.array([0.0, 1.0, 0.0]),
yz_plane_normal=np.array([1.0, 0.0, 0.0]),
dut_plane_normal=np.array([0.0, 0.0, 1.0]))
telescope.rotation_alpha -= alpha_angles[0]
telescope.rotation_beta -= beta_angles[0]
telescope.save_configuration()
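# --- Editor's note (illustration only, not part of the original module). ---
# align_telescope() relies on line_fit_3d() (imported elsewhere in this
# package) to fit a straight line through the telescope-plane positions.
# A minimal sketch of such a fit via SVD, under the assumption that the
# returned values are a point on the line (offset) and its unit direction
# vector (slope):
def _example_line_fit_3d(positions):
    import numpy as np
    positions = np.asarray(positions, dtype=np.float64)
    offset = positions.mean(axis=0)  # centroid lies on the best-fit line
    _, _, vh = np.linalg.svd(positions - offset)  # rows of vh: principal directions
    slope = vh[0] / np.linalg.norm(vh[0])  # unit direction vector
    return offset, slope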
def calculate_transformation(telescope_configuration, input_tracks_file, select_duts, select_alignment_parameters=None, use_limits=True, max_iterations=None, chunk_size=1000000):
'''Takes the tracks and calculates and stores the transformation parameters.
Parameters
----------
telescope_configuration : string
Filename of the telescope configuration file.
input_tracks_file : string
Filename of the input tracks file.
select_duts : list
Selecting DUTs that will be processed.
select_alignment_parameters : list
Selecting the transformation parameters that will be stored to the telescope configuration file for each selected DUT.
If None, all 6 transformation parameters will be calculated.
use_limits : bool
If True, use column and row limits from pre-alignment for selecting the data.
chunk_size : int
Chunk size of the data when reading from file.
'''
telescope = Telescope(telescope_configuration)
logging.info('== Calculating transformation for %d DUTs ==' % len(select_duts))
if select_alignment_parameters is None:
select_alignment_parameters = [default_alignment_parameters] * len(select_duts)
if len(select_duts) != len(select_alignment_parameters):
raise ValueError("Parameter select_alignment_parameters has the wrong length.")
for index, actual_alignment_parameters in enumerate(select_alignment_parameters):
if actual_alignment_parameters is None:
select_alignment_parameters[index] = default_alignment_parameters
else:
            non_valid_parameters = set(actual_alignment_parameters) - set(default_alignment_parameters)
            if non_valid_parameters:
                raise ValueError("Found invalid values in parameter select_alignment_parameters: %s." % ", ".join(non_valid_parameters))
with tb.open_file(input_tracks_file, mode='r') as in_file_h5:
for index, actual_dut_index in enumerate(select_duts):
actual_dut = telescope[actual_dut_index]
node = in_file_h5.get_node(in_file_h5.root, 'Tracks_DUT%d' % actual_dut_index)
logging.info('= Calculate transformation for %s =', actual_dut.name)
logging.info("Modify alignment parameters: %s", ', '.join([alignment_paramter for alignment_paramter in select_alignment_parameters[index]]))
if use_limits:
limit_x_local = actual_dut.x_limit # (lower limit, upper limit)
limit_y_local = actual_dut.y_limit # (lower limit, upper limit)
else:
limit_x_local = None
limit_y_local = None
rotation_average = None
translation_average = None
# euler_angles_average = None
# calculate equal chunk size
start_val = max(int(node.nrows / chunk_size), 2)
while True:
                chunk_indices = np.linspace(0, node.nrows, start_val).astype(np.int64)
if np.all(np.diff(chunk_indices) <= chunk_size):
break
start_val += 1
chunk_index = 0
n_tracks = 0
total_n_tracks = 0
while chunk_indices[chunk_index] < node.nrows:
tracks_chunk = node.read(start=chunk_indices[chunk_index], stop=chunk_indices[chunk_index + 1])
# select good hits and tracks
                selection = np.logical_and(~np.isnan(tracks_chunk['x_dut_%d' % actual_dut_index]), ~np.isnan(tracks_chunk['track_chi2']))
                tracks_chunk = tracks_chunk[selection]  # take only tracks where the actual DUT has a hit, otherwise the residual is wrong
                # Hit coordinates in the local coordinate system of the DUT (x, y, z)
hit_x_local, hit_y_local, hit_z_local = tracks_chunk['x_dut_%d' % actual_dut_index], tracks_chunk['y_dut_%d' % actual_dut_index], tracks_chunk['z_dut_%d' % actual_dut_index]
offsets = np.column_stack(actual_dut.local_to_global_position(
x=tracks_chunk['offset_x'],
y=tracks_chunk['offset_y'],
z=tracks_chunk['offset_z']))
slopes = np.column_stack(actual_dut.local_to_global_position(
x=tracks_chunk['slope_x'],
y=tracks_chunk['slope_y'],
z=tracks_chunk['slope_z'],
translation_x=0.0,
translation_y=0.0,
translation_z=0.0,
rotation_alpha=actual_dut.rotation_alpha,
rotation_beta=actual_dut.rotation_beta,
rotation_gamma=actual_dut.rotation_gamma))
if not np.allclose(hit_z_local, 0.0):
raise RuntimeError("Transformation into local coordinate system gives z != 0")
                limit_xy_local_sel = np.ones_like(hit_x_local, dtype=bool)
if limit_x_local is not None and np.isfinite(limit_x_local[0]):
limit_xy_local_sel &= hit_x_local >= limit_x_local[0]
if limit_x_local is not None and np.isfinite(limit_x_local[1]):
limit_xy_local_sel &= hit_x_local <= limit_x_local[1]
if limit_y_local is not None and np.isfinite(limit_y_local[0]):
limit_xy_local_sel &= hit_y_local >= limit_y_local[0]
if limit_y_local is not None and np.isfinite(limit_y_local[1]):
limit_xy_local_sel &= hit_y_local <= limit_y_local[1]
hit_x_local = hit_x_local[limit_xy_local_sel]
hit_y_local = hit_y_local[limit_xy_local_sel]
hit_z_local = hit_z_local[limit_xy_local_sel]
hit_local = np.column_stack([hit_x_local, hit_y_local, hit_z_local])
slopes = slopes[limit_xy_local_sel]
offsets = offsets[limit_xy_local_sel]
n_tracks = np.count_nonzero(limit_xy_local_sel)
x_dut_start = actual_dut.translation_x
y_dut_start = actual_dut.translation_y
z_dut_start = actual_dut.translation_z
alpha_dut_start = actual_dut.rotation_alpha
beta_dut_start = actual_dut.rotation_beta
gamma_dut_start = actual_dut.rotation_gamma
delta_t = 0.9 # TODO: optimize
if max_iterations is None:
iterations = 100
else:
iterations = max_iterations
lin_alpha = 1.0
initialize_angles = True
translation_old = None
rotation_old = None
for i in range(iterations):
if initialize_angles:
alpha, beta, gamma = alpha_dut_start, beta_dut_start, gamma_dut_start
rotation = geometry_utils.rotation_matrix(
alpha=alpha,
beta=beta,
gamma=gamma)
                        translation = np.array([x_dut_start, y_dut_start, z_dut_start], dtype=np.float64)
# imports
import numpy as np
import tensorflow as tf
from numpy import random
import math
import time
import matplotlib.pyplot as plt
"""Part 1 - Forward Propagation"""
def initialize_parameters(layer_dims):
"""
Description: This function initializes weights and biases
    :param layer_dims: an array of the dimensions of each layer in the
        network (layer 0 is the size of the flattened input, layer L is the output softmax)
:return: a dictionary containing the initialized W and b parameters of each layer (W1…WL, b1…bL).
"""
parameters = {}
for l in range(1, len(layer_dims)):
parameters[f'W{l}'] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * np.sqrt(2 / layer_dims[l - 1])
parameters[f'b{l}'] = np.zeros(shape=(layer_dims[l], 1))
return parameters
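# A quick usage sketch (hypothetical layer sizes): He initialization scales the
# weights by sqrt(2 / fan_in), which keeps activation variance roughly constant
# across ReLU layers.
# >>> params = initialize_parameters([784, 64, 10])
# >>> params['W1'].shape, params['b1'].shape   # -> ((64, 784), (64, 1))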
def linear_forward(A, W, b):
"""
Description: Implement the linear part of a layer's forward propagation.
:param A: the activations of the previous layer
:param W: the weight matrix of the current layer (of shape [size of current layer, size of previous layer])
:param b: the bias vector of the current layer (of shape [size of current layer, 1])
:return: Z: the linear component of the activation function (i.e., the value before applying the non-linear function)
:return: linear_cache: a dictionary containing A, W, b (stored for making the backpropagation easier to compute)
"""
Z = np.dot(W, A) + b
linear_cache = dict({'A': A, 'W': W, 'b': b})
return Z, linear_cache
def softmax(Z):
"""
Description: Implementation of softmax function
:param Z: the linear component of the activation function
:return: A: the activations of the layer
:return: activation_cache: returns Z, which will be useful for the backpropagation
"""
    # Subtracting the per-column max before exponentiating avoids overflow
    # and leaves the softmax output unchanged.
    numerator = np.exp(Z - np.max(Z, axis=0, keepdims=True))
denominator = np.sum(numerator, axis=0, keepdims=True)
A = numerator / denominator
activation_cache = Z
return A, activation_cache
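# Sanity-check sketch: each column of the softmax output is a probability
# distribution, so every column should sum to 1.
# >>> A, _ = softmax(np.array([[1.0, 2.0], [3.0, 0.5]]))
# >>> np.allclose(A.sum(axis=0), 1.0)   # -> True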
def relu(Z):
"""
Description: Implementation of relu function
:param Z: the linear component of the activation function
:return: A: the activations of the layer
:return: activation_cache: returns Z, which will be useful for the backpropagation
"""
A = np.maximum(0, Z)
activation_cache = Z
return A, activation_cache
def linear_activation_forward(A_prev, W, B, activation):
"""
Description: Implement the forward propagation for the LINEAR->ACTIVATION layer
:param A_prev: activations of the previous layer
:param W: the weights matrix of the current layer
:param B: the bias vector of the current layer
:param activation: the activation function to be used (a string, either “softmax” or “relu”)
:return: A: the activations of the current layer
:return: cache: a joint dictionary containing both linear_cache and activation_cache
"""
if activation in ['relu', 'softmax']:
Z, linear_cache = linear_forward(A_prev, W, B)
A, activation_cache = globals()[activation](Z)
cache = dict({'linear_cache': linear_cache, 'activation_cache': activation_cache})
return A, cache
else:
        raise NotImplementedError(
            "The given activation function is not implemented. Please choose either 'relu' or 'softmax'.")
def l_model_forward(X, parameters, use_batchnorm):
"""
Description: Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SOFTMAX computation
:param X: the data, numpy array of shape (input size, number of examples)
:param parameters: the initialized W and b parameters of each layer
    :param use_batchnorm: a boolean flag used to determine whether to apply batchnorm after the activation
    :return: AL: the last post-activation value
:return: caches: a list of all the cache objects generated by the linear_forward function
"""
caches = list()
A_prev = X
num_layers = int(len(parameters.keys()) / 2)
for l in range(1, num_layers):
W = parameters[f'W{l}']
B = parameters[f'b{l}']
A_prev, cache = linear_activation_forward(A_prev, W, B, 'relu')
if use_batchnorm:
A_prev = apply_batchnorm(A_prev)
caches.append(cache)
W = parameters[f'W{num_layers}']
B = parameters[f'b{num_layers}']
AL, cache = linear_activation_forward(A_prev, W, B, 'softmax')
caches.append(cache)
return AL, caches
def compute_cost(AL, Y):
"""
    Description: Implement the cost function; the requested cost function is categorical cross-entropy loss.
:param AL: probability vector corresponding to your label predictions, shape (num_of_classes, number of examples)
:param Y: the labels vector (i.e. the ground truth)
:return: cost: the cross-entropy cost
"""
inner_sum_classes = np.sum(Y * np.log(AL), axis=0, keepdims=True)
    outer_sum_samples = np.sum(inner_sum_classes, axis=1)
    # The source is truncated at this point; the remainder below is a standard
    # cross-entropy completion (an assumption): average over the m examples and negate.
    m = Y.shape[1]
    cost = -outer_sum_samples.item() / m
    return cost
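# Worked example of the cost above (illustrative numbers): for a single sample
# whose one-hot label picks class 0 and whose prediction is AL = [[0.8], [0.2]],
# the cost is -log(0.8) ~= 0.223.
# >>> compute_cost(np.array([[0.8], [0.2]]), np.array([[1.0], [0.0]]))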
import numpy as np
import numpy.linalg as la
import torch
import torch.nn.functional as F
import torchvision
import json
import time
from matplotlib import pyplot as plt
#from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm, trange
from lietorch import SE3, LieGroupParameter
from scipy.spatial.transform import Rotation as R
import cv2
from nerf import (get_ray_bundle, run_one_iter_of_nerf)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def mahalanobis(u, v, cov):
delta = u - v
m = torch.dot(delta, torch.matmul(torch.inverse(cov), delta))
return m
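# Minimal usage sketch: with an identity covariance the Mahalanobis metric
# reduces to the squared Euclidean distance.
# >>> u, v = torch.tensor([1., 2., 3.]), torch.tensor([1., 0., 0.])
# >>> mahalanobis(u, v, torch.eye(3))   # -> tensor(13.) (= 0 + 4 + 9)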
rot_x = lambda phi: torch.tensor([
[1., 0., 0.],
[0., torch.cos(phi), -torch.sin(phi)],
[0., torch.sin(phi), torch.cos(phi)]], dtype=torch.float32)
rot_x_np = lambda phi: np.array([
[1., 0., 0.],
[0., np.cos(phi), -np.sin(phi)],
    [0., np.sin(phi), np.cos(phi)]])
import numpy as np
from scipy.misc import imresize
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
X_L = 10
L = 14
N_BATCH = 50
OBS_SIZE = 20
KEEP = 0.6
# ---------------------------- helpers
def black_white(img):
new_img = np.copy(img)
img_flat = img.flatten()
nonzeros = img_flat[np.nonzero(img_flat)]
sortedd = np.sort(nonzeros)
idxx = round(len(sortedd) * (1.0 - KEEP))
thold = sortedd[idxx]
mask_pos = img >= thold
mask_neg = img < thold
new_img[mask_pos] = 1.0
new_img[mask_neg] = 0.0
return new_img
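# Note on the thresholding above: only the brightest KEEP fraction of the
# nonzero pixels is set to 1.0, so with KEEP = 0.6 roughly 60% of the nonzero
# pixels survive binarization.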
def vectorize(coords):
retX, retY = np.zeros([L]), np.zeros([L])
retX[coords[0]] = 1.0
retY[coords[1]] = 1.0
return retX, retY
# show dimension of a data object (list of list or a tensor)
def show_dim(lst1):
if hasattr(lst1, '__len__') and len(lst1) > 0:
return [len(lst1), show_dim(lst1[0])]
else:
try:
return lst1.get_shape()
except:
try:
return lst1.shape
except:
return type(lst1)
# -------------------------------------- making the datas
# assume X is already a 2D matrix
def mk_query(X):
def query(O):
Ox, Oy = O
if X[Ox][Oy] == 1.0:
return [1.0, 0.0]
else:
return [0.0, 1.0]
return query
def sample_coord():
Ox, Oy = np.random.multivariate_normal([L/2,L/2], [[L*0.7, 0.0], [0.0, L*0.7]])
Ox, Oy = round(Ox), round(Oy)
if 0 <= Ox < L:
if 0 <= Oy < L:
return Ox, Oy
return sample_coord()
def sample_coord_bias(qq):
def find_positive(qq):
C = sample_coord()
if qq(C) == [1.0, 0.0]:
return C
else:
return find_positive(qq)
def find_negative(qq):
C = sample_coord()
if qq(C) == [0.0, 1.0]:
return C
else:
return find_negative(qq)
toss = np.random.random() < 0.5
if toss:
return find_positive(qq)
else:
return find_negative(qq)
def gen_O(X):
query = mk_query(X)
Ox, Oy = sample_coord_bias(query)
O = (Ox, Oy)
return O, query(O)
def get_img_class(test=False):
img, _x = mnist.train.next_batch(1)
if test:
img, _x = mnist.test.next_batch(1)
img = np.reshape(img[0], [2*L,2*L])
# rescale the image to 14 x 14
    img = imresize(img, (14,14), interp='nearest') / 255.0
img = black_white(img)
return img, _x
def gen_data():
x = []
obs_x = [[] for i in range(OBS_SIZE)]
obs_y = [[] for i in range(OBS_SIZE)]
obs_tfs = [[] for i in range(OBS_SIZE)]
new_ob_x = []
new_ob_y = []
new_ob_tf = []
imgs = []
for bb in range(N_BATCH):
# generate a hidden variable X
# get a single thing out
img, _x = get_img_class()
imgs.append(img)
# add to x
x.append(_x[0])
# generate new observation
_new_ob_coord, _new_ob_lab = gen_O(img)
_new_ob_x, _new_ob_y = vectorize(_new_ob_coord)
new_ob_x.append(_new_ob_x)
new_ob_y.append(_new_ob_y)
new_ob_tf.append(_new_ob_lab)
# generate observations for this hidden variable x
for ob_idx in range(OBS_SIZE):
_ob_coord, _ob_lab = gen_O(img)
_ob_x, _ob_y = vectorize(_ob_coord)
obs_x[ob_idx].append(_ob_x)
obs_y[ob_idx].append(_ob_y)
obs_tfs[ob_idx].append(_ob_lab)
return np.array(x, np.float32),\
np.array(obs_x, np.float32),\
np.array(obs_y, np.float32),\
           np.array(obs_tfs, np.float32)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 21 12:04:45 2020
import statement for ease of access
@author: steve
"""
import numpy as np
from kep_util_globvar import M1,M2,H,J
from kep_derivative_functions import nabla_q,nabla_H,nabla_l
from kep_derivative_functions import partial_Hpp,partial_Hqq,hessian_H,hessian_l
from kep_derivative_functions import gradient,modified_gradient_en_ang_attractor,modified_gradient_energy_attractor
# %% BASIC NUMERICAL FLOW FUNCTIONS
# advances solution forward one timestep using the explicit euler method
def exp_euler(y):
# calculate the gradient
grady = gradient(y)
y_next = y + H*grady
return y_next
# the gradient is modified so that the energy manifold is an attractor
def exp_euler_modified_energy_attractor(y):
grady_mod = modified_gradient_energy_attractor(y)
y_next = y + H*grady_mod
return y_next
# the gradient is modified so that the energy and angular momentum manifolds are attractors
def exp_euler_modified_energy_ang_attractor(y):
grady_mod = modified_gradient_en_ang_attractor(y)
y_next = y + H*grady_mod
return y_next
# advances solution forward one timestep using Stromer-Verlet scheme (p8)
def stromer_verlet(y):
grady = gradient(y)
    # STEP 1 : p_{n+1/2} = p_n + h/2 * \dot p_n   (momentum half-step)
p1plushalf = y[0] + 0.5*grady[0]*H
p2plushalf = y[1] + 0.5*grady[1]*H
# STEP2 : q_{n+1} = q_n + h\dot q_{n+1/2}
q1_next = y[2] + H*p1plushalf/M1
q2_next = y[3] + H*p2plushalf/M2
# STEP 3 : p_{n+1} = p_{n+1/2} - h/2 \frac{\partial H(p,q)}{\partial q}
nablaq_next = nabla_q([q1_next,q2_next])
p1_next = p1plushalf + 0.5*H* nablaq_next[0]
p2_next = p2plushalf + 0.5*H* nablaq_next[1]
y_next = np.array([p1_next,p2_next,q1_next,q2_next])
return y_next
# advances solution one timestep using explicit trapezium rule (runge kutta) p28
def exp_trapezium(y):
k1 = gradient(y)
k2 = gradient(y + H*k1)
y_next = y + 0.5*H*(k1+k2)
return y_next
# advances solution one timestep using explicit midpoint rule (runge kutta) p28
def exp_midpoint(y):
k1 = gradient(y)
k2 = gradient(y + H*k1*0.5)
y_next = y + H*k2
return y_next
# with u=(p1,p2) and v=(q1,q2) we use syplectic euler method, easy because
# u' = a(v) and v' = b(u) , single variable dependence
def syplectic_euler(y):
grady = gradient(y)
u,v,u_prime = y[:2],y[2:],grady[:2]
# first advance u_n to u_n+1 by explicit euler
u_next = u + H*u_prime
# then advance v_n to v_n+1 by implicit euler
grady_2 = gradient(np.array([u_next[0],u_next[1],v[0],v[1]]))
v_next_prime = grady_2[2:]
v_next = v + H*v_next_prime
y_next = np.concatenate([u_next,v_next])
return y_next
def fourth_order_kutta(y):
# equation 1.8 left p30 - numerical methods book
k1 = gradient(y)
k2 = gradient(y + 0.5*H*k1)
k3 = gradient(y + 0.5*H*k2)
k4 = gradient(y + H*k3)
y_next = y + H/6*(k1 + 2*k2 + 2*k3 + k4)
return y_next
# %% UTILITY FUNCTIONS FOR MODIFIED EQ INTEGRATORS
# returns the scalar lambda_2 as defined in subsection 'Modified Equations for Numerical Flow of projection methods'
# in section 'Backward Error Analysis' (5) of the report
def lambda2(y,J,hessian_beta,nabla_beta,nabla_hamil,d2):
hess_b = hessian_beta(y)
nab_b = nabla_beta(y).flatten()
nab_H = nabla_hamil(y).flatten()
jinv_nab_H = -J @ nab_H
first_term = 0.5 * np.dot(hess_b @ jinv_nab_H , jinv_nab_H)
second_term = np.dot(nab_b , d2(y))
return (first_term + second_term) / np.dot(nab_b,nab_b)
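# In formula form, the multiplier computed above is
#   lambda_2 = [ (1/2) * (Hess(beta) J^{-1} grad(H)) . (J^{-1} grad(H))
#                + grad(beta) . d2 ] / (grad(beta) . grad(beta)),
# where J^{-1} grad(H) is implemented as -J @ grad(H), which is valid for the
# canonical symplectic matrix (J^2 = -I, hence J^{-1} = -J).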
def lambda3(y,J,threetensor_beta,hessian_beta,nabla_hamil,nabla_beta,d3,d2):
return
# returns term in the equation that comes from energy projection
def get_o2_projection_term_energy(y,d2):
lam2 = lambda2(y,J,hessian_beta = hessian_H,
nabla_beta = nabla_H, nabla_hamil = nabla_H,
d2 = d2)
return -nabla_H(y).flatten() * lam2
# returns term in the equation that comes from angular momentum projection
def get_o2_projection_term_ang_mom(y,d2):
lam2 = lambda2(y,J,hessian_beta = hessian_l,
nabla_beta = nabla_l, nabla_hamil = nabla_H,
d2 = d2)
return -nabla_l(y).flatten() * lam2
# %% NUMERICAL FLOW FUNCTIONS
# helper function computes second term of numerical flow
def d2_exp_euler(y):
# this one is trivial
return np.array((0,0,0,0,0,0,0,0))
def d3_exp_euler(y):
return np.array((0,0,0,0,0,0,0,0))
# NOT YET WRITTEN
def d2_stromer_verlet(y):
nab_H = nabla_H(y).flatten() #p11,p12,p21,p22,q11,q12,q21,q22
ham_p,ham_q = nab_H[:4],nab_H[4:]
ham_pp,ham_qq = partial_Hpp(y),partial_Hqq(y)
return - 0.5 * np.concatenate([ham_qq @ ham_p , ham_pp @ ham_q])
def d3_stromer_verlet(y):
return
# %% MODIFIED GRADIENT FUNCTIONS
def mod_flow_o2_no_proj(y,d2,h):
f = gradient(y).flatten()
nab_H = nabla_H(y).flatten() #p11,p12,p21,p22,q11,q12,q21,q22
ham_p,ham_q = nab_H[:4],nab_H[4:]
ham_pp,ham_qq = partial_Hpp(y),partial_Hqq(y)
# construct the second order term
f2 = 0.5 * np.concatenate([ham_qq @ ham_p , ham_pp @ ham_q]) + d2(y).flatten()
return f + h*f2 # (warning, no reformatting here, still flat)
def mod_flow_o2_no_proj_unflattened(y,d2,h):
f = gradient(y).flatten()
nab_H = nabla_H(y).flatten() #p11,p12,p21,p22,q11,q12,q21,q22
ham_p,ham_q = nab_H[:4],nab_H[4:]
ham_pp,ham_qq = partial_Hpp(y),partial_Hqq(y)
# construct the second order term
f2 = 0.5 * np.concatenate([ham_qq @ ham_p , ham_pp @ ham_q]) + d2(y).flatten()
m2 = f + h*f2
m2 = np.array((m2[0:2],m2[2:4],m2[4:6],m2[6:]))
return m2
def mod_flow_o2_energy_proj(y,d2,h):
m1 = mod_flow_o2_no_proj(y,d2,h)
proj_term = get_o2_projection_term_energy(y,d2) # add the energy projection term
m2 = m1 + h*proj_term
m2 = np.array((m2[0:2],m2[2:4],m2[4:6],m2[6:]))
return m2
def mod_flow_o2_ang_proj(y,d2,h):
m1 = mod_flow_o2_no_proj(y,d2,h)
proj_term = get_o2_projection_term_ang_mom(y,d2)
m2 = m1 + h*proj_term
m2 = np.array((m2[0:2],m2[2:4],m2[4:6],m2[6:]))
return m2
def mod_flow_o2_energy_and_ang_proj(y,d2,h):
m1 = mod_flow_o2_no_proj(y,d2,h)
proj_term_energy = get_o2_projection_term_energy(y,d2)
proj_term_ang_mom = get_o2_projection_term_ang_mom(y,d2)
m2 = m1 + h*(proj_term_energy + proj_term_ang_mom)
    m2 = np.array((m2[0:2],m2[2:4],m2[4:6],m2[6:]))
    return m2
import argparse
import itertools
import json
import os
import re
from collections import defaultdict
from pathlib import Path
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from tensorboard.backend.event_processing import event_accumulator
numbers = re.compile(r'(\d+)')
BENCHMARK_THRESHOLD = 25.0
SG6 = ['PointGoal1', 'PointGoal2', 'CarGoal1', 'PointButton1', 'PointPush1', 'DoggoGoal1']
def numerical_sort(value):
value = str(value)
parts = numbers.split(value)
parts[1::2] = map(int, parts[1::2])
return parts
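# Natural-sort sketch: splitting on digit runs makes 'file2' sort before
# 'file10', which plain lexicographic sorting would get wrong.
# >>> sorted(['file10', 'file2'], key=numerical_sort)   # -> ['file2', 'file10']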
def parse_tf_event_file(file_path):
print('Parsing event file {}'.format(file_path))
ea = event_accumulator.EventAccumulator(file_path)
ea.Reload()
if any(map(lambda metric: metric not in ea.scalars.Keys(),
['evaluation/average_return',
'evaluation/average_cost_return',
'training/episode_cost_return'])
):
return [], [], [], []
rl_objective, safety_objective, timesteps = [], [], []
for i, (objective, cost_objective) in enumerate(zip(
ea.Scalars('evaluation/average_return'), ea.Scalars('evaluation/average_cost_return')
)):
rl_objective.append(objective.value)
safety_objective.append(cost_objective.value)
timesteps.append(objective.step)
sum_costs = 0.0
sum_costs_per_step = []
costs_iter = iter(ea.Scalars('training/episode_cost_return'))
for step in timesteps:
while True:
cost = next(costs_iter)
sum_costs += cost.value
if cost.step >= step:
break
sum_costs_per_step.append(sum_costs)
return rl_objective, safety_objective, sum_costs_per_step, timesteps
def parse(experiment_path, run, max_steps):
run_rl_objective, run_cost_objective, run_sum_costs, run_timesteps = [], [], [], []
files = list(Path(experiment_path).glob(os.path.join(run, 'events.out.tfevents.*')))
last_time = -1
all_sum_costs = 0
for file in sorted(files, key=numerical_sort):
objective, cost_objective, sum_costs, timestamps = parse_tf_event_file(
str(file)
)
if not all([objective, cost_objective, sum_costs, timestamps]):
print("Not all metrics are available!")
continue
# Filter out time overlaps, taking the first event file.
run_rl_objective += [obj for obj, stamp in zip(objective, timestamps) if
last_time < stamp <= max_steps]
run_cost_objective += [obj for obj, stamp in zip(cost_objective, timestamps) if
last_time < stamp <= max_steps]
run_sum_costs += [(cost + all_sum_costs) / stamp for cost, stamp in zip(
sum_costs, timestamps
) if last_time < stamp <= max_steps]
run_timesteps += [stamp for stamp in timestamps if last_time < stamp <= max_steps]
last_time = timestamps[-1]
all_sum_costs = run_sum_costs[-1] * last_time
return run_rl_objective, run_cost_objective, run_sum_costs, run_timesteps
def parse_experiment_data(experiment_path, max_steps=2e6):
rl_objectives, cost_objectives, sum_costs, all_timesteps = [], [], [], []
for metrics in map(
parse, itertools.repeat(experiment_path), next(os.walk(experiment_path))[1],
itertools.repeat(max_steps)
):
run_rl_objective, run_cost_objective, run_sum_costs, run_timesteps = metrics
rl_objectives.append(run_rl_objective)
cost_objectives.append(run_cost_objective)
sum_costs.append(run_sum_costs)
all_timesteps.append(run_timesteps)
return (
np.asarray(rl_objectives), np.asarray(cost_objectives),
np.asarray(sum_costs), np.asarray(all_timesteps)
)
def median_percentiles(metric):
median = np.median(metric, axis=0)
upper_percentile = np.percentile(metric, 95, axis=0, interpolation='linear')
lower_percentile = np.percentile(metric, 5, axis=0, interpolation='linear')
return median, upper_percentile, lower_percentile
def make_statistics(eval_rl_objectives, eval_mean_sum_costs, sum_costs, timesteps):
objectives_median, objectives_upper, objectives_lower = median_percentiles(eval_rl_objectives)
mean_sum_costs_median, mean_sum_costs_upper, mean_sum_costs_lower = median_percentiles(
eval_mean_sum_costs)
average_costs_median, average_costs_upper, average_costs_lower = median_percentiles(sum_costs)
return dict(objectives_median=objectives_median,
objectives_upper=objectives_upper,
objectives_lower=objectives_lower,
mean_sum_costs_median=mean_sum_costs_median,
mean_sum_costs_upper=mean_sum_costs_upper,
mean_sum_costs_lower=mean_sum_costs_lower,
average_costs_median=average_costs_median,
average_costs_upper=average_costs_upper,
average_costs_lower=average_costs_lower,
timesteps=timesteps[0]
)
def draw(ax, timesteps, median, upper, lower, label):
ax.plot(timesteps, median, label=label)
ax.fill_between(timesteps, lower, upper, alpha=0.2)
ax.ticklabel_format(axis='x', style='sci', scilimits=(0, 0))
ax.set_xlim([0, timesteps[-1]])
ax.xaxis.set_major_locator(ticker.MaxNLocator(5, steps=[1, 2, 2.5, 5, 10]))
ax.yaxis.set_major_locator(ticker.MaxNLocator(5, steps=[1, 2, 2.5, 5, 10]))
def resolve_name(name):
if 'not_safe' in name:
return 'Unsafe LAMBDA'
elif 'la_mbda' in name:
return 'LAMBDA'
elif 'greedy' in name:
return 'Greedy LAMBDA'
elif 'cem_mpc' in name:
return 'CEM-MPC'
else:
return ""
def resolve_environment(name):
if 'point_goal1' in name:
return 'PointGoal1'
elif 'point_goal2' in name:
return 'PointGoal2'
elif 'car_goal1' in name:
return 'CarGoal1'
elif 'point_button1' in name:
return 'PointButton1'
elif 'point_push1' in name:
return 'PointPush1'
elif 'doggo_goal1' in name:
return 'DoggoGoal1'
else:
return ""
def draw_baseline(environments_paths, axes, baseline_path):
with open(baseline_path) as file:
benchmark_results = json.load(file)
for environment, env_axes in zip(environments_paths, axes):
env_name = resolve_environment(environment)
ppo_lagrangian = benchmark_results[env_name]['ppo_lagrangian']
trpo_lagrangian = benchmark_results[env_name]['trpo_lagrangian']
cpo_lagrangian = benchmark_results[env_name]['cpo']
for ax, value_cpo, value_ppo, value_trpo in zip(
env_axes, cpo_lagrangian, ppo_lagrangian, trpo_lagrangian):
lims = np.array(ax.get_xlim())
            # The source line is truncated after np.ones_like; drawing each
            # baseline value as a horizontal line is a hedged reconstruction.
            ax.plot(lims, np.ones_like(lims) * value_cpo)
            ax.plot(lims, np.ones_like(lims) * value_ppo)
            ax.plot(lims, np.ones_like(lims) * value_trpo)
#!/usr/bin/python
import sys, getopt
import os
import pandas as pd
import numpy as np
import pyquaternion as pyq
from pyquaternion import Quaternion
from scipy import signal
from scipy.spatial.transform import Slerp
from scipy.spatial.transform import Rotation as R
def main(argv):
inputfile = ''
calfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:c:o:",["ifile=", "cfile=","ofile="])
except getopt.GetoptError:
print('test.py -i <inputfile> -c <calfile> -o <outputfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('test.py -i <inputfile> -c calfile -o <outputfile>')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-c", "--ifile"):
calfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
# Creating Functions
def orientation_matrix(q0, q1, q2, q3):
# based on https://automaticaddison.com/how-to-convert-a-quaternion-to-a-rotation-matrix/
r11 = 2 * (q0 ** 2 + q1 ** 2) - 1
r12 = 2 * (q1 * q2 - q0 * q3)
r13 = 2 * (q1 * q3 + q0 * q2)
r21 = 2 * (q1 * q2 + q0 * q3)
r22 = 2 * (q0 ** 2 + q2 ** 2) - 1
r23 = 2 * (q2 * q3 - q0 * q1)
r31 = 2 * (q1 * q3 - q0 * q2)
r32 = 2 * (q2 * q3 + q0 * q1)
r33 = 2 * (q0 ** 2 + q3 ** 2) - 1
return r11, r12, r13, r21, r22, r23, r31, r32, r33
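    # Sanity-check sketch: the identity quaternion (q0=1, q1=q2=q3=0) maps to
    # the identity rotation matrix.
    # >>> orientation_matrix(1.0, 0.0, 0.0, 0.0)
    # (1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)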
def compute_relative_orientation(seg, cal):
'''
Calculating the relative orientation between two matrices. This is used for the initial normalization
procedure using the standing calibration
'''
R_11 = np.array([])
R_12 = np.array([])
R_13 = np.array([])
R_21 = np.array([])
R_22 = np.array([])
R_23 = np.array([])
R_31 = np.array([])
R_32 = np.array([])
R_33 = np.array([])
for i in range(seg.shape[0]):
segment = np.asmatrix([
[np.array(seg['o11'])[i], np.array(seg['o12'])[i], np.array(seg['o13'])[i]],
[np.array(seg['o21'])[i], np.array(seg['o22'])[i], np.array(seg['o23'])[i]],
[np.array(seg['o31'])[i], np.array(seg['o32'])[i], np.array(seg['o33'])[i]]
])
segment_cal = np.asmatrix([
[np.array(cal['o11'])[i], np.array(cal['o12'])[i], np.array(cal['o13'])[i]],
[np.array(cal['o21'])[i], np.array(cal['o22'])[i], np.array(cal['o23'])[i]],
[np.array(cal['o31'])[i], np.array(cal['o32'])[i], np.array(cal['o33'])[i]]
])
# normalization
r = np.matmul(segment, segment_cal.T)
new_orientations = np.asarray(r).reshape(-1)
R_11 = np.append(R_11, new_orientations[0])
R_12 = np.append(R_12, new_orientations[1])
R_13 = np.append(R_13, new_orientations[2])
R_21 = np.append(R_21, new_orientations[3])
R_22 = np.append(R_22, new_orientations[4])
R_23 = np.append(R_23, new_orientations[5])
R_31 = np.append(R_31, new_orientations[6])
R_32 = np.append(R_32, new_orientations[7])
R_33 = np.append(R_33, new_orientations[8])
return R_11, R_12, R_13, R_21, R_22, R_23, R_31, R_32, R_33
def compute_joint_angle(df, child, parent):
c = df[df[' jointType'] == child]
p = df[df[' jointType'] == parent]
ml = np.array([])
ap = np.array([])
v = np.array([])
# Compute Rotation Matrix Components
for i in range(c.shape[0]):
segment = np.asmatrix([
[np.array(c['n_o11'])[i], np.array(c['n_o12'])[i], np.array(c['n_o13'])[i]],
[np.array(c['n_o21'])[i], np.array(c['n_o22'])[i], np.array(c['n_o23'])[i]],
[np.array(c['n_o31'])[i], np.array(c['n_o32'])[i], np.array(c['n_o33'])[i]]
])
reference_segment = np.asmatrix([
[np.array(p['n_o11'])[i], np.array(p['n_o12'])[i], np.array(p['n_o13'])[i]],
[np.array(p['n_o21'])[i], np.array(p['n_o22'])[i], np.array(p['n_o23'])[i]],
[np.array(p['n_o31'])[i], np.array(p['n_o32'])[i], np.array(p['n_o33'])[i]]
])
# transformation of segment to reference segment
r = np.matmul(reference_segment.T, segment)
# decomposition to Euler angles
rotations = R.from_matrix(r).as_euler('xyz', degrees=True)
ml = np.append(ml, rotations[0])
ap = np.append(ap, rotations[1])
v = np.append(v, rotations[2])
return ml, ap, v
def resample_df(d, new_freq=30, method='linear'):
# Resamples data at 30Hz unless otherwise specified
joints_without_quats = [3, 15, 19, 21, 22, 23, 24]
resampled_df = pd.DataFrame(
columns=['# timestamp', ' jointType', ' orientation.X', ' orientation.Y', ' orientation.Z',
' orientation.W', ' position.X', ' position.Y', ' position.Z'])
new_df = pd.DataFrame()
for i in d[' jointType'].unique():
current_df = d.loc[d[' jointType'] == i].copy()
old_times = np.array(current_df['# timestamp'])
new_times = np.arange(min(current_df['# timestamp']), max(current_df['# timestamp']), 1 / new_freq)
o_x = np.array(current_df[' orientation.X'])
o_y = np.array(current_df[' orientation.Y'])
o_z = np.array(current_df[' orientation.Z'])
o_w = np.array(current_df[' orientation.W'])
p_x = np.array(current_df[' position.X'])
p_y = np.array(current_df[' position.Y'])
p_z = np.array(current_df[' position.Z'])
if i in joints_without_quats:
orientation_x = np.repeat(0.0, len(new_times))
orientation_y = np.repeat(0.0, len(new_times))
orientation_z = np.repeat(0.0, len(new_times))
orientation_w = np.repeat(0.0, len(new_times))
else:
if method == "linear":
orientation_x = np.interp(new_times, old_times, o_x)
orientation_y = np.interp(new_times, old_times, o_y)
orientation_z = np.interp(new_times, old_times, o_z)
orientation_w = np.interp(new_times, old_times, o_w)
elif method == 'slerp':
quats = []
for t in range(len(old_times)):
quats.append([o_x[t], o_y[t], o_z[t], o_w[t]])
# Create rotation object
quats_object = R.from_quat(quats)
# Spherical Linear Interpolation
slerp = Slerp(np.array(current_df['# timestamp']), quats_object)
interp_rots = slerp(new_times)
new_quats = interp_rots.as_quat()
# Create new orientation objects
orientation_x = np.array([item[0] for item in new_quats])
orientation_y = np.array([item[1] for item in new_quats])
orientation_z = np.array([item[2] for item in new_quats])
orientation_w = np.array([item[3] for item in new_quats])
else:
raise ValueError("Method must be either linear or spherical (slerp) interpolation.")
position_x = signal.resample(p_x, num=int(max(current_df['# timestamp']) * new_freq))
position_y = signal.resample(p_y, num=int(max(current_df['# timestamp']) * new_freq))
position_z = signal.resample(p_z, num=int(max(current_df['# timestamp']) * new_freq))
new_df['# timestamp'] = pd.Series(new_times)
new_df[' jointType'] = pd.Series(np.repeat(i, len(new_times)))
new_df[' orientation.X'] = pd.Series(orientation_x)
new_df[' orientation.Y'] = pd.Series(orientation_y)
new_df[' orientation.Z'] = pd.Series(orientation_z)
new_df[' orientation.W'] = pd.Series(orientation_w)
new_df[' position.X'] = pd.Series(position_x)
new_df[' position.Y'] = pd.Series(position_y)
new_df[' position.Z'] = pd.Series(position_z)
            resampled_df = pd.concat([resampled_df, new_df], ignore_index=True)
return resampled_df
def smooth_rotations(o_x, o_y, o_z, o_w):
o_x = np.array(o_x)
o_y = np.array(o_y)
o_z = np.array(o_z)
o_w = np.array(o_w)
trajNoisy = []
for i in range(len(o_x)):
trajNoisy.append([o_x[i], o_y[i], o_z[i], o_w[i]])
trajNoisy = np.array(trajNoisy)
# This code was adapted from https://ww2.mathworks.cn/help/nav/ug/lowpass-filter-orientation-using-quaternion-slerp.html
# As explained in the link above, "The interpolation parameter to slerp is in the closed-interval [0,1], so the output of dist
# must be re-normalized to this range. However, the full range of [0,1] for the interpolation parameter gives poor performance,
# so it is limited to a smaller range hrange centered at hbias."
hrange = 0.4
hbias = 0.4
low = max(min(hbias - (hrange / 2), 1), 0)
high = max(min(hbias + (hrange / 2), 1), 0)
hrangeLimited = high - low
# initial filter state is the quaternion at frame 0
y = trajNoisy[0]
qout = []
for i in range(1, len(trajNoisy)):
x = trajNoisy[i]
# x = mathutils.Quaternion(x)
# y = mathutils.Quaternion(y)
# d = x.rotation_difference(y).angle
x = pyq.Quaternion(x)
y = pyq.Quaternion(y)
d = (x.conjugate * y).angle
# Renormalize dist output to the range [low, high]
hlpf = (d / np.pi) * hrangeLimited + low
# y = y.slerp(x, hlpf)
y = Quaternion.slerp(y, x, hlpf).elements
qout.append(np.array(y))
        # Because a frame of data is lost during this process, an extra quaternion identical to the
        # (n-1)th frame is (arbitrarily) appended at the end of the trial. This keeps the length
        # consistent (so there is no issue with merging later) and should not negatively impact the
        # data, since the last frame is rarely of interest (and the data collector can record for a
        # split second after the trial of interest to attenuate any errors that may propagate into
        # the analyses).
qout.append(qout[int(len(qout) - 1)])
orientation_x = [item[0] for item in qout]
orientation_y = [item[1] for item in qout]
orientation_z = [item[2] for item in qout]
orientation_w = [item[3] for item in qout]
return orientation_x, orientation_y, orientation_z, orientation_w
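    # Design note on smooth_rotations: hlpf acts as an adaptive SLERP weight,
    # i.e. a first-order low-pass filter on orientation. Large angular jumps
    # between consecutive frames pull the filter state further toward the new
    # sample, while small jitters are mostly smoothed away.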
def smooth_quaternions(d):
for i in d[' jointType'].unique():
current_df = d.loc[d[' jointType'] == i].copy()
current_df[' orientation.X'], current_df[' orientation.Y'], current_df[' orientation.Z'], current_df[
' orientation.W'] = smooth_rotations(current_df[' orientation.X'], current_df[' orientation.Y'],
current_df[' orientation.Z'], current_df[' orientation.W'])
d[d[' jointType'] == i] = current_df
return d
def compute_segment_angle(df, SEGMENT):
s = df[df[' jointType'] == SEGMENT]
ml = np.array([])
ap = np.array([])
v = np.array([])
# Compute Rotation Matrix Components
for i in range(s.shape[0]):
segment = np.asmatrix([
[np.array(s['n_o11'])[i], np.array(s['n_o12'])[i], np.array(s['n_o13'])[i]],
[np.array(s['n_o21'])[i], np.array(s['n_o22'])[i], np.array(s['n_o23'])[i]],
[np.array(s['n_o31'])[i], np.array(s['n_o32'])[i], np.array(s['n_o33'])[i]]
])
# decomposition to Euler angles
rotations = R.from_matrix(segment).as_euler('xyz', degrees=True)
ml = np.append(ml, rotations[0])
ap = np.append(ap, rotations[1])
v = np.append(v, rotations[2])
return ml, ap, v
dir = os.getcwd()
# Loading Data
print('... Loading data')
cal = pd.read_csv(os.path.join(dir, calfile))
df = pd.read_csv(os.path.join(dir, inputfile))
df['# timestamp'] = df['# timestamp'] * 10 ** -3
cal['# timestamp'] = cal['# timestamp'] * 10 ** -3
df_reoriented = df.copy()
cal_reoriented = cal.copy()
print('... Reorienting LCSs')
# Hips
df_reoriented.loc[df[' jointType'] == 16, ' orientation.X'] = df.loc[df[' jointType'] == 16, ' orientation.Z']
df_reoriented.loc[df[' jointType'] == 16, ' orientation.Y'] = df.loc[df[' jointType'] == 16, ' orientation.X']
df_reoriented.loc[df[' jointType'] == 16, ' orientation.Z'] = df.loc[df[' jointType'] == 16, ' orientation.Y']
cal_reoriented.loc[cal[' jointType'] == 16, ' orientation.X'] = cal.loc[cal[' jointType'] == 16, ' orientation.Z']
cal_reoriented.loc[cal[' jointType'] == 16, ' orientation.Y'] = cal.loc[cal[' jointType'] == 16, ' orientation.X']
cal_reoriented.loc[cal[' jointType'] == 16, ' orientation.Z'] = cal.loc[cal[' jointType'] == 16, ' orientation.Y']
df_reoriented.loc[df[' jointType'] == 12, ' orientation.X'] = df.loc[df[' jointType'] == 12, ' orientation.Z']
df_reoriented.loc[df[' jointType'] == 12, ' orientation.Y'] = df.loc[df[' jointType'] == 12, ' orientation.X'] * -1
df_reoriented.loc[df[' jointType'] == 12, ' orientation.Z'] = df.loc[df[' jointType'] == 12, ' orientation.Y'] * -1
cal_reoriented.loc[cal[' jointType'] == 12, ' orientation.X'] = cal.loc[cal[' jointType'] == 12, ' orientation.Z']
cal_reoriented.loc[cal[' jointType'] == 12, ' orientation.Y'] = cal.loc[cal[' jointType'] == 12, ' orientation.X'] * -1
cal_reoriented.loc[cal[' jointType'] == 12, ' orientation.Z'] = cal.loc[cal[' jointType'] == 12, ' orientation.Y'] * -1
# Knees
df_reoriented.loc[df[' jointType'] == 17, ' orientation.X'] = df.loc[df[' jointType'] == 17, ' orientation.X'] * -1
df_reoriented.loc[df[' jointType'] == 17, ' orientation.Y'] = df.loc[df[' jointType'] == 17, ' orientation.Y'] * -1
df_reoriented.loc[df[' jointType'] == 17, ' orientation.Z'] = df.loc[df[' jointType'] == 17, ' orientation.Z']
cal_reoriented.loc[cal[' jointType'] == 17, ' orientation.X'] = cal.loc[cal[' jointType'] == 17, ' orientation.X'] * -1
cal_reoriented.loc[cal[' jointType'] == 17, ' orientation.Y'] = cal.loc[cal[' jointType'] == 17, ' orientation.Y'] * -1
cal_reoriented.loc[cal[' jointType'] == 17, ' orientation.Z'] = cal.loc[cal[' jointType'] == 17, ' orientation.Z']
df_reoriented.loc[df[' jointType'] == 13, ' orientation.X'] = df.loc[df[' jointType'] == 13, ' orientation.X']
df_reoriented.loc[df[' jointType'] == 13, ' orientation.Y'] = df.loc[df[' jointType'] == 13, ' orientation.Y'] * -1
df_reoriented.loc[df[' jointType'] == 13, ' orientation.Z'] = df.loc[df[' jointType'] == 13, ' orientation.Z'] * -1
cal_reoriented.loc[cal[' jointType'] == 13, ' orientation.X'] = cal.loc[cal[' jointType'] == 13, ' orientation.X']
cal_reoriented.loc[cal[' jointType'] == 13, ' orientation.Y'] = cal.loc[cal[' jointType'] == 13, ' orientation.Y'] * -1
cal_reoriented.loc[cal[' jointType'] == 13, ' orientation.Z'] = cal.loc[cal[' jointType'] == 13, ' orientation.Z'] * -1
# Ankles
df_reoriented.loc[df[' jointType'] == 18, ' orientation.X'] = df.loc[df[' jointType'] == 18, ' orientation.X'] * -1
df_reoriented.loc[df[' jointType'] == 18, ' orientation.Y'] = df.loc[df[' jointType'] == 18, ' orientation.Y'] * -1
df_reoriented.loc[df[' jointType'] == 18, ' orientation.Z'] = df.loc[df[' jointType'] == 18, ' orientation.Z']
cal_reoriented.loc[cal[' jointType'] == 18, ' orientation.X'] = cal.loc[cal[' jointType'] == 18, ' orientation.X'] * -1
cal_reoriented.loc[cal[' jointType'] == 18, ' orientation.Y'] = cal.loc[cal[' jointType'] == 18, ' orientation.Y'] * -1
cal_reoriented.loc[cal[' jointType'] == 18, ' orientation.Z'] = cal.loc[cal[' jointType'] == 18, ' orientation.Z']
df_reoriented.loc[df[' jointType'] == 14, ' orientation.X'] = df.loc[df[' jointType'] == 14, ' orientation.X']
df_reoriented.loc[df[' jointType'] == 14, ' orientation.Y'] = df.loc[df[' jointType'] == 14, ' orientation.Y'] * -1
df_reoriented.loc[df[' jointType'] == 14, ' orientation.Z'] = df.loc[df[' jointType'] == 14, ' orientation.Z'] * -1
cal_reoriented.loc[cal[' jointType'] == 14, ' orientation.X'] = cal.loc[cal[' jointType'] == 14, ' orientation.X']
cal_reoriented.loc[cal[' jointType'] == 14, ' orientation.Y'] = cal.loc[cal[' jointType'] == 14, ' orientation.Y'] * -1
cal_reoriented.loc[cal[' jointType'] == 14, ' orientation.Z'] = cal.loc[cal[' jointType'] == 14, ' orientation.Z'] * -1
# Resampling data to 30Hz
df_reoriented = resample_df(df_reoriented, new_freq=30, method='slerp')
# Smooth Quaternion Rotations
df_reoriented = smooth_quaternions(df_reoriented)
# need to re-sort and reset the index following the resampling
df_reoriented = df_reoriented.sort_values(by=['# timestamp', ' jointType']).reset_index()
df_reoriented['o11'], df_reoriented['o12'], df_reoriented['o13'], df_reoriented['o21'], df_reoriented['o22'], \
df_reoriented['o23'], df_reoriented['o31'], df_reoriented['o32'], df_reoriented['o33'] \
= orientation_matrix(df_reoriented[' orientation.W'], df_reoriented[' orientation.X'],
df_reoriented[' orientation.Y'], df_reoriented[' orientation.Z'])
cal_reoriented['o11'], cal_reoriented['o12'], cal_reoriented['o13'], cal_reoriented['o21'], cal_reoriented['o22'], \
cal_reoriented['o23'], cal_reoriented['o31'], cal_reoriented['o32'], cal_reoriented['o33'] \
= orientation_matrix(cal_reoriented[' orientation.W'], cal_reoriented[' orientation.X'],
cal_reoriented[' orientation.Y'], cal_reoriented[' orientation.Z'])
df_reoriented.set_index(' jointType', inplace=True)
cal_reoriented.set_index(' jointType', inplace=True)
cal_reoriented = cal_reoriented.groupby(' jointType').mean().drop(columns=['# timestamp'])
cal_reoriented = pd.concat([cal_reoriented] * np.int64(df_reoriented.shape[0] / 25))
print('... Normalizing to calibration pose')
# Normalize orientations to calibration pose
df_reoriented['n_o11'], df_reoriented['n_o12'], df_reoriented['n_o13'], df_reoriented['n_o21'], df_reoriented[
'n_o22'], \
df_reoriented['n_o23'], df_reoriented['n_o31'], df_reoriented['n_o32'], df_reoriented['n_o33'] \
= np.array(compute_relative_orientation(cal_reoriented, df_reoriented))
df_reoriented.reset_index(inplace=True)
print('... Computing joint angles')
r_hipFlexion, r_hipAbduction, r_hipV = compute_joint_angle(df_reoriented, child=17, parent=16)
l_hipFlexion, l_hipAbduction, l_hipV = compute_joint_angle(df_reoriented, child=13, parent=12)
r_kneeFlexion, r_kneeAbduction, r_kneeV = compute_joint_angle(df_reoriented, child=18, parent=17)
l_kneeFlexion, l_kneeAbduction, l_kneeV = compute_joint_angle(df_reoriented, child=14, parent=13)
# Note that 16 or 12 can be used for the pelvis (given Kinect's definitions)
pelvis_rotation = compute_segment_angle(df_reoriented, 16)[0]
r_thigh_rotation = compute_segment_angle(df_reoriented, 17)[0]
l_thigh_rotation = compute_segment_angle(df_reoriented, 13)[0]
r_shank_rotation = compute_segment_angle(df_reoriented, 18)[0]
l_shank_rotation = compute_segment_angle(df_reoriented, 14)[0]
new_df = pd.DataFrame({
'frame': np.arange(df_reoriented['# timestamp'].unique().shape[0]),
'timeStamp': df_reoriented['# timestamp'].unique(),
# Below are adjusted for relatively easy anatomical interpretations
'r_hipFlexion' : r_hipFlexion,
'l_hipFlexion' : l_hipFlexion*-1,
'r_hipAbduction' : r_hipAbduction*-1,
'l_hipAbduction' : l_hipAbduction,
'r_hipV' : r_hipV *-1,
'l_hipV' : l_hipV *-1,
'r_kneeFlexion' : r_kneeFlexion*-1,
'l_kneeFlexion' : l_kneeFlexion,
'r_kneeAdduction' : r_kneeAbduction,
'l_kneeAdduction' : l_kneeAbduction*-1,
'r_kneeV' : r_kneeV*-1,
'l_kneeV' : l_kneeV,
# Below are adjusted specifically for use with relative phase analyses
'pelvis_rotation': pelvis_rotation,
'r_thigh_rotation': r_thigh_rotation,
'l_thigh_rotation': l_thigh_rotation*-1,
'r_shank_rotation': r_shank_rotation,
'l_shank_rotation': l_shank_rotation*-1,
# Below are left in the GCS
'r_hip_x': np.array(df_reoriented[df_reoriented[' jointType'] == 16][' position.X']),
'r_hip_y': np.array(df_reoriented[df_reoriented[' jointType'] == 16][' position.Y']),
'r_hip_z': np.array(df_reoriented[df_reoriented[' jointType'] == 16][' position.Z']),
'l_hip_x': np.array(df_reoriented[df_reoriented[' jointType'] == 12][' position.X']),
'l_hip_y': np.array(df_reoriented[df_reoriented[' jointType'] == 12][' position.Y']),
'l_hip_z': np.array(df_reoriented[df_reoriented[' jointType'] == 12][' position.Z']),
'r_knee_x': np.array(df_reoriented[df_reoriented[' jointType'] == 17][' position.X']),
'r_knee_y': np.array(df_reoriented[df_reoriented[' jointType'] == 17][' position.Y']),
'r_knee_z': np.array(df_reoriented[df_reoriented[' jointType'] == 17][' position.Z']),
'l_knee_x': np.array(df_reoriented[df_reoriented[' jointType'] == 13][' position.X']),
'l_knee_y': np.array(df_reoriented[df_reoriented[' jointType'] == 13][' position.Y']),
'l_knee_z': np.array(df_reoriented[df_reoriented[' jointType'] == 13][' position.Z']),
'r_ankle_x': np.array(df_reoriented[df_reoriented[' jointType'] == 18][' position.X']),
'r_ankle_y': np.array(df_reoriented[df_reoriented[' jointType'] == 18][' position.Y']),
'r_ankle_z': np.array(df_reoriented[df_reoriented[' jointType'] == 18][' position.Z']),
'l_ankle_x': np.array(df_reoriented[df_reoriented[' jointType'] == 14][' position.X']),
'l_ankle_y': np.array(df_reoriented[df_reoriented[' jointType'] == 14][' position.Y']),
'l_ankle_z': np.array(df_reoriented[df_reoriented[' jointType'] == 14][' position.Z']),
'r_foot_x': np.array(df_reoriented[df_reoriented[' jointType'] == 19][' position.X']),
'r_foot_y': np.array(df_reoriented[df_reoriented[' jointType'] == 19][' position.Y']),
'r_foot_z': np.array(df_reoriented[df_reoriented[' jointType'] == 19][' position.Z']),
'l_foot_x': np.array(df_reoriented[df_reoriented[' jointType'] == 15][' position.X']),
'l_foot_y': np.array(df_reoriented[df_reoriented[' jointType'] == 15][' position.Y']),
'l_foot_z': np.array(df_reoriented[df_reoriented[' jointType'] == 15][' position.Z']),
'spinebase_x': np.array(df_reoriented[df_reoriented[' jointType'] == 0][' position.X']),
'spinebase_y': np.array(df_reoriented[df_reoriented[' jointType'] == 0][' position.Y']),
'spinebase_z': np.array(df_reoriented[df_reoriented[' jointType'] == 0][' position.Z']),
'spinemid_x': np.array(df_reoriented[df_reoriented[' jointType'] == 1][' position.X']),
'spinemid_y': np.array(df_reoriented[df_reoriented[' jointType'] == 1][' position.Y']),
'spinemid_z': np.array(df_reoriented[df_reoriented[' jointType'] == 1][' position.Z']),
'neck_x': np.array(df_reoriented[df_reoriented[' jointType'] == 2][' position.X']),
'neck_y': np.array(df_reoriented[df_reoriented[' jointType'] == 2][' position.Y']),
'neck_z': np.array(df_reoriented[df_reoriented[' jointType'] == 2][' position.Z']),
'head_x': np.array(df_reoriented[df_reoriented[' jointType'] == 3][' position.X']),
        'head_y': np.array(df_reoriented[df_reoriented[' jointType'] == 3][' position.Y']),
        'head_z': np.array(df_reoriented[df_reoriented[' jointType'] == 3][' position.Z']),
    })
    # The source is truncated here; saving the result and the entry point below
    # are assumptions based on the script's argument parsing.
    print('... Saving output')
    new_df.to_csv(os.path.join(dir, outputfile), index=False)

if __name__ == "__main__":
    main(sys.argv[1:])
'''
Functions dealing with (n,d) points
'''
import numpy as np
from .constants import log, tol
from .geometry import plane_transform
from . import transformations
from . import util
def point_plane_distance(points, plane_normal, plane_origin=[0, 0, 0]):
w = np.array(points) - plane_origin
distances = np.dot(plane_normal, w.T) / np.linalg.norm(plane_normal)
return distances
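# Minimal usage sketch: signed distances to the z = 0 plane are just the
# z-coordinates of the points.
# >>> point_plane_distance([[0, 0, 1], [0, 0, -2]], [0, 0, 1])
# array([ 1., -2.])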
def major_axis(points):
'''
Returns an approximate vector representing the major axis of points
'''
U, S, V = np.linalg.svd(points)
axis = util.unitize(np.dot(S, V))
return axis
def plane_fit(points, tolerance=None):
'''
Given a set of points, find an origin and normal using least squares
Parameters
---------
points: (n,3)
tolerance: how non-planar the result can be without raising an error
Returns
---------
C: (3) point on the plane
N: (3) normal vector
'''
C = points[0]
x = points - C
M = np.dot(x.T, x)
N = np.linalg.svd(M)[0][:, -1]
    if tolerance is not None:
normal_range = np.ptp(np.dot(N, points.T))
if normal_range > tol.planar:
log.error('Points have peak to peak of %f', normal_range)
raise ValueError('Plane outside tolerance!')
return C, N
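# Usage sketch: points sampled from the z = 0 plane recover a normal along
# +/- z (the SVD fixes the normal only up to sign).
# >>> C, N = plane_fit(np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]))
# >>> np.allclose(np.abs(N), [0, 0, 1])   # -> True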
def radial_sort(points,
origin=None,
normal=None):
'''
Sorts a set of points radially (by angle) around an origin/normal.
If origin/normal aren't specified, it sorts around centroid
and the approximate plane the points lie in.
points: (n,3) set of points
'''
# if origin and normal aren't specified, generate one at the centroid
if origin is None:
origin = np.average(points, axis=0)
if normal is None:
normal = surface_normal(points)
# create two axis perpendicular to each other and the normal,
# and project the points onto them
axis0 = [normal[0], normal[2], -normal[1]]
axis1 = np.cross(normal, axis0)
ptVec = points - origin
pr0 = np.dot(ptVec, axis0)
pr1 = np.dot(ptVec, axis1)
# calculate the angles of the points on the axis
angles = np.arctan2(pr0, pr1)
# return the points sorted by angle
    return points[np.argsort(angles)]
def project_to_plane(points,
plane_normal=[0, 0, 1],
plane_origin=[0, 0, 0],
transform=None,
return_transform=False,
return_planar=True):
'''
Projects a set of (n,3) points onto a plane.
Parameters
---------
points: (n,3) array of points
plane_normal: (3) normal vector of plane
plane_origin: (3) point on plane
transform: None or (4,4) matrix. If specified, normal/origin are ignored
return_transform: bool, if true returns the (4,4) matrix used to project points
onto a plane
return_planar: bool, if True, returns (n,2) points. If False, returns
(n,3), where the Z column consists of zeros
'''
if np.all(np.abs(plane_normal) < tol.zero):
        raise ValueError('Normal must be nonzero!')
if transform is None:
transform = plane_transform(plane_origin, plane_normal)
transformed = transformations.transform_points(points, transform)
transformed = transformed[:, 0:(3 - int(return_planar))]
if return_transform:
polygon_to_3D = np.linalg.inv(transform)
return transformed, polygon_to_3D
return transformed
def absolute_orientation(points_A, points_B, return_error=False):
'''
Calculates the transform that best aligns points_A with points_B
Uses Horn's method for the absolute orientation problem, in 3D with no scaling.
Parameters
---------
points_A: (n,3) list of points
points_B: (n,3) list of points, T*points_A
return_error: boolean, if True returns (n) list of euclidean distances
representing the distance from T*points_A[i] to points_B[i]
Returns
---------
M: (4,4) transformation matrix for the transform that best aligns
points_A to points_B
error: float, list of maximum euclidean distance
'''
points_A = np.array(points_A)
points_B = np.array(points_B)
if (points_A.shape != points_B.shape):
raise ValueError('Points must be of the same shape!')
if len(points_A.shape) != 2 or points_A.shape[1] != 3:
raise ValueError('Points must be (n,3)!')
lc = np.average(points_A, axis=0)
rc = np.average(points_B, axis=0)
left = points_A - lc
right = points_B - rc
M = np.dot(left.T, right)
[[Sxx, Sxy, Sxz],
[Syx, Syy, Syz],
[Szx, Szy, Szz]] = M
N = [[(Sxx + Syy + Szz), (Syz - Szy), (Szx - Sxz), (Sxy - Syx)],
[(Syz - Szy), (Sxx - Syy - Szz), (Sxy + Syx), (Szx + Sxz)],
[(Szx - Sxz), (Sxy + Syx), (-Sxx + Syy - Szz), (Syz + Szy)],
[(Sxy - Syx), (Szx + Sxz), (Syz + Szy), (-Sxx - Syy + Szz)]]
(w, v) = np.linalg.eig(N)
q = v[:, np.argmax(w)]
q = q / np.linalg.norm(q)
M1 = [[q[0], -q[1], -q[2], -q[3]],
[q[1], q[0], q[3], -q[2]],
[q[2], -q[3], q[0], q[1]],
[q[3], q[2], -q[1], q[0]]]
M2 = [[q[0], -q[1], -q[2], -q[3]],
[q[1], q[0], -q[3], q[2]],
[q[2], q[3], q[0], -q[1]],
[q[3], -q[2], q[1], q[0]]]
R = np.dot(np.transpose(M1), M2)[1:4, 1:4]
T = rc - np.dot(R, lc)
    M = np.eye(4)
    # The source is truncated here; the completion below (consistent with the
    # docstring) packs R and T into a homogeneous matrix and optionally returns
    # the maximum per-point alignment error.
    M[0:3, 0:3] = R
    M[0:3, 3] = T
    if return_error:
        errors = np.sqrt(np.sum(
            (transformations.transform_points(points_A, M) - points_B) ** 2, axis=1))
        return M, errors.max()
    return M
import batoid
import numpy as np
from test_helpers import timer, init_gpu, rays_allclose, checkAngle, do_pickle
@timer
def test_properties():
rng = np.random.default_rng(5)
size = 10
for i in range(100):
x = rng.normal(size=size)
y = rng.normal(size=size)
z = rng.normal(size=size)
vx = rng.normal(size=size)
vy = rng.normal(size=size)
vz = rng.normal(size=size)
t = rng.normal(size=size)
w = rng.normal(size=size)
fx = rng.normal(size=size)
vig = rng.choice([True, False], size=size)
fa = rng.choice([True, False], size=size)
cs = batoid.CoordSys(
origin=rng.normal(size=3),
rot=batoid.RotX(rng.normal())@batoid.RotY(rng.normal())
)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, w, fx, vig, fa, cs)
np.testing.assert_array_equal(rv.x, x)
np.testing.assert_array_equal(rv.y, y)
np.testing.assert_array_equal(rv.z, z)
np.testing.assert_array_equal(rv.r[:, 0], x)
np.testing.assert_array_equal(rv.r[:, 1], y)
np.testing.assert_array_equal(rv.r[:, 2], z)
np.testing.assert_array_equal(rv.vx, vx)
np.testing.assert_array_equal(rv.vy, vy)
np.testing.assert_array_equal(rv.vz, vz)
np.testing.assert_array_equal(rv.v[:, 0], vx)
np.testing.assert_array_equal(rv.v[:, 1], vy)
np.testing.assert_array_equal(rv.v[:, 2], vz)
np.testing.assert_array_equal(rv.k[:, 0], rv.kx)
np.testing.assert_array_equal(rv.k[:, 1], rv.ky)
np.testing.assert_array_equal(rv.k[:, 2], rv.kz)
np.testing.assert_array_equal(rv.t, t)
np.testing.assert_array_equal(rv.wavelength, w)
np.testing.assert_array_equal(rv.flux, fx)
np.testing.assert_array_equal(rv.vignetted, vig)
np.testing.assert_array_equal(rv.failed, fa)
assert rv.coordSys == cs
rv._syncToDevice()
do_pickle(rv)
@timer
def test_positionAtTime():
rng = np.random.default_rng(57)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
# Try with default t=0 first
rv = batoid.RayVector(x, y, z, vx, vy, vz)
np.testing.assert_equal(rv.x, x)
np.testing.assert_equal(rv.y, y)
np.testing.assert_equal(rv.z, z)
np.testing.assert_equal(rv.vx, vx)
np.testing.assert_equal(rv.vy, vy)
np.testing.assert_equal(rv.vz, vz)
np.testing.assert_equal(rv.t, 0.0)
np.testing.assert_equal(rv.wavelength, 0.0)
for t1 in [0.0, 1.0, -1.1, 2.5]:
np.testing.assert_equal(
rv.positionAtTime(t1),
rv.r + t1 * rv.v
)
# Now add some random t's
t = rng.uniform(-1.0, 1.0, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t)
np.testing.assert_equal(rv.x, x)
np.testing.assert_equal(rv.y, y)
np.testing.assert_equal(rv.z, z)
np.testing.assert_equal(rv.vx, vx)
np.testing.assert_equal(rv.vy, vy)
np.testing.assert_equal(rv.vz, vz)
np.testing.assert_equal(rv.t, t)
np.testing.assert_equal(rv.wavelength, 0.0)
for t1 in [0.0, 1.4, -1.3, 2.1]:
np.testing.assert_equal(
rv.positionAtTime(t1),
rv.r + rv.v*(t1-rv.t)[:,None]
)
@timer
def test_propagate():
rng = np.random.default_rng(577)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
# Try with default t=0 first
rv = batoid.RayVector(x, y, z, vx, vy, vz)
for t1 in [0.0, 1.0, -1.1, 2.5]:
rvcopy = rv.copy()
r1 = rv.positionAtTime(t1)
rvcopy.propagate(t1)
np.testing.assert_equal(
rvcopy.r,
r1
)
np.testing.assert_equal(
rvcopy.v,
rv.v
)
np.testing.assert_equal(
rvcopy.t,
t1
)
# Now add some random t's
t = rng.uniform(-1.0, 1.0, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t)
for t1 in [0.0, 1.0, -1.1, 2.5]:
rvcopy = rv.copy()
r1 = rv.positionAtTime(t1)
rvcopy.propagate(t1)
np.testing.assert_equal(
rvcopy.r,
r1
)
np.testing.assert_equal(
rvcopy.v,
rv.v
)
np.testing.assert_equal(
rvcopy.t,
t1
)
@timer
def test_phase():
rng = np.random.default_rng(5772)
size = 10_000
for n in [1.0, 1.3]:
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0/(n*n) - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, wavelength)
# First explicitly check that phase is 0 at position and time of individual
# rays
for i in rng.choice(size, size=10):
np.testing.assert_equal(
rv.phase(rv.r[i], rv.t[i])[i],
0.0
)
# Now use actual formula
# phi = k.(r-r0) - (t-t0)omega
# k = 2 pi v / lambda |v|^2
# omega = 2 pi / lambda
# |v| = 1 / n
for r1, t1 in [
((0, 0, 0), 0),
((0, 1, 2), 3),
((-1, 2, 4), -1),
((0, 1, -4), -2)
]:
phi = np.einsum("ij,ij->i", rv.v, r1-rv.r)
phi *= n*n
phi -= (t1-rv.t)
phi *= 2*np.pi/wavelength
np.testing.assert_allclose(
rv.phase(r1, t1),
phi,
rtol=0,
atol=1e-7
)
for i in rng.choice(size, size=10):
s = slice(i, i+1)
rvi = batoid.RayVector(
x[s], y[s], z[s],
vx[s], vy[s], vz[s],
t[s].copy(), wavelength[s].copy()
)
# Move integer number of wavelengths ahead
ti = rvi.t[0]
wi = rvi.wavelength[0]
r1 = rvi.positionAtTime(ti + 5123456789*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=2e-5)
# Half wavelength
r1 = rvi.positionAtTime(ti + 6987654321.5*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, -1.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=2e-5)
# Quarter wavelength
r1 = rvi.positionAtTime(ti + 0.25*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, 1.0, rtol=0, atol=2e-5)
# Three-quarters wavelength
r1 = rvi.positionAtTime(ti + 7182738495.75*wi)[0]
a = rvi.amplitude(r1, ti)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=2e-5)
np.testing.assert_allclose(a.imag, -1.0, rtol=0, atol=2e-5)
# We can also keep the position the same and change the time in
# half/quarter integer multiples of the period.
a = rvi.amplitude(rvi.r[0], rvi.t[0]+5e9*wi)
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+5.5)*wi)
np.testing.assert_allclose(a.real, -1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+2.25)*wi)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, -1.0, rtol=0, atol=1e-5)
a = rvi.amplitude(rvi.r[0], rvi.t[0]+(5e9+1.75)*wi)
np.testing.assert_allclose(a.real, 0.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 1.0, rtol=0, atol=1e-5)
# If we pick a point anywhere along a vector originating at the ray
# position, but orthogonal to its direction of propagation, then we
# should get phase = 0 (mod 2pi).
v1 = np.array([1.0, 0.0, 0.0])
v1 = np.cross(rvi.v[0], v1)
p1 = rvi.r[0] + v1
a = rvi.amplitude(p1, rvi.t[0])
np.testing.assert_allclose(a.real, 1.0, rtol=0, atol=1e-5)
np.testing.assert_allclose(a.imag, 0.0, rtol=0, atol=1e-5)
@timer
def test_sumAmplitude():
import time
rng = np.random.default_rng(57721)
size = 10_000
for n in [1.0, 1.3]:
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0/(n*n) - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
rv = batoid.RayVector(x, y, z, vx, vy, vz, t, wavelength)
satime = 0
atime = 0
for r1, t1 in [
((0, 0, 0), 0),
((0, 1, 2), 3),
((-1, 2, 4), -1),
((0, 1, -4), -2)
]:
at0 = time.time()
s1 = rv.sumAmplitude(r1, t1)
at1 = time.time()
s2 = np.sum(rv.amplitude(r1, t1))
at2 = time.time()
np.testing.assert_allclose(s1, s2, rtol=0, atol=1e-11)
satime += at1-at0
atime += at2-at1
# print(f"sumAplitude() time: {satime}")
# print(f"np.sum(amplitude()) time: {atime}")
@timer
def test_equals():
import time
rng = np.random.default_rng(577215)
size = 10_000
x = rng.uniform(-1, 1, size=size)
y = rng.uniform(-1, 1, size=size)
z = rng.uniform(-0.1, 0.1, size=size)
vx = rng.uniform(-0.05, 0.05, size=size)
vy = rng.uniform(-0.05, 0.05, size=size)
vz = np.sqrt(1.0 - vx*vx - vy*vy)
t = rng.uniform(-1.0, 1.0, size=size)
wavelength = rng.uniform(300e-9, 1100e-9, size=size)
flux = rng.uniform(0.9, 1.1, size=size)
vignetted = rng.choice([True, False], size=size)
failed = rng.choice([True, False], size=size)
args = x, y, z, vx, vy, vz, t, wavelength, flux, vignetted, failed
rv = batoid.RayVector(*args)
rv2 = rv.copy()
assert rv == rv2
for i in range(len(args)):
newargs = [args[i].copy() for i in range(len(args))]
ai = newargs[i]
if ai.dtype == float:
ai[0] = 1.2+ai[0]*3.45
elif ai.dtype == bool:
ai[0] = not ai[0]
# else panic!
rv2 = batoid.RayVector(*newargs)
assert rv != rv2
# Repeat, but force comparison on device
rv2 = rv.copy()
rv._rv.x.syncToDevice()
rv._rv.y.syncToDevice()
rv._rv.z.syncToDevice()
rv._rv.vx.syncToDevice()
rv._rv.vy.syncToDevice()
rv._rv.vz.syncToDevice()
rv._rv.t.syncToDevice()
rv._rv.wavelength.syncToDevice()
rv._rv.flux.syncToDevice()
rv._rv.vignetted.syncToDevice()
rv._rv.failed.syncToDevice()
assert rv == rv2
for i in range(len(args)):
newargs = [args[i].copy() for i in range(len(args))]
ai = newargs[i]
if ai.dtype == float:
ai[0] = 1.2+ai[0]*3.45
elif ai.dtype == bool:
ai[0] = not ai[0]
# else panic!
rv2 = batoid.RayVector(*newargs)
assert rv != rv2
@timer
def test_asGrid():
rng = np.random.default_rng(5772156)
for _ in range(10):
backDist = rng.uniform(9.0, 11.0)
wavelength = rng.uniform(300e-9, 1100e-9)
nx = 1
while (nx%2) == 1:
nx = rng.integers(10, 21)
lx = rng.uniform(1.0, 10.0)
dx = lx/(nx-2)
dirCos = np.array([
rng.uniform(-0.1, 0.1),
rng.uniform(-0.1, 0.1),
rng.uniform(-1.2, -0.8),
])
dirCos /= np.sqrt(np.dot(dirCos, dirCos))
# Some things that should be equivalent
grid1 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=lx, dirCos=dirCos
)
grid2 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, dx=dx, dirCos=dirCos
)
grid3 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
dx=dx, lx=lx, dirCos=dirCos
)
grid4 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0.0), dirCos=dirCos
)
theta_x, theta_y = batoid.utils.dirCosToField(*dirCos)
grid5 = batoid.RayVector.asGrid(
backDist=backDist, wavelength=wavelength,
nx=nx, lx=(lx, 0.0), theta_x=theta_x, theta_y=theta_y
)
rays_allclose(grid1, grid2)
rays_allclose(grid1, grid3)
rays_allclose(grid1, grid4)
rays_allclose(grid1, grid5)
# Check distance to chief ray
cridx = (nx//2)*nx+nx//2
obs_dist = np.sqrt(np.dot(grid1.r[cridx], grid1.r[cridx]))
np.testing.assert_allclose(obs_dist, backDist)
np.testing.assert_allclose(grid1.t, 0)
np.testing.assert_allclose(grid1.wavelength, wavelength)
np.testing.assert_allclose(grid1.vignetted, False)
np.testing.assert_allclose(grid1.failed, False)
np.testing.assert_allclose(grid1.vx, dirCos[0])
np.testing.assert_allclose(grid1.vy, dirCos[1])
        np.testing.assert_allclose(grid1.vz, dirCos[2])
import logging
import cv2
import numpy as np
import pytesseract
import os
import time
import json
import re
from multiprocessing import Pool
from Levenshtein import distance
from .input_handler import InputHandler
from .grabscreen import grab_screen
from .utils import get_config, filter_mod
# This is the position of the inventory as a fraction of the resolution
OWN_INVENTORY_ORIGIN = (0.6769531, 0.567361)
# These are the socket positions as measured at 2560x1440 resolution
# with X_SCALE and Y_SCALE applied, i.e., scale * SOCKETS[i] is the i:th
# socket's absolute pixel position with origin in the middle of the skill tree
# I think the SCALE variables are in fact useless, relics of the positions
# initially being measured at a view which wasn't zoomed out maximally
SOCKETS = {
1: (-650.565, -376.013),
2: (648.905, -396.45),
3: (6.3354, 765.658),
4: (-1700.9, 2424.17),
5: (-2800.66, -215.34),
6: (-1435.02, -2635.39),
7: (1855.53, -2360.1),
8: (2835.84, 230.5361),
9: (1225.37, 2625.76),
10: (-120.12471, 5195.44),
11: (-3580.19, 5905.92),
12: (-5395.86, 2120.42),
13: (-6030.95, -115.7007),
14: (-5400.59, -1985.18),
15: (-3035.14, -5400.87),
16: (160.10728, -5196.32),
17: (3382.05, -5195.21),
18: (5730.2, -1625.75),
19: (6465.24, 190.3341),
20: (5542.76, 1690.07),
21: (3322.76, 6090.5),
}
# The offsets are specified in the same fashion as SOCKETS and are rough
# guesses which allow us to move to the general area and later refine the
# position of the socket through template matching
SOCKET_MOVE_OFFSET = {
1: (0, 150),
2: (0, 150),
3: (0, 200),
4: (0, 150),
5: (-300, 200),
6: (-100, 150),
7: (-150, 0),
8: (0, -150),
9: (-100, -125),
10: (170, 0),
11: (-400, -900),
12: (0, 300),
13: (400, 200),
14: (-250, -150),
15: (-100, -150),
16: (150, -150),
17: (150, 500), #
18: (-300, 400),
19: (-1000, -150),
20: (-500, 500),
21: (100, -1000),
}
# Scalers for the SOCKETS positions to convert them to 2560x1440 pixel positions
X_SCALE = 0.2
Y_SCALE = 0.2
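# Example (illustrative): socket 1 sits at tree position (-650.565, -376.013),
# which scales to roughly (-130, -75) pixels from the screen center at 1440p:
#   SOCKETS[1][0] * X_SCALE, SOCKETS[1][1] * Y_SCALE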
CIRCLE_EFFECTIVE_RADIUS = 300
IMAGE_FOLDER = "data/images/"
# We're using a lot of template matching and all templates are defined here
# with matching thresholds (scores) and sizes per resolution
TEMPLATES = {
"AmbidexterityCluster.png": {
"1440p_size": (34, 34),
"1440p_threshold": 0.95,
"1080p_size": (26, 26),
"1080p_threshold": 0.95,
},
"FreeSpace.png": {
"1440p_size": (41, 41),
"1440p_threshold": 0.98,
"1080p_size": (30, 30),
"1080p_threshold": 0.98,
},
"Notable.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.89,
"1080p_size": (23, 23),
"1080p_threshold": 0.85,
},
"NotableAllocated.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.93,
"1080p_size": (23, 23),
"1080p_threshold": 0.90,
},
"Jewel.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.92,
"1080p_size": (23, 23),
"1080p_threshold": 0.92,
},
"JewelSocketed.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.9,
"1080p_size": (23, 23),
"1080p_threshold": 0.9,
},
"LargeJewel.png": {
"1440p_size": (39, 39),
"1440p_threshold": 0.9,
"1080p_size": (30, 30),
"1080p_threshold": 0.88,
},
"LargeJewelSocketed.png": {
"1440p_size": (39, 39),
"1440p_threshold": 0.9,
"1080p_size": (30, 30),
"1080p_threshold": 0.88,
},
"Skill.png": {
"1440p_size": (21, 21),
"1440p_threshold": 0.87,
"1080p_size": (15, 15),
"1080p_threshold": 0.91,
},
"SkillAllocated.png": {
"1440p_size": (21, 21),
"1440p_threshold": 0.93,
"1080p_size": (15, 15),
"1080p_threshold": 0.91,
},
}
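# Example (illustrative): templates are looked up with a prefix derived from
# the vertical resolution, e.g. at 2560x1440:
#   TEMPLATES["Notable.png"]["1440p_size"]       -> (30, 30)
#   TEMPLATES["Notable.png"]["1440p_threshold"]  -> 0.89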
# Defines the position of the text box which is cropped out and OCR'd per node
TXT_BOX = {"x": 32, "y": 0, "w": 900, "h": 320}
mod_files = {
"passives": "data/passives.json",
"passivesAlt": "data/passivesAlternatives.json",
"passivesAdd": "data/passivesAdditions.json",
"passivesVaalAdd": "data/passivesVaalAdditions.json",
}
class TreeNavigator:
def __init__(self, resolution, halt_value):
self.resolution = resolution
self.input_handler = InputHandler(self.resolution)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(message)s",
datefmt="[%H:%M:%S %d-%m-%Y]",
)
self.log = logging.getLogger("tree_nav")
self.config = get_config("tree_nav")
        self.find_mod_value_re = re.compile(r"(\(?(?:[0-9]*\.?[0-9]-?)+\)?)")
        self.nonalpha_re = re.compile(r"[^a-zA-Z]")
self.origin_pos = (self.resolution[0] / 2, self.resolution[1] / 2)
self.ingame_pos = [0, 0]
self.px_multiplier = self.resolution[0] / 2560
self.resolution_prefix = str(self.resolution[1]) + "p_"
self.templates_and_masks = self.load_templates()
self.passive_mods, self.passive_names = self.generate_good_strings(mod_files)
self.passive_nodes = list(self.passive_mods.keys()) + list(
self.passive_names.keys()
)
self.halt = halt_value
self.first_run = True
def _run(self):
return not bool(self.halt.value)
def eval_jewel(self, item_location):
self.ingame_pos = [0, 0]
item_name, item_desc = self._setup(item_location, copy=True)
pool = Pool(self.config["ocr_threads"])
jobs = {}
if self.first_run:
            # We just initiated the module and are not sure where we are
# Thus, we better rectify our position estimate before starting
self._refind_position(SOCKETS[1])
self.first_run = False
for socket_id in sorted(SOCKETS.keys()):
if not self._run():
return None, None, None
found_socket = self._move_screen_to_socket(socket_id)
if not found_socket and socket_id == 1:
self.log.info("We are lost - trying to find known location")
# We just initiated the search and have no clue where we are
# Thus, we better rectify our position estimate before starting
self._refind_position(SOCKETS[1])
socket_nodes = self._analyze_nodes(socket_id)
# Convert stats for the socket from image to lines in separate process
self.log.info("Performing asynchronous OCR")
jobs[socket_id] = pool.map_async(OCR.node_to_strings, socket_nodes)
self.log.info("Analyzed socket %s" % socket_id)
# Return to socket 1 to ease next search
self._move_to_tree_pos_using_spaces(SOCKETS[1])
self._setup(item_location)
self.log.info("Waiting for last OCR to finish")
item_stats = [
{
"socket_id": socket_id,
"socket_nodes": self._filter_ocr_lines(
jobs[socket_id].get(timeout=300)
),
}
for socket_id in jobs
]
pool.close()
pool.join()
return item_name, item_desc, item_stats
def load_templates(self, threshold=128):
templates_and_masks = {}
for template_name in TEMPLATES.keys():
template_path = os.path.join(IMAGE_FOLDER, template_name)
img = cv2.imread(template_path, cv2.IMREAD_UNCHANGED)
size = TEMPLATES[template_name][self.resolution_prefix + "size"]
channels = cv2.split(img)
mask = None
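            # Templates with an alpha channel get a binary mask so template
            # matching can ignore their transparent pixels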
if len(channels) > 3:
mask = np.array(channels[3])
mask[mask <= threshold] = 0
mask[mask > threshold] = 255
mask = cv2.resize(mask, size)
img = cv2.imread(template_path, 0)
img = cv2.resize(img, size)
templates_and_masks[template_name] = {"image": img, "mask": mask}
return templates_and_masks
def _move_screen_to_socket(self, socket_id):
self.log.debug("Moving close to socket %s" % socket_id)
move_offset_tx, move_offset_ty = SOCKET_MOVE_OFFSET[socket_id]
move_offset = self._tree_pos_to_xy(
[move_offset_tx, move_offset_ty], offset=True
)
socket_tx, socket_ty = SOCKETS[socket_id]
socket_xy = self._tree_pos_to_xy([socket_tx, socket_ty])
compensation_offset = self._find_socket(socket_xy)
if compensation_offset is None:
found_socket = False
compensation_offset = [0, 0]
else:
found_socket = True
self.log.debug("Compensated navigation with %s" % compensation_offset)
move_to = [
socket_xy[0] + compensation_offset[0] + move_offset[0],
socket_xy[1] + compensation_offset[1] + move_offset[1],
]
x_offset = move_to[0] - self.resolution[0] / 2
y_offset = move_to[1] - self.resolution[1] / 2
self.input_handler.click(
*move_to, *move_to, button=None, raw=True, speed_factor=1
)
self.input_handler.drag(self.origin_pos[0], self.origin_pos[1], speed_factor=1)
self.input_handler.rnd_sleep(min=200, mean=300, sigma=100)
self.ingame_pos = [socket_tx + move_offset_tx, socket_ty + move_offset_ty]
return found_socket
def _refind_position(self, desired_tree_pos):
# If the current location has been determined to be incorrect
# we can go to the bottom right corner and find a cluster close
# to socket 21, namely the Ambidexterity cluster
# This is a known location, which can then be used to calculate
# our way to a desired position
self.log.debug("Centering screen position")
# Correct our tree position to a known value
self._locate_screen_using_ambidexterity()
# Find our way to the desired position
self._move_to_tree_pos_using_spaces(desired_tree_pos)
def _move_to_tree_pos_using_spaces(self, desired_tree_pos, max_position_error=5):
dx = desired_tree_pos[0] - self.ingame_pos[0]
dy = desired_tree_pos[1] - self.ingame_pos[1]
self.log.debug("Moving to tree pos using spaces. Deltas: ({}, {})".format(dx, dy))
while (abs(dx) + abs(dy)) > max_position_error:
# Choose quadrant to find spaces in based on dx, dy
right, bottom = dx >= 0, dy >= 0
if right and not bottom:
quadrant = 0
elif not right and not bottom:
quadrant = 1
elif not right and bottom:
quadrant = 2
elif right and bottom:
quadrant = 3
# Find empty spaces that we can drag from
spaces = self._find_empty_space(quadrant)
if spaces is None:
raise ValueError("Could not find an empty space, quitting.")
# Choose a random empty space for maximum drag
chosen_space = spaces[np.random.randint(spaces.shape[0])]
# How far to drag the window to end up in the optimal place
screen_move_x, screen_move_y = self._tree_pos_to_xy([dx, dy],
offset=True)
# Calculate where our drag should end up to perform the move
drag_x = chosen_space[0] - screen_move_x
drag_y = chosen_space[1] - screen_move_y
# We should only drag within the screen's resolution
# Additionally, we use 100px margin to not trigger tree scroll
drag_x = np.clip(drag_x, 100, self.resolution[0] - 100)
drag_y = np.clip(drag_y, 100, self.resolution[1] - 100)
# Drag
self.input_handler.click(
*chosen_space, *chosen_space, button=None, raw=True, speed_factor=1
)
self.input_handler.drag(drag_x, drag_y, speed_factor=1)
self.input_handler.rnd_sleep(min=200, mean=300, sigma=100)
# Calculate how far we've actually moved
effective_move_x = chosen_space[0] - drag_x
effective_move_y = chosen_space[1] - drag_y
# Update our internal tree position
self.ingame_pos = self._add_xy_offset_to_tree_pos(
[effective_move_x, effective_move_y]
)
# Figure out how much we have left to move
dx = desired_tree_pos[0] - self.ingame_pos[0]
dy = desired_tree_pos[1] - self.ingame_pos[1]
def _locate_screen_using_ambidexterity(self):
# Essentially, this is _move_to_tree_pos_using_spaces but
# only used to find the tree position by navigating to a known point
self.log.debug("Moving to ambidexterity")
ambidexterity_position = None
assumed_ambidexterity_position = (0.25234375, 0.20555556)
while ambidexterity_position is None:
# Find empty spaces that we can drag from
spaces = self._find_empty_space(3)
if spaces is None:
raise ValueError("Could not find an empty space, quitting.")
# Choose the farthest empty space for maximum drag
chosen_space = spaces[np.argmax(spaces.sum(axis=1))]
# An arbitrary position in the top left region
drag_location = (200, 200)
# Drag
self.input_handler.click(
*chosen_space, *chosen_space, button=None, raw=True, speed_factor=1
)
self.input_handler.drag(drag_location[0], drag_location[1], speed_factor=1)
self.input_handler.rnd_sleep(min=200, mean=300, sigma=100)
# Are we there yet?
# i.e., have we reached Ambidexterity, which in that case is at
# roughly (646, 296) in absolute 1440p screen px position
ambidexterity_position = self._find_icon(
assumed_ambidexterity_position, "AmbidexterityCluster.png"
)
# Ambidexterity is located (-560, 850) from socket 21
# Thus, this plus any (scaled) offset found by the template matcher is
# our tree position
self.ingame_pos = [
SOCKETS[21][0]
- 560
+ ambidexterity_position[0] / (X_SCALE * self.px_multiplier),
SOCKETS[21][1]
+ 850
+ ambidexterity_position[1] / (Y_SCALE * self.px_multiplier),
]
def _find_empty_space(self, quadrant):
# Finds empty spaces that can be used to drag the screen
# Used to recenter the screen
# The quadrant argument is an int in [0, 1, 2, 3], corresponding to
# [top-right, top-left, bottom-left, bottom-right]
quadrant_translation = {0: [0.5, 0], 1: [0, 0], 2: [0, 0.5], 3: [0.5, 0.5]}
fractional_lt = quadrant_translation[quadrant]
lt = [
int(fractional_lt[0] * self.resolution[0]),
int(fractional_lt[1] * self.resolution[1]),
]
rb = [int(lt[0] + self.resolution[0] / 2),
int(lt[1] + self.resolution[1] / 2)]
searched_area = grab_screen(tuple(lt + rb))
searched_area = cv2.cvtColor(searched_area, cv2.COLOR_BGR2GRAY)
locations = np.zeros_like(searched_area)
centered_coordinates = self._match_image(searched_area, "FreeSpace.png")
locations[tuple(centered_coordinates)] = 1
rel_space_pos_yx = np.argwhere(locations == 1)
rel_space_pos = rel_space_pos_yx.T[::-1].T
if len(rel_space_pos) == 0:
self.log.warning("Could not find any free spaces in tree!")
return None
screen_space_pos = rel_space_pos + lt
# remove positions that are close to edges as these trigger scroll
screen_space_pos = screen_space_pos[(screen_space_pos[:, 0] > 100) &
(screen_space_pos[:, 1] > 100) &
(screen_space_pos[:, 0] < self.resolution[0] - 100) &
(screen_space_pos[:, 1] < self.resolution[1] - 100)]
return screen_space_pos
def _find_icon(self, assumed_position, icon_name):
        # Finds the ambidexterity cluster icon in the region it sits in
# if we are at the bottom-right corner of the tree
# The exact location is used to refine our knowledge of our position
abs_assumed_position = (
assumed_position[0] * self.resolution[0],
assumed_position[1] * self.resolution[1],
)
margin_side = int(0.05 * self.resolution[0])
lt = [
int(abs_assumed_position[0] - margin_side / 2),
int(abs_assumed_position[1] - margin_side / 2),
]
rb = [
int(abs_assumed_position[0] + margin_side / 2),
int(abs_assumed_position[1] + margin_side / 2),
]
searched_area = grab_screen(tuple(lt + rb))
searched_area = cv2.cvtColor(searched_area, cv2.COLOR_BGR2GRAY)
        locations = np.zeros((margin_side, margin_side))
# coding: utf-8
import os
import cv2
import numpy as np
import math
import time
def mkdir(PATH):
    '''
    Create the directory if it does not already exist
    '''
if not os.path.exists(PATH):
os.makedirs(PATH)
return
def new_rgb(height, width):
    '''
    Create a new RGB image
    args:
        height: image height
        width: image width
    return:
        cv_rgb_blank_image: new image data
    '''
cv_rgb_blank_image = np.zeros((height,width,3), np.uint8)
return cv_rgb_blank_image
def new_rgba(height, width):
    '''
    Create a new RGBA image
    args:
        height: image height
        width: image width
    return:
        cv_rgb_blank_image: new image data
    '''
cv_rgb_blank_image = np.zeros((height,width,4), np.uint8)
return cv_rgb_blank_image
def to_rgb(cv_bgr):
    '''
    Convert to RGB
    args:
        cv_bgr: OpenCV BGR image data
    return:
        cv_rgb: OpenCV RGB image data
    '''
#BGRflags = [flag for flag in dir(cv2) if flag.startswith('COLOR_BGR') ]
#print(BGRflags)
cv_rgb = cv2.cvtColor(cv_bgr, cv2.COLOR_BGR2RGB)
return cv_rgb
def to_bgr(cv_rgb):
    '''
    Convert to BGR
    args:
        cv_rgb: OpenCV RGB image data
    return:
        cv_bgr: OpenCV BGR image data
    '''
cv_bgr = cv2.cvtColor(cv_rgb, cv2.COLOR_RGB2BGR)
return cv_bgr
def to_yellow(cv_bgr):
    '''
    Extract only the yellow regions
    args:
        cv_bgr: OpenCV BGR image data
    return:
        cv_bgr_result: OpenCV BGR image data
    '''
#print("to_yellow()")
t0 = time.time()
cv_hsv = cv2.cvtColor(cv_bgr, cv2.COLOR_BGR2HSV)
    # Specify the color range to extract
lower1_color = np.array([20,50,50])
upper1_color = np.array([30,255,255])
    # Generate a mask image based on the specified color
yellow1_mask = cv2.inRange(cv_hsv,lower1_color,upper1_color)
img_mask = yellow1_mask
    # Extract the region common to the frame image and the mask image
cv_bgr_result = cv2.bitwise_and(cv_bgr,cv_bgr,mask=img_mask)
t1 = time.time()
dt_cv = t1-t0
#print("Conversion took {:.5} seconds".format(dt_cv))
return cv_bgr_result
def to_white(cv_bgr):
    '''
    Extract only the white regions
    args:
        cv_bgr: OpenCV BGR image data
    return:
        cv_bgr_result: OpenCV BGR image data
    '''
#print("to_white()")
t0 = time.time()
cv_hsv = cv2.cvtColor(cv_bgr, cv2.COLOR_BGR2HSV)
    # Specify the color ranges to extract
lower1_color = np.array([0,0,120])
upper1_color = np.array([45,40,255])
lower2_color = np.array([50,0,200])
upper2_color = np.array([100,20,255])
lower3_color = np.array([45,0,225])
upper3_color = np.array([100,40,255])
    # Generate mask images based on the specified colors
white1_mask = cv2.inRange(cv_hsv,lower1_color,upper1_color)
white2_mask = cv2.inRange(cv_hsv,lower2_color,upper2_color)
white3_mask = cv2.inRange(cv_hsv,lower3_color,upper3_color)
img_mask = white1_mask
img_mask = cv2.bitwise_or(white1_mask, white2_mask)
img_mask = cv2.bitwise_or(img_mask, white3_mask)
    # Extract the region common to the frame image and the mask image
cv_bgr_result = cv2.bitwise_and(cv_bgr,cv_bgr,mask=img_mask)
t1 = time.time()
dt_cv = t1-t0
#print("Conversion took {:.5} seconds".format(dt_cv))
return cv_bgr_result
def to_bin(cv_bgr):
    '''
    Binarize the image
    args:
        cv_bgr: OpenCV BGR image data
    return:
        cv_bin: binarized OpenCV grayscale image data
    '''
#print("to_bin()")
t0 = time.time()
    # Use a Gaussian blur to erase weak boundary lines
    cv_gauss = cv2.GaussianBlur(cv_bgr,(5,5),0) # kernel size must be odd
cv_gray = cv2.cvtColor(cv_gauss, cv2.COLOR_BGR2GRAY)
#plt.title('gray')
#plt.imshow(cv_gray)
#plt.show()
    # Remove faintly colored regions
ret, mask = cv2.threshold(cv_gray, 20, 255, cv2.THRESH_BINARY)
mask = cv2.bitwise_and(cv_gray,cv_gray,mask=mask)
cv_gray = cv2.bitwise_and(cv_gray,cv_gray,mask=mask)
#plt.title('gray')
#plt.imshow(cv_gray)
#plt.show()
    # input image, threshold, maxVal, thresholding method
ret,cv_bin = cv2.threshold(cv_gray,0,255,cv2.THRESH_BINARY|cv2.THRESH_OTSU);
t1 = time.time()
dt_cv = t1-t0
#print("Conversion took {:.5} seconds".format(dt_cv))
return cv_bin
def bin_to_rgb(cv_bin):
    '''
    Convert a binarized grayscale image to OpenCV RGB image data
    args:
        cv_bin: OpenCV grayscale image data
    return:
        cv_rgb: OpenCV RGB image data
    '''
cv_rgb = np.dstack((cv_bin, cv_bin, cv_bin))
return cv_rgb
def to_edge(cv_gray):
    '''
    Find edges
    args:
        cv_gray: OpenCV grayscale image data
    return:
        cv_gray_result: OpenCV grayscale image data of the edges
    '''
#print("to_edge()")
t0 = time.time()
# Canny
cv_gray_result = cv2.Canny(cv_gray, 50, 200);
t1 = time.time()
dt_cv = t1-t0
#print("Conversion took {:.5} seconds".format(dt_cv))
return cv_gray_result
def to_hough_lines_p(cv_bin):
    '''
    Find straight lines using the probabilistic Hough transform
    args:
        cv_bin: OpenCV grayscale image data
    return:
        cv_bin_result: OpenCV grayscale image data
    '''
    #print("Probabilistic Hough transform")
t0 = time.time()
threshold=10
minLineLength=10
maxLineGap=10
_lines = cv2.HoughLinesP(cv_bin,rho=1,theta=1*np.pi/180,threshold=threshold,lines=np.array([]),minLineLength=minLineLength,maxLineGap=maxLineGap)
cv_bin_result=np.zeros_like(cv_bin)
if _lines is not None:
a,b,c = _lines.shape
#print(len(_lines[0]))
for i in range(a):
x1 = _lines[i][0][0]
y1 = _lines[i][0][1]
x2 = _lines[i][0][2]
y2 = _lines[i][0][3]
cv2.line(cv_bin_result,(x1,y1),(x2,y2),(255,255,255),1)
t1 = time.time()
dt_cv = t1-t0
#print("Conversion took {:.5} seconds".format(dt_cv))
return cv_bin_result
def to_layer(cv_bgr_image,cv_bgr_overlay,image_alpha=1.0,overlay_alpha=0.75):
    '''
    Overlay two images
    args:
        cv_bgr_image: OpenCV BGR image data placed underneath
        cv_bgr_overlay: OpenCV BGR image data placed on top
        image_alpha: alpha value of the bottom image
        overlay_alpha: alpha value of the top image
    return:
        cv_bgr_result: blended OpenCV BGR image data
    '''
cv_bgr_result = cv2.addWeighted(cv_bgr_image, image_alpha, cv_bgr_overlay, overlay_alpha, 0)
return cv_bgr_result
def to_roi(cv_bgr, vertices):
"""
Region Of Interest
頂点座標でmaskを作り、入力画像に適用する
args:
cv_bgr: OpenCV BGR画像データ
vertices: 領域の頂点座標
return:
cv_bgr_result: 領域外を黒くしたOpenCV BGR画像データ
"""
mask = np.zeros_like(cv_bgr)
if len(mask.shape)==2:
cv2.fillPoly(mask, vertices, 255)
else:
cv2.fillPoly(mask, vertices, (255,)*mask.shape[2]) # in case, the input image has a channel dimension
return cv2.bitwise_and(cv_bgr, mask)
def to_ipm(cv_bgr,ipm_vertices):
    '''
    Inverse Perspective Mapping
    Transform the image to a top view
    args:
        cv_bgr: OpenCV BGR image data
        ipm_vertices: perspective-transform source coordinates
    return:
        cv_bgr_ipm: transformed OpenCV BGR image data
    '''
rows, cols = cv_bgr.shape[:2]
offset = cols*.25
src = ipm_vertices
dst = np.float32([[offset, 0], [cols - offset, 0], [cols - offset, rows], [offset, rows]])
    # Build the transformation matrix from the src and dst coordinates
matrix = cv2.getPerspectiveTransform(src, dst)
    # Warp the image to the top view using the transformation matrix
cv_bgr_ipm = cv2.warpPerspective(cv_bgr, matrix, (cols, rows))
return cv_bgr_ipm
def calc_roi_vertices(cv_bgr,
top_width_rate=0.6,top_height_position=0.7,
bottom_width_rate=4.0,bottom_height_position=0.95):
    '''
    Compute the Region Of Interest vertex coordinates
    args:
        cv_bgr: OpenCV BGR image data
        top_width_rate: width of the top edge as a fraction of the image width
        top_height_position: position of the top edge as a fraction of the image height (0.0: top, 1.0: bottom)
        bottom_width_rate: width of the bottom edge as a fraction of the image width
        bottom_height_position: position of the bottom edge as a fraction of the image height (0.0: top, 1.0: bottom)
    return:
        vertices: array of the region's vertex coordinates
    '''
bottom_width_left_position = (1.0 - bottom_width_rate)/2
bottom_width_right_position = (1.0 - bottom_width_rate)/2 + bottom_width_rate
top_width_left_position = (1.0 - top_width_rate)/2
top_width_right_position = (1.0 - top_width_rate)/2 + top_width_rate
# Region Of Interest
rows, cols = cv_bgr.shape[:2]
bottom_left = [cols*bottom_width_left_position, rows*bottom_height_position]
top_left = [cols*top_width_left_position, rows*top_height_position]
bottom_right = [cols*bottom_width_right_position, rows*bottom_height_position]
top_right = [cols*top_width_right_position, rows*top_height_position]
# the vertices are an array of polygons (i.e array of arrays) and the data type must be integer
vertices = np.array([[top_left, top_right, bottom_right, bottom_left]], dtype=np.int32)
return vertices
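# Example usage (illustrative): mask a frame down to the expected lane region
#   vertices = calc_roi_vertices(cv_bgr)
#   cv_bgr_roi = to_roi(cv_bgr, vertices)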
def calc_ipm_vertices(cv_bgr,
top_width_rate=0.6,top_height_position=0.7,
bottom_width_rate=4.0,bottom_height_position=0.95):
    '''
    Compute the Inverse Perspective Mapping vertex coordinates
    args:
        cv_bgr: OpenCV BGR image data
        top_width_rate: width of the top edge as a fraction of the image width
        top_height_position: position of the top edge as a fraction of the image height (0.0: top, 1.0: bottom)
        bottom_width_rate: width of the bottom edge as a fraction of the image width
        bottom_height_position: position of the bottom edge as a fraction of the image height (0.0: top, 1.0: bottom)
    return:
        vertices: array of vertex coordinates to transform
    '''
bottom_width_left_position = (1.0 - bottom_width_rate)/2
bottom_width_right_position = (1.0 - bottom_width_rate)/2 + bottom_width_rate
top_width_left_position = (1.0 - top_width_rate)/2
top_width_right_position = (1.0 - top_width_rate)/2 + top_width_rate
# Inverse Perspective Mapping
rows, cols = cv_bgr.shape[:2]
bottom_left = [cols*bottom_width_left_position, rows*bottom_height_position]
top_left = [cols*top_width_left_position, rows*top_height_position]
bottom_right = [cols*bottom_width_right_position, rows*bottom_height_position]
top_right = [cols*top_width_right_position, rows*top_height_position]
vertices = np.array([top_left, top_right, bottom_right, bottom_left], dtype=np.float32)
return vertices
def draw_vertices(cv_bgr,vertices):
    '''
    Display the ROI coordinates for visual verification,
    i.e. to check that the lane lines fall inside the ROI
    args:
        cv_bgr: OpenCV BGR image placed underneath
        vertices: vertex coordinates to draw as an overlay
    return:
        cv_bgr_result: image with the overlay drawn
    '''
color=(0,255,0)
image_alpha = 1.0
overlay_alpha = 0.5
overlay = new_rgb(cv_bgr.shape[0], cv_bgr.shape[1])
cv2.fillConvexPoly(overlay, vertices.astype('int32'), color)
cv_bgr_result = cv2.addWeighted(cv_bgr,image_alpha,overlay,overlay_alpha,0)
return cv_bgr_result
def histogram_equalization(cv_bgr,grid_size=(8,8)):
    '''
    Histogram equalization
    args:
        cv_bgr: OpenCV BGR image data
        grid_size: tile size used for the equalization
    return:
        cv_bgr_result: OpenCV BGR image data
    '''
    #print("Histogram equalization")
t0 = time.time()
lab= cv2.cvtColor(cv_bgr, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=grid_size)
cl = clahe.apply(l)
limg = cv2.merge((cl,a,b))
cv_bgr_result = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
t1 = time.time()
dt_cv = t1-t0
#print("Conversion took {:.5} seconds".format(dt_cv))
return cv_bgr_result
def draw_arrow(cv_rgb, x, y, color, size=1, arrow_type=1, lineType=1):
    '''
    Draw an arrow
    args:
        cv_rgb: OpenCV RGB image data
        x: top-left x coordinate
        y: top-left y coordinate
        color: color
        size: arrow size
        arrow_type: arrow direction. 1: right, 2: up, 3: left, 4: down
        lineType: line type
    return:
        cv_rgb_arrow: OpenCV RGB image data
    '''
    # Base coordinates for each arrow shape
if arrow_type == 1:
pts_arrow = np.array([[0,10],[20,10],[20,0],[35,15],[20,30],[20,20],[0,20],[0,10]])
elif arrow_type == 2:
pts_arrow = np.array([[0,15],[10,15],[10,35],[20,35],[20,15],[30,15],[15,0],[0,15]])
elif arrow_type == 3:
pts_arrow = np.array([[35,10],[15,10],[15,0],[0,15],[15,30],[15,20],[35,20],[35,10]])
elif arrow_type == 4:
pts_arrow = np.array([[10,0],[10,20],[0,20],[15,35],[30,20],[20,20],[20,0],[10,0]])
pts_arrow = (pts_arrow * size + [[x,y]]).astype(int)
    # Fill the region enclosed by the coordinates
cv2.fillPoly(cv_rgb, [pts_arrow], color,lineType)
#cv2.polylines(cv_rgb, [pts_arrow], False, (255,255,255),lineType)
return
def reverse_ipm(cv_bgr,ipm_vertices):
    '''
    Apply the inverse IPM transform
    args:
        cv_bgr: OpenCV BGR image data
        ipm_vertices: the IPM coordinates used in the forward transform
    return:
        cv_bgr_ipm_reverse: OpenCV BGR image data
    '''
rows, cols = cv_bgr.shape[:2]
offset = cols*.25
dst = ipm_vertices
src = np.float32([[offset, 0], [cols - offset, 0], [cols - offset, rows], [offset, rows]])
matrix = cv2.getPerspectiveTransform(src, dst)
cv_bgr_ipm_reverse = cv2.warpPerspective(cv_bgr, matrix, (cols,rows))
return cv_bgr_ipm_reverse
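# Example (illustrative): to_ipm() and reverse_ipm() are inverses (up to warp
# interpolation) when given the same ipm_vertices:
#   ipm_vertices = calc_ipm_vertices(cv_bgr)
#   cv_bgr_top = to_ipm(cv_bgr, ipm_vertices)
#   cv_bgr_back = reverse_ipm(cv_bgr_top, ipm_vertices)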
def draw_histogram(cols,rows,histogram,lineType):
    '''
    Draw the histogram
    args:
        cols: image width
        rows: image height
        histogram: histogram
        lineType: line type for drawing
    returns:
        cv_rgb_histogram: OpenCV RGB image data of the histogram
    '''
    # Rescale the histogram so its maximum maps to 0.9 * image height
max_value = np.max(histogram)
if max_value > 0:
np.putmask(histogram,histogram>=0,np.uint32(rows - rows*0.9*histogram/max_value))
    # Convert the histogram into x,y coordinates
_x = np.arange(len(histogram))
pts_histogram = np.transpose(np.vstack([_x, histogram]))
    # Draw the histogram
cv_rgb_histogram = new_rgb(rows,len(histogram))
cv2.polylines(cv_rgb_histogram,[np.int32(pts_histogram)],False,(255, 255, 0),lineType=lineType)
    # Resize the histogram image
if cols != len(histogram):
cv_rgb_histogram = cv2.resize(cv_rgb_histogram, (cols,rows), interpolation = cv2.INTER_LINEAR)
return cv_rgb_histogram
def draw_ellipse_and_tilt(cols,rows,plot_y,pts_line,line_polyfit_const):
    '''
    Draw the arc and the tilt angle
    The drawn values are computed in pixel coordinates
    args:
        cols: image width
        rows: image height
        plot_y: evenly spaced y coordinates along the y axis
        pts_line: coordinates along the line
        line_polyfit_const: coefficients of the line's quadratic fit
    returns:
        cv_rgb_ellipse: OpenCV RGB image data of the arc
        cv_rgb_tilt: OpenCV RGB image data of the tilt angle
    '''
########################################
    # Draw the arc
########################################
cv_rgb_ellipse = new_rgb(rows,cols)
quarter_y = (np.max(plot_y) - np.min(plot_y))/4
########################################
    # Compute the coordinates and angle of the lower half
########################################
y0 = np.max(plot_y) - 2*quarter_y
y1 = np.max(plot_y)
_x0,_x1, \
x,y,r, \
rotate_deg,angle_deg, \
tilt_deg = calc_curve(y0,y1,line_polyfit_const)
    '''
    # Draw the arc
    cv2.ellipse(cv_rgb_ellipse,(int(x),int(y)),(int(r),int(r)),rotate_deg,0,angle_deg,(255,0,0),-1)
    or
    pts_ellipse = np.array(pts_line[:,int(pts_line.shape[1]/2):,:]).astype(int)
    pts_ellipse = np.concatenate((pts_ellipse,np.array([[[x,y]]]).astype(int)),axis=1)
    cv2.fillPoly(cv_rgb_ellipse, [pts_ellipse], (255,0,0))
    cv2.ellipse() does not draw correctly when sub-degree angles affect the
    drawn range, so the arc is drawn as a polygon instead
    '''
pts_ellipse = np.array(pts_line[:,int(pts_line.shape[1]/2):,:]).astype(int)
    # When (x, y) lies very far off screen, drawing gets slow, so use the line
    # equation to find on-screen points instead
    # Find the intersections of the line through the points (x,y) and (x0,y0)
    # with the four lines y=0, y=rows, x=0, x=cols
if x<0 or x>cols or y<0 or y>rows:
x0 = line_polyfit_const[0]*y0**2 + line_polyfit_const[1]*y0 + line_polyfit_const[2]
x1 = line_polyfit_const[0]*y1**2 + line_polyfit_const[1]*y1 + line_polyfit_const[2]
y0_1 = 0 # y = 0
x0_1 = calc_line(y,x,y0,x0,y0_1)
y0_2 = rows # y = rows
x0_2 = calc_line(y,x,y0,x0,y0_2)
x0_3 = 0 # x = 0
y0_3 = calc_line(x,y,x0,y0,x0_3)
x0_4 = cols # x = cols
y0_4 = calc_line(x,y,x0,y0,x0_4)
pts_x0 = [x0_1,x0_2,x0_3,x0_4]
pts_y0 = [y0_1,y0_2,y0_3,y0_4]
y1_1 = 0 # y = 0
x1_1 = calc_line(y,x,y1,x1,y1_1)
y1_2 = rows # y = rows
x1_2 = calc_line(y,x,y1,x1,y1_2)
x1_3 = 0 # x = 0
y1_3 = calc_line(x,y,x1,y1,x1_3)
x1_4 = cols # x = cols
y1_4 = calc_line(x,y,x1,y1,x1_4)
pts_x1 = [x1_1,x1_2,x1_3,x1_4]
pts_y1 = [y1_1,y1_2,y1_3,y1_4]
if x<0 or x>cols:
for i in range(4):
if (x<0 and pts_x0[i] == 0) or (x>cols and pts_x0[i] == cols):
pts_x0 = pts_x0[i]
pts_y0 = pts_y0[i]
break
for i in range(4):
if (x<0 and pts_x1[i] == 0) or (x>cols and pts_x1[i] == cols):
pts_x1 = pts_x1[i]
pts_y1 = pts_y1[i]
break
elif y<0 or y>rows:
for i in range(4):
if (y<0 and pts_y0[i] == 0) or (y>rows and pts_y0[i] == rows):
pts_x0 = pts_x0[i]
pts_y0 = pts_y0[i]
break
for i in range(4):
if (y<0 and pts_y1[i] == 0) or (y>rows and pts_y1[i] == rows):
pts_x1 = pts_x1[i]
pts_y1 = pts_y1[i]
break
pts_ellipse = np.concatenate((pts_ellipse,np.array([[[pts_x1,pts_y1],[pts_x0,pts_y0]]]).astype(int)),axis=1)
else:
pts_ellipse = np.concatenate((pts_ellipse,np.array([[[x,y]]]).astype(int)),axis=1)
cv2.fillPoly(cv_rgb_ellipse, [pts_ellipse], (255,0,0))
########################################
    # Draw the tilt
########################################
cv_rgb_tilt = new_rgb(rows,cols)
x0 = line_polyfit_const[0]*y0**2 + line_polyfit_const[1]*y0 + line_polyfit_const[2]
x1 = line_polyfit_const[0]*y1**2 + line_polyfit_const[1]*y1 + line_polyfit_const[2]
pts_tilt = np.array([[x0,y0],[x1,y1],[x1,y0]]).astype(int)
cv2.fillPoly(cv_rgb_tilt,[pts_tilt],(255,0,0))
########################################
    # Compute the coordinates and angle of the upper half
########################################
quarter_y = (np.max(plot_y) - np.min(plot_y))/4
y0 = np.min(plot_y)
y1 = np.max(plot_y) - 2*quarter_y
_x0,_x1, \
x,y,r, \
rotate_deg,angle_deg, \
tilt_deg = calc_curve(y0,y1,line_polyfit_const)
    # Draw the arc
#cv2.ellipse(cv_rgb_ellipse,(int(x),int(y)),(int(r),int(r)),rotate_deg,0,angle_deg,(0,255,255),-1)
pts_ellipse = np.array(pts_line[:,:int(pts_line.shape[1]/2),:]).astype(int)
    # When (x, y) lies very far off screen, drawing gets slow, so use the line
    # equation to find on-screen points instead
    # Find the intersections of the line through the points (x,y) and (x0,y0)
    # with the four lines y=0, y=rows, x=0, x=cols
if x<0 or x>cols or y<0 or y>rows:
x0 = line_polyfit_const[0]*y0**2 + line_polyfit_const[1]*y0 + line_polyfit_const[2]
x1 = line_polyfit_const[0]*y1**2 + line_polyfit_const[1]*y1 + line_polyfit_const[2]
y0_1 = 0 # y = 0
x0_1 = calc_line(y,x,y0,x0,y0_1)
y0_2 = rows # y = rows
x0_2 = calc_line(y,x,y0,x0,y0_2)
x0_3 = 0 # x = 0
y0_3 = calc_line(x,y,x0,y0,x0_3)
x0_4 = cols # x = cols
y0_4 = calc_line(x,y,x0,y0,x0_4)
pts_x0 = [x0_1,x0_2,x0_3,x0_4]
pts_y0 = [y0_1,y0_2,y0_3,y0_4]
y1_1 = 0 # y = 0
x1_1 = calc_line(y,x,y1,x1,y1_1)
y1_2 = rows # y = rows
x1_2 = calc_line(y,x,y1,x1,y1_2)
x1_3 = 0 # x = 0
y1_3 = calc_line(x,y,x1,y1,x1_3)
x1_4 = cols # x = cols
y1_4 = calc_line(x,y,x1,y1,x1_4)
pts_x1 = [x1_1,x1_2,x1_3,x1_4]
pts_y1 = [y1_1,y1_2,y1_3,y1_4]
if x<0 or x>cols:
for i in range(4):
if (x<0 and pts_x0[i] == 0) or (x>cols and pts_x0[i] == cols):
pts_x0 = pts_x0[i]
pts_y0 = pts_y0[i]
break
for i in range(4):
if (x<0 and pts_x1[i] == 0) or (x>cols and pts_x1[i] == cols):
pts_x1 = pts_x1[i]
pts_y1 = pts_y1[i]
break
elif y<0 or y>rows:
for i in range(4):
if (y<0 and pts_y0[i] == 0) or (y>rows and pts_y0[i] == rows):
pts_x0 = pts_x0[i]
pts_y0 = pts_y0[i]
break
for i in range(4):
if (y<0 and pts_y1[i] == 0) or (y>rows and pts_y1[i] == rows):
pts_x1 = pts_x1[i]
pts_y1 = pts_y1[i]
break
#print("{} {} {} {}".format(pts_x0,pts_y0,pts_x1,pts_y1))
pts_ellipse = np.concatenate((pts_ellipse,np.array([[[pts_x1,pts_y1],[pts_x0,pts_y0]]]).astype(int)),axis=1)
else:
pts_ellipse = np.concatenate((pts_ellipse,np.array([[[x,y]]]).astype(int)),axis=1)
cv2.fillPoly(cv_rgb_ellipse, [pts_ellipse], (0,255,255))
########################################
    # Draw the tilt
########################################
x0 = line_polyfit_const[0]*y0**2 + line_polyfit_const[1]*y0 + line_polyfit_const[2]
x1 = line_polyfit_const[0]*y1**2 + line_polyfit_const[1]*y1 + line_polyfit_const[2]
pts_tilt = np.array([[x0,y0],[x1,y1],[x1,y0]]).astype(int)
cv2.fillPoly(cv_rgb_tilt,[pts_tilt],(0,255,255))
    # Draw the line onto the arc image
cv2.polylines(cv_rgb_ellipse,[pts_line],False,(0,255,255))
    # Draw the line onto the tilt image
cv2.polylines(cv_rgb_tilt,[pts_line],False,(0,255,255))
return cv_rgb_ellipse,cv_rgb_tilt
def draw_text(cv_rgb,display_str,color,start_x,start_y,fontFace, fontScale, fontThickness):
    '''
    Draw text on the image
    args:
        cv_rgb: OpenCV RGB image data to draw on
        display_str: array of strings
        color: color
        start_x: x coordinate for the text
        start_y: y coordinate for the text
        fontFace: fontFace
        fontScale: fontScale
        fontThickness: fontThickness
    return:
        end_x: x coordinate where the text ends
        end_y: y coordinate where the text ends
    '''
max_text_width = 0
max_text_height = 0
[(text_width, text_height), baseLine] = cv2.getTextSize(text=display_str[0], fontFace=fontFace, fontScale=fontScale, thickness=fontThickness)
x_left = int(baseLine)
y_top = int(baseLine)
for i in range(len(display_str)):
[(text_width, text_height), baseLine] = cv2.getTextSize(text=display_str[i], fontFace=fontFace, fontScale=fontScale, thickness=fontThickness)
if max_text_width < text_width:
max_text_width = text_width
if max_text_height < text_height:
max_text_height = text_height
for i in range(len(display_str)):
cv2.putText(cv_rgb, display_str[i], org=(start_x, start_y + int(max_text_height*1.2 + (max_text_height*1.2 * i))), fontFace=fontFace, fontScale=fontScale, thickness=fontThickness, color=color)
end_x = int(x_left + max_text_width + 2)
end_y = start_y + int(max_text_height*1.2 + (max_text_height*1.2 * i))
return end_x, end_y
def sliding_windows(cv_bin):
    '''
    Run a sliding-window search to find the pixel coordinates making up one line
    args:
        cv_bin: OpenCV grayscale image data of the binarized line image
    returns:
        cv_rgb_sliding_windows: OpenCV RGB image data of the sliding-window processing
        histogram: per-column pixel counts over the lower half of the input image (1, col)
        line_x: x coordinates of the pixels making up the line
        line_y: y coordinates of the pixels making up the line
    '''
    '''
    Count the pixels of the lower half of the image per column and use that as the histogram
    '''
rows, cols = cv_bin.shape[:2]
    # Count the pixels in the lower half of the image
histogram = np.sum(cv_bin[int(rows/2):,:], axis=0)
    # Convert the line image to RGB for drawing the sliding windows
cv_rgb_sliding_windows = bin_to_rgb(cv_bin)
'''
plt.title('HISTOGRAM')
plt.plot(histogram)
plt.show()
plt.title('before windows')
plt.imshow(cv_rgb_sliding_windows)
plt.show()
'''
    '''
    Find the x coordinate where the sliding window starts
    The histogram has one entry per pixel of image width, so a
    histogram array index equals an image x coordinate
    variables:
        win_line_x: current x coordinate of the sliding window
    '''
    # Initialize the current window position at the histogram maximum
win_line_x = np.argmax(histogram)
    # Decide the number of windows
nwindows = int(rows/5)
    # Decide the window height
    window_height = int(rows/nwindows)
    # Find the x and y positions of all nonzero pixels in the image
nonzero = cv_bin.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
    # Margin for the window width
margin = int(cols/10)
    # Minimum number of pixels required to recenter the window
minpix = margin/2
    # Array to collect the line pixel indices
lane_line_idx = []
    # Window color
rectangle_color=(0,160,0)
    '''
    sliding windows
    Collect the pixels that make up the line from the window search
    If windows would overlap, prefer the window range whose previous region held more pixels
    If a window is empty while the other side is detecting, follow its movement along the x axis
    '''
for window in range(nwindows):
        # Compute the window coordinates
win_y_low = rows - (window+1)*window_height
win_y_high = rows - window*window_height
win_line_x_low = win_line_x - margin
win_line_x_high = win_line_x + margin
        # Draw the window rectangle
cv2.rectangle(cv_rgb_sliding_windows,(win_line_x_low,win_y_low),(win_line_x_high,win_y_high),rectangle_color, 1)
        # Get the nonzero x and y pixels inside the window
win_line_idx = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_line_x_low) & (nonzerox < win_line_x_high)).nonzero()[0]
        # Count the pixels inside the window
win_num_lines = len(win_line_idx)
        # Append this window's line pixels to the collected line pixels
lane_line_idx.append(win_line_idx)
        # Update the window's starting x coordinate
        if win_num_lines > minpix:
            last_win_line_x = win_line_x
            win_line_x = int(np.mean(nonzerox[win_line_idx]))
    # Concatenate the per-window arrays
lane_line_idx = np.concatenate(lane_line_idx)
    # Get the line pixel coordinates
line_x = nonzerox[lane_line_idx]
line_y = nonzeroy[lane_line_idx]
    '''
    Color the pixels that make up the line
    '''
high_color=200
low_color=0
cv_rgb_sliding_windows[line_y, line_x] = [high_color, low_color, low_color]
return cv_rgb_sliding_windows, histogram, line_x, line_y
def polynormal_fit(pts_y,pts_x):
    '''
    Fit a quadratic polynomial to the points that make up the curve
    (y values are sampled evenly, so the fit expresses x in terms of y)
    args:
        pts_x: array of x coordinates
        pts_y: array of y coordinates
    returns:
        polyfit_const: the [a, b, c] coefficients of the quadratic x = a*y**2 + b*y + c
    '''
polyfit_const = np.polyfit(pts_y, pts_x, 2)
return polyfit_const
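# Example (illustrative): fit the lane pixels found by sliding_windows() and
# evaluate the quadratic at evenly spaced y values:
#   const = polynormal_fit(line_y, line_x)
#   plot_y = np.linspace(0, rows - 1, rows)
#   plot_x = const[0]*plot_y**2 + const[1]*plot_y + const[2]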
def calc_line_curve(line_x,line_y,plot_y):
    '''
    From the (pixel or meter) point coordinates making up the line,
    compute the fitted curve and coordinates on that curve
    args:
        line_x: x coordinates of the points making up the line
        line_y: y coordinates of the points making up the line
        plot_y: y coordinates on the curve
    returns:
        line_polyfit_const: coefficients of the line's curve
        pts_line: [x, y] coordinates along the line's curve
    '''
    '''
    From the point coordinates:
    fit the line's quadratic polynomial,
    compute the line's radius of curvature,
    compute the line's angle from the bottom of the image to its middle
    '''
    # Fit the line's quadratic polynomial
line_polyfit_const = polynormal_fit(line_y,line_x)
    # Compute the x coordinates on the quadratic for the given y values
line_plot_x = line_polyfit_const[0]*plot_y**2 + line_polyfit_const[1]*plot_y + line_polyfit_const[2]
    '''
    Convert the x,y coordinates into an array of [x, y] pairs
    '''
pts_line = np.int32(np.array([np.transpose(np.vstack([line_plot_x, plot_y]))]))
#pts_line = pts_line.reshape((-1,1,2))
return line_polyfit_const, pts_line
def calc_curve(curve_y0,curve_y1,curve_polyfit_const):
    '''
    Compute the curve
    args:
        curve_y0: upper y coordinate on the curve
        curve_y1: lower y coordinate on the curve
        curve_polyfit_const: coefficients of the curve
    returns:
        curve_x0: x coordinate at the upper y coordinate
        curve_x1: x coordinate at the lower y coordinate
        x: x coordinate of the circle center
        y: y coordinate of the circle center
        r: circle radius r (radius of curvature)
        rotate_deg: rotation angle of the arc
        angle_deg: sweep angle of the arc
        curve_tilt_deg: tilt angle relative to the y axis
    '''
    # Compute the radius of curvature R at the midpoint
curve_y = curve_y1-curve_y0
curve_r = calc_curvature_radius(curve_polyfit_const,curve_y)
    # Compute the x coordinates
curve_x0 = curve_polyfit_const[0]*curve_y0**2 + curve_polyfit_const[1]*curve_y0 + curve_polyfit_const[2]
curve_x1 = curve_polyfit_const[0]*curve_y1**2 + curve_polyfit_const[1]*curve_y1 + curve_polyfit_const[2]
    # Compute the circle center from the two points, the radius, and the curve coefficient
py = curve_y1
px = curve_x1
qy = curve_y0
qx = curve_x0
r = curve_r
x,y = calc_circle_center_point(px,py,qx,qy,r,curve_polyfit_const[0])
    # Compute the drawing angles of the arc
rotate_deg, angle_deg = calc_ellipse_angle(py,px,qy,qx,r,x,y,curve_polyfit_const[0])
#print("py={},px={},qy={},qx={},x={},y={},r={}".format(py,px,qy,qx,x,y,r))
#print("rotate_deg={} angle_deg={}".format(rotate_deg,angle_deg))
    # Compute the tilt angle relative to the vertical
    # Positive means a left curve, negative a right curve
curve_tilt_rad = math.atan((px-qx)/(py-qy))
curve_tilt_deg = math.degrees(curve_tilt_rad)
#print("curve_tilt_deg={}".format(curve_tilt_deg))
return curve_x0,curve_x1,x,y,r,rotate_deg,angle_deg,curve_tilt_deg
def calc_curvature_radius(const,y):
    '''
    Compute the radius of curvature R of the quadratic at a given y coordinate
    args:
        const: the [a, b, c] coefficients of the quadratic x = a*y**2 + b*y + c
        y: y coordinate of the point P on the quadratic
    returns:
        r: radius of curvature R
    '''
r = (1 + (2*const[0]*y + const[1])**2)**1.5/np.abs(2*const[0])
return r
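# Note: this is the standard curvature-radius formula for x = a*y**2 + b*y + c,
#   R = (1 + (dx/dy)**2)**1.5 / |d2x/dy2|
# with dx/dy = 2*a*y + b and d2x/dy2 = 2*a evaluated at the given y.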
def calc_circle_center_point(px,py,qx,qy,r,const):
    '''
    Compute the circle center from two points and the radius r
    args:
        py: y coordinate of point P on the circle
        px: x coordinate of point P on the circle
        qy: y coordinate of point Q on the circle
        qx: x coordinate of point Q on the circle
        r: circle radius r
        const: selector between the two candidate centers
    return:
        x: x coordinate of the circle center
        y: y coordinate of the circle center
    '''
if const > 0:
x=((py - qy)*(np.sqrt(-(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2)*(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2 - 4*r**2))*(px - qx) - (py + qy)*(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2)) + (px**2 + py**2 - qx**2 - qy**2)*(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2))/(2*(px - qx)*(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2))
y=(np.sqrt(-(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2)*(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2 - 4*r**2))*(-px + qx)/2 + (py + qy)*(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2)/2)/(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2)
else:
x=(-(py - qy)*(np.sqrt(-(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2)*(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2 - 4*r**2))*(px - qx) + (py + qy)*(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2)) + (px**2 + py**2 - qx**2 - qy**2)*(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2))/(2*(px - qx)*(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2))
y=(np.sqrt(-(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2)*(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2 - 4*r**2))*(px - qx)/2 + (py + qy)*(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2)/2)/(px**2 - 2*px*qx + py**2 - 2*py*qy + qx**2 + qy**2)
return x,y
def calc_ellipse_angle(py,px,qy,qx,r,x,y,const):
    # For cv2.ellipse, a counterclockwise rotation angle is negative and a clockwise one is positive
if const < 0:
rotate_rad = math.asin((py-y)/r)
if x > px:
rotate_rad -= math.pi
rotate_deg = math.degrees(rotate_rad)
        # Compute the circle's central angle from the isosceles triangle
        ml = np.linalg.norm(np.array([px,py]) - np.array([qx,qy]))
import numpy as np
import pandas as pd
import tensorflow as tf
from dense import dense_to_one_hot
# Load the dataset and split it into inputs and labels
train = pd.read_csv("./data/train.csv")
images = train.iloc[:, 1:].values
labels_flat = train.iloc[:, 0].values.ravel()
# Preprocess the inputs
images = images.astype(np.float64)
images = np.multiply(images, 1.0 / 255.0)
import math
import cv2 as cv
import numpy as np
class ImageProcessor:
"""Class for image processing.
Attributes:
"""
def __init__(self, fp=None):
"""Initialize image process class.
Args:
fp (str) : File path to the image.
"""
if fp is not None:
self.load_img(fp)
else:
self.img = None
self.processed_img = None
self.width = None
self.height = None
self.channels = None
def load_img(self, fp):
"""Load image from disk
Args:
fp (str): Path to image file
"""
self.img = cv.imread(fp)
cv.cvtColor(self.img, cv.COLOR_BGR2RGB, self.img)
self.processed_img = self.img
self.update_img_property()
def update_img_property(self):
"""Update image properties, including height, width and channels"""
self.height, self.width, self.channels = self.img.shape
def get_img(self):
"""Get image"""
return self.processed_img
def set_img(self, img):
"""Set given image to class image"""
self.img = img
def restore_changes(self):
"""Restore changes of image"""
self.processed_img = self.img
def save_changes(self):
"""Save changes on the processed image"""
self.img = self.processed_img
self.update_img_property()
def save_img(self, fp):
"""Save the image to disk"""
cv.cvtColor(self.img, cv.COLOR_BGR2RGB, self.img)
cv.imwrite(fp, self.img)
def show(self, img=None, name='Image'):
"""Display image. Press 'esc' to exit.
Args:
img (numpy.array): Image array representation.
name (str): Name of the window.
"""
if img is None:
img = self.img
cv.cvtColor(img, cv.COLOR_RGB2BGR, img)
cv.imshow(name, img)
if cv.waitKey(0) == 27:
cv.destroyAllWindows()
def get_split_color(self, mode):
"""Split image color
Args:
mode (str): b - blue; r - red; g - green.
Returns:
Single channel image.
"""
if mode == 'b':
img = self.img[:, :, 2]
elif mode == 'r':
img = self.img[:, :, 0]
elif mode == 'g':
img = self.img[:, :, 1]
else:
raise Exception("Color option not exist!")
self.processed_img = img
return img
def get_pixel_color(self, height, width):
"""Get pixel color
Args:
height (int): Height position.
width (int): Width position.
Returns:
A tuple of rgb color.
"""
return self.img[height][width]
def get_shape(self):
"""Get image shape.
Returns:
Height, width and number of channels.
"""
return self.height, self.width, self.channels
def shift(self, x, y, cut=False):
"""Shift the image vertically and horizontally.
If cut the shifted image, the part shifted out will not
appear and the image size remain the same. If not cut the
image, the blank area will be filled with black. Size of
image will increase.
Args:
x (int): Number of pixels shift on height
y (int): Number of pixels shift on width
cut (bool): Cut the image or not.
Returns:
Numpy array representation of shifted image.
"""
transform_mat = np.array([[1, 0, x],
[0, 1, y],
[0, 0, 1]], dtype=np.int32)
height, width, channels = self.get_shape()
if not cut:
img = self.create_blank_img(height + abs(x), width + abs(y))
for i in range(self.height):
for j in range(self.width):
# Get new position
src = np.array([i, j, 1], dtype=np.int32)
dst = np.dot(transform_mat, src)
if x >= 0 and y >= 0:
img[dst[0]][dst[1]] = self.img[i][j]
elif y >= 0:
img[i][dst[1]] = self.img[i][j]
elif x >= 0:
img[dst[0]][j] = self.img[i][j]
else:
img[i][j] = self.img[i][j]
else:
img = self.create_blank_img()
for i in range(self.height):
for j in range(self.width):
src = np.array([i, j, 1], dtype=np.int32)
dst = np.dot(transform_mat, src)
if 0 <= dst[0] < self.height:
if 0 <= dst[1] < self.width:
img[dst[0]][dst[1]] = self.img[i][j]
self.processed_img = img
return img
def rotate(self, angle, clockwise=True, cut=True):
"""Rotates the image clockwise or anti-clockwise.
Rotate the image. Keep the full image or cutting edges.
Args:
angle (int): The angle of rotations.
clockwise (bool): Clockwise or not.
cut (bool): If rotation cutting the image or not.
Returns:
Rotated image.
"""
if not clockwise:
angle = -angle
rad = angle * math.pi / 180.0
cos_a = math.cos(rad)
sin_a = math.sin(rad)
height, width, channels = self.get_shape()
trans_descartes = np.array([[-1, 0, 0],
[0, 1, 0],
[0.5 * height, -0.5 * width, 1]], dtype=np.float32)
trans_back = np.array([[-1, 0, 0],
[0, 1, 0],
[0.5 * height, 0.5 * width, 1]], dtype=np.float32)
rotate_mat = np.array([[cos_a, sin_a, 0],
[-sin_a, cos_a, 0],
[0, 0, 1]])
trans_mat = np.dot(np.dot(trans_descartes, rotate_mat), trans_back)
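        # Design note: trans_descartes re-centers coordinates on the image
        # center (flipping the row axis into a Cartesian y axis), rotate_mat
        # rotates about that origin, and trans_back maps back to array
        # indices; composing them yields the single transform matrix used below.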
if cut:
img = self.create_blank_img()
for i in range(self.height):
for j in range(self.width):
src = np.array([i, j, 1], dtype=np.int32)
dst = np.dot(src, trans_mat)
x = int(dst[0])
y = int(dst[1])
if 0 <= x < height and 0 <= y < width:
img[x][y] = self.img[i][j]
else:
org_x1 = np.array([0.5 * height, -0.5 * width, 1], dtype=np.int32)
org_x2 = np.array([-0.5 * height, -0.5 * width, 1], dtype=np.int32)
new_x1 = np.dot(org_x1, rotate_mat)
new_x2 = np.dot(org_x2, rotate_mat)
new_height = 2 * math.ceil(max(abs(new_x1[0]), abs(new_x2[0])))
new_width = 2 * math.ceil(max(abs(new_x1[1]), abs(new_x2[1])))
img = self.create_blank_img(new_height + 1, new_width + 1)
new_trans_back = np.array([[-1, 0, 0],
[0, 1, 0],
[0.5 * new_height, 0.5 * new_width, 1]], dtype=np.float32)
new_trans_mat = np.dot(np.dot(trans_descartes, rotate_mat), new_trans_back)
for i in range(self.height):
for j in range(self.width):
src = np.array([i, j, 1], dtype=np.int32)
dst = np.dot(src, new_trans_mat)
x = int(dst[0])
y = int(dst[1])
img[x][y] = self.img[i][j]
self.processed_img = img
return img
def resize(self, m, n):
"""Resize the image
Args:
            m (float): scale factor on height.
            n (float): scale factor on width.
Returns:
Resized image.
"""
height, width, channels = self.get_shape()
height = int(height * m)
width = int(width * n)
img = self.create_blank_img(height, width, channels)
for i in range(height):
for j in range(width):
src_i = int(i / m)
src_j = int(j / n)
img[i][j] = self.img[src_i][src_j]
self.processed_img = img
return img
def trans_gray(self, level=256):
"""Transform an RGB image to Gray Scale image.
Gray scale can be quantized to 256, 128, 64, 32,
16, 8, 4, 2 levels.
Args:
level (int): Quantization level. Default 256.
Returns:
Gray scale image.
"""
if self.img is None:
return
n = math.log2(level)
if n < 1 or n > 8:
            raise ValueError('Quantization level must be a power of 2 between 2 and 256')
# Turn image from RGB to Gray scale image
img = self.create_blank_img(channels=1)
step = 256 / level
        if self.channels == 3:
for i in range(self.height):
for j in range(self.width):
pixel = self.img[i][j]
gray = 0.299 * pixel[0] + 0.587 * pixel[1] + 0.114 * pixel[2]
mapped_gray = int(gray / step) / (level - 1) * 255
img[i][j] = round(mapped_gray)
else:
for i in range(self.height):
for j in range(self.width):
pixel = self.img[i][j]
mapped_gray = int(pixel / step) / (level - 1) * 255
img[i][j] = round(mapped_gray)
self.processed_img = img
return img
def create_blank_img(self, height=None, width=None, channels=3):
"""Create a blank pure black image.
Default create a blank black image with same height,
width and channels as the loaded image.
Args:
height (int): Height of new image. Measured by pixels.
width (int): Width of new image. Measured by pixels.
channels (int): Channels. Default 3, RGB.
Returns:
New image.
"""
if not height and not width:
height, width, _ = self.get_shape()
if not height or not width:
raise Exception("Invalid height or width!")
if channels is None:
channels = 1
size = (height, width, channels)
img = np.zeros(size, dtype=np.uint8)
return img
def get_hist(self):
"""Get histogram of given image
Returns:
Image of histogram of the image.
"""
hist = np.zeros(256, dtype=np.uint32)
hist_img = np.zeros((256, 256, 3), dtype=np.uint8)
img = self.trans_gray()
for i in range(self.height):
for j in range(self.width):
# print(img[i][j][0])
g_p = int(img[i][j])
hist[g_p] += 1
# Maximum count in all 256 levels
max_freq = max(hist)
for i in range(256):
x = (i, 255)
# Calculate the relative frequency compared to maximum frequency
p = int(255 - hist[i] * 255 / max_freq)
y = (i, p)
cv.line(hist_img, x, y, (0, 255, 0))
return hist_img
def hist_equalization(self):
"""Histogram equalization of the image.
Returns:
Image after histogram equalization.
"""
hist = np.zeros(256, dtype=np.uint32)
img = self.trans_gray()
for i in range(self.height):
for j in range(self.width):
g_p = int(img[i][j])
hist[g_p] += 1
hist_c = np.zeros(256, dtype=np.uint32)
hist_c[0] = hist[0]
for i in range(1, 256):
hist_c[i] = hist_c[i - 1] + hist[i]
factor = 255.0 / (self.height * self.width)
for i in range(self.height):
for j in range(self.width):
g_p = int(img[i][j])
g_q = int(factor * hist_c[g_p])
img[i][j] = g_q
self.processed_img = img
return img
def smooth(self, h=None):
"""Smooth
Args:
h (numpy.array): Smooth operator
Return:
Image after smoothing.
"""
height = self.height
width = self.width
img = self.trans_gray()
filtered_img = self.create_blank_img(channels=1)
if h is None:
h = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 16.0
for i in range(height):
for j in range(width):
if i in [0, height - 1] or j in [0, width - 1]:
filtered_img[i][j] = img[i][j]
else:
x = img[i - 1:i + 2, j - 1:j + 2]
x = x.squeeze()
                    m = np.multiply(x, h)
import numpy as np
import cv2
import matplotlib.pyplot as plt
import pandas as pd
from scipy.optimize import linear_sum_assignment
from scipy import signal
from sklearn.neighbors import KernelDensity
import copy
import os
import utm
import rasterio
from CountLine import CountLine
import sys
sys.path.append('/home/golden/general-detection/functions')
import koger_tracking as ktf
def mark_bats_on_image(image_raw, centers, radii=None,
scale_circle_size=5, contours=None,
draw_contours=False):
'''
Draw a bunch of circles on given image
image: 2D or 3D image
centers: shape(n,2) array of circle centers
radii: list of circle radii
'''
if len(image_raw.shape) < 2:
print('image has too few dimensions')
return None
if len(image_raw.shape) == 2:
color = 200
else:
if image_raw.shape[2] == 3:
color = (0, 255, 255)
else:
print('image is the wrong shape')
return None
image = np.copy(image_raw)
if radii is None:
radii = np.ones(len(centers))
for circle_ind, radius in enumerate(radii):
cv2.circle(image,
(centers[circle_ind, 0].astype(int),
centers[circle_ind, 1].astype(int)),
int(radius * scale_circle_size), color , 1)
if draw_contours and contours:
for contour in contours:
if len(contour.shape) > 1:
rect = cv2.minAreaRect(contour)
box = cv2.boxPoints(rect)
box_d = np.int0(box)
cv2.drawContours(image, [box_d], 0, (0,255,100), 1)
return image
def get_tracks_in_frame(frame_ind, track_list):
""" Return list of all tracks present in frame ind. """
tracks_in_frame = []
for track in track_list:
if (track['last_frame'] >= frame_ind
and track['first_frame'] <= frame_ind):
tracks_in_frame.append(track)
return tracks_in_frame
def draw_tracks_on_frame(frame, frame_ind, track_list,
positions=None, figure_scale=60,
track_width=2, position_alpha=.5,
draw_whole_track=False, shift=0):
""" Draw all active tracks and all detected bat locations on given frame.
frame: loaded image - np array
frame_ind: frame number
track_list: list of all tracks in observation
positions: all detected bat positions in observation
figure_scale: how big to display output image
track_width: width of plotted tracks
position_alpha: alpha of position dots
draw_whole_track: Boolean draw track in the future of frame_ind
shift: compensate for lack of padding in network when drawing tracks
on input frames
"""
plt.figure(
figsize = (int(frame.shape[1] / figure_scale),
int(frame.shape[0] / figure_scale)))
plt.imshow(frame)
num_tracks = 0
for track in track_list:
if (track['last_frame'] >= frame_ind
and track['first_frame'] <= frame_ind):
rel_frame = frame_ind - track['first_frame']
if draw_whole_track:
plt.plot(track['track'][:, 0] + shift,
track['track'][:, 1] + shift,
linewidth=track_width)
else:
plt.plot(track['track'][:rel_frame, 0] + shift,
track['track'][:rel_frame, 1] + shift,
linewidth=track_width)
num_tracks += 1
if positions:
plt.scatter(positions[frame_ind][:,0] + shift,
positions[frame_ind][:,1] + shift,
c='red', alpha=position_alpha)
plt.title('Tracks: {}, Bats: {}'.format(num_tracks,
len(positions[frame_ind])))
def subtract_background(images, image_ind, background_sum):
'''
Subtract an averaged background from the image. Average over frame_range in the past and future
images: 3d numpy array (num images, height, width)
image_ind: index in circular image array
background_sum: sum of blue channel pixels across 0 dimension of images
'''
background = np.floor_divide(background_sum, images.shape[0])
# The order of subtraction means dark bats are now light in image_dif
image_dif = background - images[image_ind, :, :, 2]
return image_dif, background
def preprocess_to_binary(image, binary_thresh, background):
'''
    Converts a 2D image to binary by thresholding against a scaled background
    image: 2D np array (background-subtracted difference image)
    binary_thresh: fraction of the local background value used as the threshold
    background: background image (2D, probably blue channel)
'''
# # Rescale image pixels within range
# image_rescale = exposure.rescale_intensity(
# image, in_range=(low_pix_value, high_pix_value), out_range=(0, 255))
image_rescale = image
# Binarize image based on threshold
min_difference = 5
threshold = binary_thresh * background
threshold = np.where(threshold < min_difference, min_difference, threshold)
binary_image = np.where(image < threshold, 0, 255)
return binary_image
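# Example (illustrative): with binary_thresh=0.1, a difference pixel is kept
# (set to 255) only when it reaches 10% of the local background value, with the
# threshold never dropping below min_difference.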
def get_blob_info(binary_image, background=None, size_threshold=0):
'''
Get contours from binary image. Then find center and average radius of each contour
binary_image: 2D image
background: 2D array used to see locally how dark the background is
size_threshold: radius above which blob is considered real
'''
contours, hierarchy = cv2.findContours(binary_image.astype(np.uint8).copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
centers = []
# Size of bounding rectangles
sizes = []
areas = []
# angle of bounding rectangle
angles = []
rects = []
good_contours = []
contours = [np.squeeze(contour) for contour in contours]
for contour_ind, contour in enumerate(contours):
if len(contour.shape) > 1:
rect = cv2.minAreaRect(contour)
if background is not None:
darkness = background[int(rect[0][1]), int(rect[0][0])]
if darkness < 30:
dark_size_threshold = size_threshold + 22
elif darkness < 50:
dark_size_threshold = size_threshold + 15
elif darkness < 80:
dark_size_threshold = size_threshold + 10
elif darkness < 100:
dark_size_threshold = size_threshold + 5
# elif darkness < 130:
# dark_size_threshold = size_threshold + 3
else:
dark_size_threshold = size_threshold
else:
dark_size_threshold = 0 # just used in if statement
area = rect[1][0] * rect[1][1]
if (area >= dark_size_threshold) or background is None:
centers.append(rect[0])
sizes.append(rect[1])
angles.append(rect[2])
good_contours.append(contour)
areas.append(area)
rects.append(rect)
if centers:
centers = np.stack(centers, 0)
sizes = np.stack(sizes, 0)
else:
centers = np.zeros((0,2))
return (centers, np.array(areas), good_contours, angles, sizes, rects)
def draw_circles_on_image(image, centers, sizes, rects=None):
'''
Draw a bunch of circles on given image
image: 2D or 3D image
centers: shape(n,2) array of circle centers
rects: list of minimum bounding rectangles
'''
if len(image.shape) < 2:
print('image has too few dimensions')
return None
if len(image.shape) == 2:
color = 200
rect_color = 100
else:
if image.shape[2] == 3:
color = (0, 255, 255)
rect_color = (0,255,100)
else:
print('image is the wrong shape')
return None
for circle_ind, size in enumerate(sizes):
cv2.circle(image, (centers[circle_ind, 0].astype(int), centers[circle_ind, 1].astype(int)),
int(np.max(size)), color , 1)
if rects:
for rect in rects:
box = cv2.boxPoints(rect)
box_d = np.int0(box)
cv2.drawContours(image, [box_d], 0, rect_color, 1)
return image
def update_circular_image_array(images, image_ind, image_files, frame_num, background_sum):
""" Add new image if nessesary and increment image_ind.
Also update sum of pixels across array for background subtraction.
If frame_num is less than half size of array than don't need to
replace image since intitally all images in average are in the future.
images: image array size (num images averaging, height, width, channel)
image_ind: index of focal frame in images
image_files: list of all image files in observation
frame_num: current frame number in observation
    background_sum: sum of the blue channel across the buffered frames
"""
if (frame_num > int(images.shape[0] / 2)
and frame_num < (len(image_files) - int(images.shape[0] / 2))):
replace_ind = image_ind + int(images.shape[0] / 2)
replace_ind %= images.shape[0]
# Subtract the pixel values that are about to be removed from background
background_sum -= images[replace_ind, :, :, 2]
image_file = image_files[frame_num + int(images.shape[0] / 2)]
image = cv2.imread(image_file)
images[replace_ind] = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Add new pixel values to the background sum
background_sum += images[replace_ind, :, :, 2]
image_ind += 1
    # image_ind should always be between 0 and images.shape[0] - 1
image_ind = image_ind % images.shape[0]
return images, image_ind, background_sum
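# --- Illustrative sketch (not part of the original module) ---
# The ring-buffer bookkeeping of update_circular_image_array in isolation:
# with a 5-frame buffer, the slot that is overwritten always sits half a
# buffer ahead of the focal index, modulo the buffer length.
def _demo_ring_indices(buffer_len=5, image_ind=2, frame_nums=(3, 4, 5)):
    trace = []
    for frame_num in frame_nums:
        replace_ind = (image_ind + buffer_len // 2) % buffer_len
        trace.append((frame_num, image_ind, replace_ind))
        image_ind = (image_ind + 1) % buffer_len
    return trace  # [(3, 2, 4), (4, 3, 0), (5, 4, 1)]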
def initialize_image_array(image_files, focal_frame_ind, num_images):
""" Create array of num_images x h x w x 3.
Args:
image_files (list): sorted paths to all image files in observation
        focal_frame_ind (int): index of the frame being processed
num_images (int): number of frames used for background subtraction
return array, index in array where focal frame is located
"""
images = []
first_frame_ind = focal_frame_ind - (num_images // 2)
if num_images % 2 == 0:
# even
last_frame_ind = focal_frame_ind + (num_images // 2) - 1
else:
# odd
last_frame_ind = focal_frame_ind + (num_images // 2)
for file in image_files[first_frame_ind:last_frame_ind+1]:
image = cv2.imread(file)
images.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
images = np.stack(images)
focal_ind = num_images // 2
return(images, focal_ind)
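# --- Illustrative sketch (not part of the original module) ---
# Window bounds chosen by initialize_image_array: the focal frame always
# lands at index num_images // 2 of the returned array.
def _demo_window_bounds(focal=10, n=5):
    first = focal - n // 2
    last = focal + n // 2 - (1 if n % 2 == 0 else 0)
    return first, last  # (8, 12) for n=5; (8, 11) for n=4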
def process_frame(images, focal_frame_ind, bat_thresh, background_sum, bat_area_thresh, debug=False):
"""Process bat frame.
images: n x h x w x c array where the n images are averaged together for background subtraction
focal_frame_ind: which index in images array should be processed
bat_thresh: float value to use for thresholding bat from background
background_sum: sum of all blue channel pixels across the n dimension of images
debug: if true return binary image
"""
size_threshold = bat_area_thresh
max_bats = 600
    mean = np.mean(images[focal_frame_ind, :, :, 2])
import unittest
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
from dateutil.tz import tzlocal
from ipywidgets import widgets
from nwbwidgets.misc import (
show_psth_raster,
PSTHWidget,
show_decomposition_traces,
show_decomposition_series,
RasterWidget,
show_session_raster,
show_annotations,
RasterGridWidget,
raster_grid,
)
from pynwb import NWBFile
from pynwb.misc import DecompositionSeries, AnnotationSeries
def test_show_psth():
data = np.random.random([6, 50])
assert isinstance(show_psth_raster(data=data, start=0, end=1), plt.Subplot)
def test_show_annotations():
timestamps = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
annotations = AnnotationSeries(name="test_annotations", timestamps=timestamps)
show_annotations(annotations)
class ShowPSTHTestCase(unittest.TestCase):
def setUp(self):
"""
Trials must exist.
"""
start_time = datetime(2017, 4, 3, 11, tzinfo=tzlocal())
create_date = datetime(2017, 4, 15, 12, tzinfo=tzlocal())
self.nwbfile = NWBFile(
session_description="NWBFile for PSTH",
identifier="NWB123",
session_start_time=start_time,
file_create_date=create_date,
)
self.nwbfile.add_unit_column("location", "the anatomical location of this unit")
self.nwbfile.add_unit_column(
"quality", "the quality for the inference of this unit"
)
self.nwbfile.add_unit(
spike_times=[2.2, 3.0, 4.5],
obs_intervals=[[1, 10]],
location="CA1",
quality=0.95,
)
self.nwbfile.add_unit(
spike_times=[2.2, 3.0, 25.0, 26.0],
obs_intervals=[[1, 10], [20, 30]],
location="CA3",
quality=0.85,
)
self.nwbfile.add_unit(
spike_times=[1.2, 2.3, 3.3, 4.5],
obs_intervals=[[1, 10], [20, 30]],
location="CA1",
quality=0.90,
)
self.nwbfile.add_trial_column(
name="stim", description="the visual stimuli during the trial"
)
self.nwbfile.add_trial(start_time=0.0, stop_time=2.0, stim="person")
self.nwbfile.add_trial(start_time=3.0, stop_time=5.0, stim="ocean")
self.nwbfile.add_trial(start_time=6.0, stop_time=8.0, stim="desert")
def test_psth_widget(self):
assert isinstance(PSTHWidget(self.nwbfile.units), widgets.Widget)
def test_raster_widget(self):
assert isinstance(RasterWidget(self.nwbfile.units), widgets.Widget)
def test_show_session_raster(self):
assert isinstance(show_session_raster(self.nwbfile.units), plt.Axes)
def test_raster_grid_widget(self):
assert isinstance(RasterGridWidget(self.nwbfile.units), widgets.Widget)
def test_raster_grid(self):
trials = self.nwbfile.units.get_ancestor("NWBFile").trials
assert isinstance(
raster_grid(
self.nwbfile.units,
time_intervals=trials,
index=0,
start=-0.5,
end=20.0,
),
plt.Figure,
)
class ShowDecompositionTestCase(unittest.TestCase):
def setUp(self):
        data = np.random.rand(160, 2, 3)
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import hashlib
from warnings import warn
import six
import numpy as np
import pandas as pd
import matplotlib.collections as mcoll
import matplotlib.text as mtext
import matplotlib.transforms as mtransforms
from matplotlib.offsetbox import (TextArea, HPacker, VPacker)
from matplotlib.offsetbox import AuxTransformBox
from matplotlib.colors import ListedColormap
from mizani.bounds import rescale
from ..aes import rename_aesthetics
from ..scales.scale import scale_continuous
from ..utils import ColoredDrawingArea
from .guide import guide
class guide_colorbar(guide):
"""
Guide colorbar
Parameters
----------
barwidth : float
Width (in pixels) of the colorbar.
barheight : float
Height (in pixels) of the colorbar.
nbin : int
Number of bins for drawing a colorbar. A larger value yields
a smoother colorbar. Default is 20.
raster : bool
Whether to render the colorbar as a raster object.
ticks : bool
Whether tick marks on colorbar should be visible.
draw_ulim : bool
Whether to show the upper limit tick marks.
draw_llim : bool
Whether to show the lower limit tick marks.
direction : str in ``['horizontal', 'vertical']``
Direction of the guide.
kwargs : dict
Parameters passed on to :class:`.guide`
"""
# bar
barwidth = 23
barheight = 23*5
nbin = 20 # maximum number of bins
raster = True
# ticks
ticks = True
draw_ulim = True
draw_llim = True
# parameter
available_aes = {'colour', 'color', 'fill'}
def train(self, scale):
# Do nothing if scales are inappropriate
        if not (set(scale.aesthetics) & {'color', 'colour', 'fill'}):
warn("colorbar guide needs color or fill scales.")
return None
if not issubclass(scale.__class__, scale_continuous):
warn("colorbar guide needs continuous scales")
return None
# value = breaks (numeric) is used for determining the
# position of ticks
limits = scale.limits
breaks = scale.get_breaks(strict=True)
        breaks = np.asarray(breaks)
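    # --- Illustrative sketch (not part of the original module) ---
    # Hypothetical use of this guide through plotnine's public API,
    # assuming a continuous color aesthetic:
    #
    #   from plotnine import ggplot, aes, geom_point, guides
    #   p = (ggplot(df, aes('x', 'y', color='z'))
    #        + geom_point()
    #        + guides(color=guide_colorbar(nbin=10, raster=False)))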
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import pandas as pd
import torch
import kvt
def compute_metrics(predicts, labels):
N, H, W = predicts.shape
predicts = predicts.reshape((-1, H*W))
labels = labels.reshape((-1, H*W))
sum_p = np.sum(predicts, axis=1)
sum_l = np.sum(labels, axis=1)
intersection = np.sum(np.logical_and(predicts, labels), axis=1)
numer = 2*intersection
denom = sum_p + sum_l
dice = numer / (denom + 1e-6)
    empty_indices = np.where(sum_l <= 0)
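# --- Illustrative sketch (not part of the original module) ---
# A self-contained Dice score for a single binary mask pair, mirroring the
# per-sample arithmetic above (assumes plain 0/1 numpy arrays):
def _dice_single(pred, label, eps=1e-6):
    intersection = np.sum(np.logical_and(pred, label))
    return 2.0 * intersection / (np.sum(pred) + np.sum(label) + eps)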
"""
Catalog manipulation and lookup utilities
NOT IMPLEMENTED
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import os
import sys
from astropy.io import fits
if sys.version_info[0] == 3:
xrange = range
# download from https://github.com/cristobal-sifon/readfile
import readfile
def crossmatch(cat1, cat2, cols1=0, cols2=0, tolerance=0, relative=0):
"""
Cross-match two catalogs according to some user-specified criteria
Parameters
----------
cat1,cat2 : np.ndarray or dict
Whatever values should be cross-matched in each catalog.
They could be object names, coordinates, etc, and there may
be more than one entry per catalog.
cols1,cols2 : list of int or list of str
The column number(s) in each catalog to use. Both must have
the same number of entries. Ignored if cat1,cat2 are single
columns. If cat1 or cat2 is/are dict, then the
corresponding col1/col2 must be a list of strings with
entry names in the catalog.
tolerance : float
        relative or absolute tolerance when comparing two arrays of
numbers. If set to zero, matching will be exact.
relative : int (0,1,2)
Whether the tolerance is an absolute value (0), or with
respect to the values in cat1 or cat2.
Returns
------
match1,match2 : (array of) boolean arrays
Mask arrays containing True for objects that pass the
matching criteria and False for those that don't
"""
cats = [cat1, cat2]
cols = [cols1, cols2]
# need to check the depth of cat1,cat2 and always make 2d
for i in xrange(2):
        if len(np.array(cats[i]).shape) == 1:
            cats[i] = [cats[i]]
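# --- Illustrative sketch (not part of the original function) ---
# The absolute-tolerance matching described in the docstring, reduced to
# two 1-d numeric columns (relative=0): entries match when |a - b| <= tol,
# and tolerance=0 demands exact equality.
def _match_1d(col1, col2, tolerance=0):
    col1, col2 = np.asarray(col1), np.asarray(col2)
    diff = np.abs(col1[:, None] - col2[None, :])
    matched = diff <= tolerance if tolerance > 0 else diff == 0
    return matched.any(axis=1), matched.any(axis=0)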
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import math
import json
import cv2
import os
from collections import defaultdict
import pycocotools.coco as coco
import torch
import torch.utils.data as data
from centertrack.utils.image import flip, color_aug
from centertrack.utils.image import get_affine_transform, affine_transform
from centertrack.utils.image import gaussian_radius, draw_umich_gaussian
import copy
class GenericDataset(data.Dataset):
is_fusion_dataset = False
default_resolution = None
num_categories = None
class_name = None
# cat_ids: map from 'category_id' in the annotation files to 1..num_categories
# Not using 0 because 0 is used for don't care region and ignore loss.
cat_ids = None
max_objs = None
rest_focal_length = 1200
num_joints = 17
flip_idx = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16]]
edges = [[0, 1], [0, 2], [1, 3], [2, 4],
[4, 6], [3, 5], [5, 6],
[5, 7], [7, 9], [6, 8], [8, 10],
[6, 12], [5, 11], [11, 12],
[12, 14], [14, 16], [11, 13], [13, 15]]
mean = np.array([0.40789654, 0.44719302, 0.47026115],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.28863828, 0.27408164, 0.27809835],
dtype=np.float32).reshape(1, 1, 3)
_eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
_eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
ignore_val = 1
nuscenes_att_range = {0: [0, 1], 1: [0, 1], 2: [2, 3, 4], 3: [2, 3, 4],
4: [2, 3, 4], 5: [5, 6, 7], 6: [5, 6, 7], 7: [5, 6, 7]}
def __init__(self, opt=None, split=None, ann_path=None, img_dir=None):
super(GenericDataset, self).__init__()
if opt is not None and split is not None:
self.split = split
self.opt = opt
self._data_rng = np.random.RandomState(123)
if ann_path is not None and img_dir is not None:
print('==> initializing {} data from {}, \n images from {} ...'.format(
split, ann_path, img_dir))
self.coco = coco.COCO(ann_path)
self.images = self.coco.getImgIds()
if opt.tracking:
if not ('videos' in self.coco.dataset):
self.fake_video_data()
print('Creating video index!')
self.video_to_images = defaultdict(list)
for image in self.coco.dataset['images']:
self.video_to_images[image['video_id']].append(image)
self.img_dir = img_dir
def __getitem__(self, index):
opt = self.opt
img, anns, img_info, img_path = self._load_data(index)
height, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
s = max(img.shape[0], img.shape[1]) * 1.0 if not self.opt.not_max_crop \
else np.array([img.shape[1], img.shape[0]], np.float32)
aug_s, rot, flipped = 1, 0, 0
if self.split == 'train':
c, aug_s, rot = self._get_aug_param(c, s, width, height)
s = s * aug_s
if np.random.random() < opt.flip:
flipped = 1
img = img[:, ::-1, :]
anns = self._flip_anns(anns, width)
trans_input = get_affine_transform(
c, s, rot, [opt.input_w, opt.input_h])
trans_output = get_affine_transform(
c, s, rot, [opt.output_w, opt.output_h])
inp = self._get_input(img, trans_input)
ret = {'image': inp}
gt_det = {'bboxes': [], 'scores': [], 'clses': [], 'cts': []}
pre_cts, track_ids = None, None
if opt.tracking:
pre_image, pre_anns, frame_dist = self._load_pre_data(
img_info['video_id'], img_info['frame_id'],
img_info['sensor_id'] if 'sensor_id' in img_info else 1)
if flipped:
pre_image = pre_image[:, ::-1, :].copy()
pre_anns = self._flip_anns(pre_anns, width)
if opt.same_aug_pre and frame_dist != 0:
trans_input_pre = trans_input
trans_output_pre = trans_output
else:
c_pre, aug_s_pre, _ = self._get_aug_param(
c, s, width, height, disturb=True)
s_pre = s * aug_s_pre
trans_input_pre = get_affine_transform(
c_pre, s_pre, rot, [opt.input_w, opt.input_h])
trans_output_pre = get_affine_transform(
c_pre, s_pre, rot, [opt.output_w, opt.output_h])
pre_img = self._get_input(pre_image, trans_input_pre)
pre_hm, pre_cts, track_ids = self._get_pre_dets(
pre_anns, trans_input_pre, trans_output_pre)
ret['pre_img'] = pre_img
if opt.pre_hm:
ret['pre_hm'] = pre_hm
### init samples
self._init_ret(ret, gt_det)
calib = self._get_calib(img_info, width, height)
num_objs = min(len(anns), self.max_objs)
for k in range(num_objs):
ann = anns[k]
cls_id = int(self.cat_ids[ann['category_id']])
if cls_id > self.opt.num_classes or cls_id <= -999:
continue
bbox, bbox_amodal = self._get_bbox_output(
ann['bbox'], trans_output, height, width)
if cls_id <= 0 or ('iscrowd' in ann and ann['iscrowd'] > 0):
self._mask_ignore_or_crowd(ret, cls_id, bbox)
continue
self._add_instance(
ret, gt_det, k, cls_id, bbox, bbox_amodal, ann, trans_output, aug_s,
calib, pre_cts, track_ids)
if self.opt.debug > 0:
gt_det = self._format_gt_det(gt_det)
meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_info['id'],
'img_path': img_path, 'calib': calib,
'flipped': flipped}
ret['meta'] = meta
return ret
def get_default_calib(self, width, height):
calib = np.array([[self.rest_focal_length, 0, width / 2, 0],
[0, self.rest_focal_length, height / 2, 0],
[0, 0, 1, 0]])
return calib
def _load_image_anns(self, img_id, coco, img_dir):
img_info = coco.loadImgs(ids=[img_id])[0]
file_name = img_info['file_name']
img_path = os.path.join(img_dir, file_name)
ann_ids = coco.getAnnIds(imgIds=[img_id])
anns = copy.deepcopy(coco.loadAnns(ids=ann_ids))
img = cv2.imread(img_path)
return img, anns, img_info, img_path
def _load_data(self, index):
coco = self.coco
img_dir = self.img_dir
img_id = self.images[index]
img, anns, img_info, img_path = self._load_image_anns(img_id, coco, img_dir)
return img, anns, img_info, img_path
def _load_pre_data(self, video_id, frame_id, sensor_id=1):
img_infos = self.video_to_images[video_id]
    # If training, randomly sample nearby frames as the "previous" frame
    # If testing, get the exact previous frame
if 'train' in self.split:
img_ids = [(img_info['id'], img_info['frame_id']) \
for img_info in img_infos \
if abs(img_info['frame_id'] - frame_id) < self.opt.max_frame_dist and \
(not ('sensor_id' in img_info) or img_info['sensor_id'] == sensor_id)]
else:
img_ids = [(img_info['id'], img_info['frame_id']) \
for img_info in img_infos \
if (img_info['frame_id'] - frame_id) == -1 and \
(not ('sensor_id' in img_info) or img_info['sensor_id'] == sensor_id)]
if len(img_ids) == 0:
img_ids = [(img_info['id'], img_info['frame_id']) \
for img_info in img_infos \
if (img_info['frame_id'] - frame_id) == 0 and \
(not ('sensor_id' in img_info) or img_info['sensor_id'] == sensor_id)]
rand_id = np.random.choice(len(img_ids))
img_id, pre_frame_id = img_ids[rand_id]
frame_dist = abs(frame_id - pre_frame_id)
img, anns, _, _ = self._load_image_anns(img_id, self.coco, self.img_dir)
return img, anns, frame_dist
def _get_pre_dets(self, anns, trans_input, trans_output):
hm_h, hm_w = self.opt.input_h, self.opt.input_w
down_ratio = self.opt.down_ratio
trans = trans_input
    return_hm = self.opt.pre_hm
    pre_hm = np.zeros((1, hm_h, hm_w), dtype=np.float32) if return_hm else None
pre_cts, track_ids = [], []
for ann in anns:
cls_id = int(self.cat_ids[ann['category_id']])
if cls_id > self.opt.num_classes or cls_id <= -99 or \
('iscrowd' in ann and ann['iscrowd'] > 0):
continue
bbox = self._coco_box_to_bbox(ann['bbox'])
bbox[:2] = affine_transform(bbox[:2], trans)
bbox[2:] = affine_transform(bbox[2:], trans)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, hm_w - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, hm_h - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
max_rad = 1
if (h > 0 and w > 0):
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
max_rad = max(max_rad, radius)
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct0 = ct.copy()
conf = 1
ct[0] = ct[0] + np.random.randn() * self.opt.hm_disturb * w
ct[1] = ct[1] + np.random.randn() * self.opt.hm_disturb * h
conf = 1 if np.random.random() > self.opt.lost_disturb else 0
ct_int = ct.astype(np.int32)
if conf == 0:
pre_cts.append(ct / down_ratio)
else:
pre_cts.append(ct0 / down_ratio)
track_ids.append(ann['track_id'] if 'track_id' in ann else -1)
      if return_hm:
draw_umich_gaussian(pre_hm[0], ct_int, radius, k=conf)
      if np.random.random() < self.opt.fp_disturb and return_hm:
ct2 = ct0.copy()
# Hard code heatmap disturb ratio, haven't tried other numbers.
ct2[0] = ct2[0] + np.random.randn() * 0.05 * w
ct2[1] = ct2[1] + np.random.randn() * 0.05 * h
ct2_int = ct2.astype(np.int32)
draw_umich_gaussian(pre_hm[0], ct2_int, radius, k=conf)
return pre_hm, pre_cts, track_ids
def _get_border(self, border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
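  # --- Illustrative sketch (not part of the original class) ---
  # _get_border halves the requested border until a non-empty crop range
  # remains, so np.random.randint(low=border, high=size - border) is valid:
  #
  #   GenericDataset()._get_border(128, 512)  # -> 128 (512 - 128 > 128)
  #   GenericDataset()._get_border(128, 200)  # -> 64  (200 - 64 > 64)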
def _get_aug_param(self, c, s, width, height, disturb=False):
if (not self.opt.not_rand_crop) and not disturb:
aug_s = np.random.choice(np.arange(0.6, 1.4, 0.1))
w_border = self._get_border(128, width)
h_border = self._get_border(128, height)
c[0] = np.random.randint(low=w_border, high=width - w_border)
c[1] = np.random.randint(low=h_border, high=height - h_border)
else:
sf = self.opt.scale
cf = self.opt.shift
if type(s) == float:
s = [s, s]
c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
aug_s = np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
if np.random.random() < self.opt.aug_rot:
rf = self.opt.rotate
rot = np.clip(np.random.randn()*rf, -rf*2, rf*2)
else:
rot = 0
return c, aug_s, rot
def _flip_anns(self, anns, width):
for k in range(len(anns)):
bbox = anns[k]['bbox']
anns[k]['bbox'] = [
width - bbox[0] - 1 - bbox[2], bbox[1], bbox[2], bbox[3]]
if 'hps' in self.opt.heads and 'keypoints' in anns[k]:
keypoints = np.array(anns[k]['keypoints'], dtype=np.float32).reshape(
self.num_joints, 3)
keypoints[:, 0] = width - keypoints[:, 0] - 1
for e in self.flip_idx:
keypoints[e[0]], keypoints[e[1]] = \
keypoints[e[1]].copy(), keypoints[e[0]].copy()
anns[k]['keypoints'] = keypoints.reshape(-1).tolist()
if 'rot' in self.opt.heads and 'alpha' in anns[k]:
anns[k]['alpha'] = np.pi - anns[k]['alpha'] if anns[k]['alpha'] > 0 \
else - np.pi - anns[k]['alpha']
if 'amodel_offset' in self.opt.heads and 'amodel_center' in anns[k]:
anns[k]['amodel_center'][0] = width - anns[k]['amodel_center'][0] - 1
if self.opt.velocity and 'velocity' in anns[k]:
anns[k]['velocity'] = [-10000, -10000, -10000]
return anns
def _get_input(self, img, trans_input):
inp = cv2.warpAffine(img, trans_input,
(self.opt.input_w, self.opt.input_h),
flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
if self.split == 'train' and not self.opt.no_color_aug:
color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
return inp
def _init_ret(self, ret, gt_det):
max_objs = self.max_objs * self.opt.dense_reg
ret['hm'] = np.zeros(
(self.opt.num_classes, self.opt.output_h, self.opt.output_w),
np.float32)
ret['ind'] = np.zeros((max_objs), dtype=np.int64)
ret['cat'] = np.zeros((max_objs), dtype=np.int64)
ret['mask'] = np.zeros((max_objs), dtype=np.float32)
regression_head_dims = {
'reg': 2, 'wh': 2, 'tracking': 2, 'ltrb': 4, 'ltrb_amodal': 4,
'nuscenes_att': 8, 'velocity': 3, 'hps': self.num_joints * 2,
'dep': 1, 'dim': 3, 'amodel_offset': 2}
for head in regression_head_dims:
if head in self.opt.heads:
ret[head] = np.zeros(
(max_objs, regression_head_dims[head]), dtype=np.float32)
ret[head + '_mask'] = np.zeros(
(max_objs, regression_head_dims[head]), dtype=np.float32)
gt_det[head] = []
if 'hm_hp' in self.opt.heads:
num_joints = self.num_joints
ret['hm_hp'] = np.zeros(
(num_joints, self.opt.output_h, self.opt.output_w), dtype=np.float32)
ret['hm_hp_mask'] = np.zeros(
(max_objs * num_joints), dtype=np.float32)
ret['hp_offset'] = np.zeros(
(max_objs * num_joints, 2), dtype=np.float32)
ret['hp_ind'] = np.zeros((max_objs * num_joints), dtype=np.int64)
ret['hp_offset_mask'] = np.zeros(
(max_objs * num_joints, 2), dtype=np.float32)
ret['joint'] = np.zeros((max_objs * num_joints), dtype=np.int64)
if 'rot' in self.opt.heads:
ret['rotbin'] = np.zeros((max_objs, 2), dtype=np.int64)
ret['rotres'] = np.zeros((max_objs, 2), dtype=np.float32)
ret['rot_mask'] = np.zeros((max_objs), dtype=np.float32)
gt_det.update({'rot': []})
def _get_calib(self, img_info, width, height):
if 'calib' in img_info:
calib = np.array(img_info['calib'], dtype=np.float32)
else:
calib = np.array([[self.rest_focal_length, 0, width / 2, 0],
[0, self.rest_focal_length, height / 2, 0],
[0, 0, 1, 0]])
return calib
def _ignore_region(self, region, ignore_val=1):
np.maximum(region, ignore_val, out=region)
def _mask_ignore_or_crowd(self, ret, cls_id, bbox):
# mask out crowd region, only rectangular mask is supported
if cls_id == 0: # ignore all classes
self._ignore_region(ret['hm'][:, int(bbox[1]): int(bbox[3]) + 1,
int(bbox[0]): int(bbox[2]) + 1])
else:
# mask out one specific class
self._ignore_region(ret['hm'][abs(cls_id) - 1,
int(bbox[1]): int(bbox[3]) + 1,
int(bbox[0]): int(bbox[2]) + 1])
if ('hm_hp' in ret) and cls_id <= 1:
self._ignore_region(ret['hm_hp'][:, int(bbox[1]): int(bbox[3]) + 1,
int(bbox[0]): int(bbox[2]) + 1])
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
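  # --- Illustrative sketch (not part of the original class) ---
  # COCO boxes are [x, y, w, h]; the conversion above returns corner format
  # [x1, y1, x2, y2], e.g. [10, 20, 30, 40] -> [10., 20., 40., 60.].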
def _get_bbox_output(self, bbox, trans_output, height, width):
bbox = self._coco_box_to_bbox(bbox).copy()
rect = np.array([[bbox[0], bbox[1]], [bbox[0], bbox[3]],
[bbox[2], bbox[3]], [bbox[2], bbox[1]]], dtype=np.float32)
for t in range(4):
rect[t] = affine_transform(rect[t], trans_output)
bbox[:2] = rect[:, 0].min(), rect[:, 1].min()
bbox[2:] = rect[:, 0].max(), rect[:, 1].max()
bbox_amodal = copy.deepcopy(bbox)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.opt.output_w - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.opt.output_h - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
return bbox, bbox_amodal
def _add_instance(
self, ret, gt_det, k, cls_id, bbox, bbox_amodal, ann, trans_output,
aug_s, calib, pre_cts=None, track_ids=None):
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if h <= 0 or w <= 0:
return
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
ret['cat'][k] = cls_id - 1
ret['mask'][k] = 1
if 'wh' in ret:
ret['wh'][k] = 1. * w, 1. * h
ret['wh_mask'][k] = 1
ret['ind'][k] = ct_int[1] * self.opt.output_w + ct_int[0]
ret['reg'][k] = ct - ct_int
ret['reg_mask'][k] = 1
draw_umich_gaussian(ret['hm'][cls_id - 1], ct_int, radius)
gt_det['bboxes'].append(
np.array([ct[0] - w / 2, ct[1] - h / 2,
ct[0] + w / 2, ct[1] + h / 2], dtype=np.float32))
gt_det['scores'].append(1)
gt_det['clses'].append(cls_id - 1)
gt_det['cts'].append(ct)
if 'tracking' in self.opt.heads:
if ann['track_id'] in track_ids:
pre_ct = pre_cts[track_ids.index(ann['track_id'])]
ret['tracking_mask'][k] = 1
ret['tracking'][k] = pre_ct - ct_int
gt_det['tracking'].append(ret['tracking'][k])
else:
gt_det['tracking'].append(np.zeros(2, np.float32))
if 'ltrb' in self.opt.heads:
ret['ltrb'][k] = bbox[0] - ct_int[0], bbox[1] - ct_int[1], \
bbox[2] - ct_int[0], bbox[3] - ct_int[1]
ret['ltrb_mask'][k] = 1
if 'ltrb_amodal' in self.opt.heads:
ret['ltrb_amodal'][k] = \
bbox_amodal[0] - ct_int[0], bbox_amodal[1] - ct_int[1], \
bbox_amodal[2] - ct_int[0], bbox_amodal[3] - ct_int[1]
ret['ltrb_amodal_mask'][k] = 1
gt_det['ltrb_amodal'].append(bbox_amodal)
if 'nuscenes_att' in self.opt.heads:
if ('attributes' in ann) and ann['attributes'] > 0:
att = int(ann['attributes'] - 1)
ret['nuscenes_att'][k][att] = 1
ret['nuscenes_att_mask'][k][self.nuscenes_att_range[att]] = 1
gt_det['nuscenes_att'].append(ret['nuscenes_att'][k])
if 'velocity' in self.opt.heads:
if ('velocity' in ann) and min(ann['velocity']) > -1000:
ret['velocity'][k] = np.array(ann['velocity'], np.float32)[:3]
ret['velocity_mask'][k] = 1
gt_det['velocity'].append(ret['velocity'][k])
if 'hps' in self.opt.heads:
self._add_hps(ret, k, ann, gt_det, trans_output, ct_int, bbox, h, w)
if 'rot' in self.opt.heads:
self._add_rot(ret, ann, k, gt_det)
if 'dep' in self.opt.heads:
if 'depth' in ann:
ret['dep_mask'][k] = 1
ret['dep'][k] = ann['depth'] * aug_s
gt_det['dep'].append(ret['dep'][k])
else:
gt_det['dep'].append(2)
if 'dim' in self.opt.heads:
if 'dim' in ann:
ret['dim_mask'][k] = 1
ret['dim'][k] = ann['dim']
gt_det['dim'].append(ret['dim'][k])
else:
gt_det['dim'].append([1,1,1])
if 'amodel_offset' in self.opt.heads:
if 'amodel_center' in ann:
amodel_center = affine_transform(ann['amodel_center'], trans_output)
ret['amodel_offset_mask'][k] = 1
ret['amodel_offset'][k] = amodel_center - ct_int
gt_det['amodel_offset'].append(ret['amodel_offset'][k])
else:
gt_det['amodel_offset'].append([0, 0])
def _add_hps(self, ret, k, ann, gt_det, trans_output, ct_int, bbox, h, w):
num_joints = self.num_joints
pts = np.array(ann['keypoints'], np.float32).reshape(num_joints, 3) \
if 'keypoints' in ann else np.zeros((self.num_joints, 3), np.float32)
if self.opt.simple_radius > 0:
hp_radius = int(simple_radius(h, w, min_overlap=self.opt.simple_radius))
else:
hp_radius = gaussian_radius((math.ceil(h), math.ceil(w)))
hp_radius = max(0, int(hp_radius))
for j in range(num_joints):
pts[j, :2] = affine_transform(pts[j, :2], trans_output)
if pts[j, 2] > 0:
if pts[j, 0] >= 0 and pts[j, 0] < self.opt.output_w and \
pts[j, 1] >= 0 and pts[j, 1] < self.opt.output_h:
ret['hps'][k, j * 2: j * 2 + 2] = pts[j, :2] - ct_int
ret['hps_mask'][k, j * 2: j * 2 + 2] = 1
pt_int = pts[j, :2].astype(np.int32)
ret['hp_offset'][k * num_joints + j] = pts[j, :2] - pt_int
ret['hp_ind'][k * num_joints + j] = \
pt_int[1] * self.opt.output_w + pt_int[0]
ret['hp_offset_mask'][k * num_joints + j] = 1
ret['hm_hp_mask'][k * num_joints + j] = 1
ret['joint'][k * num_joints + j] = j
draw_umich_gaussian(
ret['hm_hp'][j], pt_int, hp_radius)
if pts[j, 2] == 1:
ret['hm_hp'][j, pt_int[1], pt_int[0]] = self.ignore_val
ret['hp_offset_mask'][k * num_joints + j] = 0
ret['hm_hp_mask'][k * num_joints + j] = 0
else:
pts[j, :2] *= 0
else:
pts[j, :2] *= 0
self._ignore_region(
ret['hm_hp'][j, int(bbox[1]): int(bbox[3]) + 1,
int(bbox[0]): int(bbox[2]) + 1])
gt_det['hps'].append(pts[:, :2].reshape(num_joints * 2))
def _add_rot(self, ret, ann, k, gt_det):
if 'alpha' in ann:
ret['rot_mask'][k] = 1
alpha = ann['alpha']
if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:
ret['rotbin'][k, 0] = 1
ret['rotres'][k, 0] = alpha - (-0.5 * np.pi)
if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:
ret['rotbin'][k, 1] = 1
ret['rotres'][k, 1] = alpha - (0.5 * np.pi)
gt_det['rot'].append(self._alpha_to_8(ann['alpha']))
else:
gt_det['rot'].append(self._alpha_to_8(0))
def _alpha_to_8(self, alpha):
ret = [0, 0, 0, 1, 0, 0, 0, 1]
if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:
r = alpha - (-0.5 * np.pi)
ret[1] = 1
ret[2], ret[3] = np.sin(r), np.cos(r)
if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:
r = alpha - (0.5 * np.pi)
ret[5] = 1
      ret[6], ret[7] = np.sin(r), np.cos(r)
    return ret
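  # --- Illustrative sketch (not part of the original class) ---
  # The 8-dim rotation target is [bin1_cls0, bin1_cls1, sin1, cos1,
  # bin2_cls0, bin2_cls1, sin2, cos2]. alpha = 0 lies in the overlap of
  # both bins, so _alpha_to_8(0.0) returns approximately
  # [0, 1, 1.0, 0.0, 0, 1, -1.0, 0.0].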
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Module used for skeleton binary 3D data analysis
"""
# import sys
# import os.path
# path_to_script = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(os.path.join(path_to_script, "../extern/dicom2fem/src"))
import logging
logger = logging.getLogger(__name__)
import traceback
import numpy as np
import scipy.ndimage
import scipy.interpolate
from io import open
class SkeletonAnalyser:
"""
| Example:
| skan = SkeletonAnalyser(data3d_skel, volume_data, voxelsize_mm)
| stats = skan.skeleton_analysis()
| data3d_skel: 3d array with skeleton as 1s and background as 0s
| use_filter_small_objects: removing small objects
| filter_small_threshold: threshold for small filtering
:arg cut_wrong_skeleton: remove short skeleton edges to terminal
:arg aggregate_near_nodes_distance: combine near nodes to one. Parameter
defines distance in mm.
"""
def __init__(
self,
data3d_skel,
volume_data=None,
voxelsize_mm=[1, 1, 1],
use_filter_small=False,
filter_small_threshold=3,
cut_wrong_skeleton=True,
aggregate_near_nodes_distance=0,
):
        # for now
self.volume_data = volume_data
self.voxelsize_mm = voxelsize_mm
self.aggregate_near_nodes_distance = aggregate_near_nodes_distance
# get array with 1 for edge, 2 is node and 3 is terminal
logger.debug("Generating sklabel...")
if use_filter_small:
data3d_skel = self.filter_small_objects(data3d_skel, filter_small_threshold)
self.data3d_skel = data3d_skel
        # generate nodes and edges (sklabel)
logger.debug("__skeleton_nodes, __generate_sklabel")
skelet_nodes = self.__skeleton_nodes(data3d_skel)
self.sklabel = self.__generate_sklabel(skelet_nodes)
self.cut_wrong_skeleton = cut_wrong_skeleton
self.curve_order = 2
self.spline_smoothing = None
logger.debug(
"Inited SkeletonAnalyser - voxelsize:"
+ str(voxelsize_mm)
+ " volumedata:"
+ str(volume_data is not None)
)
logger.debug("aggreg %s", self.aggregate_near_nodes_distance)
# import ipdb; ipdb.set_trace() # noqa BREAKPOINT
self.shifted_zero = None
self.shifted_sklabel = None
self.stats = None
self.branch_label = None
def to_yaml(self, filename):
if self.stats is None:
logger.error("Run .skeleton_analysis() before .to_yaml()")
return
from ruamel.yaml import YAML
yaml = YAML(typ="unsafe")
with open(filename, "wt", encoding="utf-8") as f:
yaml.dump(self.stats, f)
def skeleton_analysis(self, guiUpdateFunction=None):
"""
| Glossary:
| element: line structure of skeleton connected to node on both ends. (index>0)
| node: connection point of elements. It is one or few voxelsize_mm. (index<0)
| terminal: terminal node
"""
def updateFunction(num, length, part):
if (
int(length / 100.0) == 0
or (num % int(length / 100.0) == 0)
or num == length
):
if guiUpdateFunction is not None:
guiUpdateFunction(num, length, part)
logger.info(
"skeleton_analysis: processed "
+ str(num)
+ "/"
+ str(length)
+ ", part "
+ str(part)
)
if self.cut_wrong_skeleton:
            updateFunction(0, 1, "cutting wrong skeleton")
self.__cut_short_skeleton_terminal_edges()
stats = {}
len_edg = np.max(self.sklabel)
len_node = np.min(self.sklabel)
logger.debug("len_edg: " + str(len_edg) + " len_node: " + str(len_node))
# init radius analysis
logger.debug("__radius_analysis_init")
if self.volume_data is not None:
skdst = self.__radius_analysis_init()
# get edges and nodes that are near the edge. (+bounding box)
logger.debug("skeleton_analysis: starting element_neighbors processing")
self.elm_neigh = {}
self.elm_box = {}
for edg_number in list(range(len_node, 0)) + list(range(1, len_edg + 1)):
self.elm_neigh[edg_number], self.elm_box[
edg_number
] = self.__element_neighbors(edg_number)
# update gui progress
updateFunction(
edg_number + abs(len_node) + 1,
abs(len_node) + len_edg + 1,
"generating node->connected_edges lookup table",
)
logger.debug("skeleton_analysis: finished element_neighbors processing")
# clear unneeded data. IMPORTANT!!
self.__clean_shifted()
# get main stats
logger.debug(
"skeleton_analysis: starting processing part: length, radius, "
+ "curve and connections of edge"
)
# TODO switch A and B based on neighborhood maximal radius
for edg_number in list(range(1, len_edg + 1)):
try:
edgst = {}
edgst.update(self.__connection_analysis(edg_number))
if "nodeB_ZYX_mm" in edgst and "nodeA_ZYX_mm":
edgst = self.__ordered_points_with_pixel_length(edg_number, edgst)
edgst = self.__edge_curve(edg_number, edgst)
edgst.update(self.__edge_length(edg_number, edgst))
edgst.update(self.__edge_vectors(edg_number, edgst))
else:
logger.warning(
"No B point for edge ID {}. No length computation.".format(
edg_number
)
)
# edgst = edge_analysis(sklabel, i)
if self.volume_data is not None:
edgst["radius_mm"] = float(
self.__radius_analysis(edg_number, skdst)
) # slow (this takes most of time)
stats[edgst["id"]] = edgst
# update gui progress
updateFunction(
edg_number, len_edg, "length, radius, curve, connections of edge"
)
except Exception as e:
logger.warning(
"Problem in connection analysis\n" + traceback.format_exc()
)
logger.debug(
"skeleton_analysis: finished processing part: length, radius, "
+ "curve, connections of edge"
)
        # @TODO finish this
logger.debug(
"skeleton_analysis: starting processing part: angles of connected edges"
)
for edg_number in list(range(1, len_edg + 1)):
try:
if "nodeB_ZYX_mm" in edgst and "nodeA_ZYX_mm" in edgst:
edgst = stats[edg_number]
edgst.update(self.__connected_edge_angle(edg_number, stats))
updateFunction(edg_number, len_edg, "angles of connected edges")
except Exception as e:
logger.warning("Problem in angle analysis\n" + traceback.format_exc())
self.stats = stats
logger.debug(
"skeleton_analysis: finished processing part: angles of connected edges"
)
return stats
def __remove_edge_from_stats(self, stats, edge):
logger.debug("Cutting edge id:" + str(edge) + " from stats")
edg_stats = stats[edge]
connected_edgs = edg_stats["connectedEdgesA"] + edg_stats["connectedEdgesB"]
for connected in connected_edgs:
try:
stats[connected]["connectedEdgesA"].remove(edge)
except:
pass
try:
stats[connected]["connectedEdgesB"].remove(edge)
except:
pass
del stats[edge]
return stats
def __clean_shifted(self):
del self.shifted_zero # needed by __element_neighbors
self.shifted_zero = None
del self.shifted_sklabel # needed by __element_neighbors
self.shifted_sklabel = None
        # maybe a fix for short-lived issues, but the real problem was elsewhere
# try:
# del(self.shifted_zero) # needed by __element_neighbors
# except:
# logger.warning('self.shifted_zero does not exsist')
# try:
# del(self.shifted_sklabel) # needed by __element_neighbors
# except:
# logger.warning('self.shifted_zero does not exsist')
def __cut_short_skeleton_terminal_edges(self, cut_ratio=2.0):
"""
        cut_ratio = 2.0 -> if the radius of a terminal edge is 2x its length or more,
remove it
"""
def remove_elm(elm_id, elm_neigh, elm_box, sklabel):
sklabel[sklabel == elm_id] = 0
del elm_neigh[elm_id]
del elm_box[elm_id]
for elm in elm_neigh:
elm_neigh[elm] = [x for x in elm_neigh[elm] if x != elm]
return elm_neigh, elm_box, sklabel
len_edg = np.max(self.sklabel)
len_node = np.min(self.sklabel)
logger.debug("len_edg: " + str(len_edg) + " len_node: " + str(len_node))
# get edges and nodes that are near the edge. (+bounding box)
logger.debug("skeleton_analysis: starting element_neighbors processing")
self.elm_neigh = {}
self.elm_box = {}
for edg_number in list(range(len_node, 0)) + list(range(1, len_edg + 1)):
self.elm_neigh[edg_number], self.elm_box[
edg_number
] = self.__element_neighbors(edg_number)
logger.debug("skeleton_analysis: finished element_neighbors processing")
# clear unneeded data. IMPORTANT!!
self.__clean_shifted()
# remove edges+nodes that are not connected to rest of the skeleton
logger.debug(
"skeleton_analysis: Cut - Removing edges that are not"
+ " connected to rest of the skeleton (not counting its nodes)"
)
cut_elm_neigh = dict(self.elm_neigh)
cut_elm_box = dict(self.elm_box)
for elm in self.elm_neigh:
elm = int(elm)
if elm > 0: # if edge
conn_nodes = [i for i in self.elm_neigh[elm] if i < 0]
conn_edges = []
for n in conn_nodes:
try:
nn = self.elm_neigh[n] # get neighbours elements of node
except:
logger.debug(
"Node " + str(n) + " not found! May be already deleted."
)
continue
                    # if there are other edges connected to the node, add them to conn_edges
                    for e in nn:
if e > 0 and e not in conn_edges and e != elm:
conn_edges.append(e)
if (
len(conn_edges) == 0
): # if no other edges are connected to nodes, remove from skeleton
logger.debug(
"removing edge "
+ str(elm)
+ " with its nodes "
+ str(self.elm_neigh[elm])
)
                    for neigh in self.elm_neigh[elm]:
                        remove_elm(neigh, cut_elm_neigh, cut_elm_box, self.sklabel)
self.elm_neigh = cut_elm_neigh
self.elm_box = cut_elm_box
# remove elements that are not connected to the rest of skeleton
logger.debug(
"skeleton_analysis: Cut - Removing elements that are not connected"
+ " to rest of the skeleton"
)
cut_elm_neigh = dict(self.elm_neigh)
cut_elm_box = dict(self.elm_box)
for elm in self.elm_neigh:
elm = int(elm)
if len(self.elm_neigh[elm]) == 0:
logger.debug("removing element " + str(elm))
remove_elm(elm, cut_elm_neigh, cut_elm_box, self.sklabel)
self.elm_neigh = cut_elm_neigh
self.elm_box = cut_elm_box
# get list of terminal nodes
logger.debug("skeleton_analysis: Cut - get list of terminal nodes")
terminal_nodes = []
for elm in self.elm_neigh:
if elm < 0: # if node
conn_edges = [i for i in self.elm_neigh[elm] if i > 0]
if len(conn_edges) == 1: # if only one edge is connected
terminal_nodes.append(elm)
# init radius analysis
logger.debug("__radius_analysis_init")
if self.volume_data is not None:
skdst = self.__radius_analysis_init()
# removes end terminal edges based on radius/length ratio
logger.debug(
"skeleton_analysis: Cut - Removing bad terminal edges based on"
+ " radius/length ratio"
)
cut_elm_neigh = dict(self.elm_neigh)
cut_elm_box = dict(self.elm_box)
for tn in terminal_nodes:
te = [i for i in self.elm_neigh[tn] if i > 0][0] # terminal edge
radius = float(self.__radius_analysis(te, skdst))
edgst = self.__connection_analysis(int(te))
            edgst = self.__ordered_points_with_pixel_length(te, edg_stats=edgst)
            edgst.update(self.__edge_length(te, edgst))
length = edgst["lengthEstimation"]
# logger.debug(str(radius / float(length))+" "+str(radius)+" "+str(length))
if (radius / float(length)) > cut_ratio:
logger.debug("removing edge " + str(te) + " with its terminal node.")
                remove_elm(te, cut_elm_neigh, cut_elm_box, self.sklabel)
                remove_elm(tn, cut_elm_neigh, cut_elm_box, self.sklabel)
self.elm_neigh = cut_elm_neigh
self.elm_box = cut_elm_box
# check if some nodes are not forks but just curves
logger.debug(
"skeleton_analysis: Cut - check if some nodes are not forks but just curves"
)
for elm in self.elm_neigh:
if elm < 0:
conn_edges = [i for i in self.elm_neigh[elm] if i > 0]
if len(conn_edges) == 2:
logger.warning(
"Node " + str(elm) + " is just a curve!!! FIX THIS!!!"
)
# TODO
# regenerate new nodes and edges from cut skeleton (sklabel)
logger.debug("regenerate new nodes and edges from cut skeleton")
self.sklabel[self.sklabel != 0] = 1
skelet_nodes = self.__skeleton_nodes(self.sklabel)
self.sklabel = self.__generate_sklabel(skelet_nodes)
def __skeleton_nodes(self, data3d_skel, kernel=None):
"""
Return 3d ndarray where 0 is background, 1 is skeleton, 2 is node
and 3 is terminal node
"""
if kernel is None:
kernel = np.ones([3, 3, 3])
mocnost = scipy.ndimage.filters.convolve(data3d_skel, kernel) * data3d_skel
nodes = (mocnost > 3).astype(np.int8)
terminals = ((mocnost == 2) | (mocnost == 1)).astype(np.int8)
data3d_skel[nodes == 1] = 2
data3d_skel[terminals == 1] = 3
data3d_skel = self.__skeleton_nodes_aggregation(data3d_skel)
return data3d_skel
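    # --- Illustrative sketch (not part of the original class) ---
    # Why the 3x3x3 convolution separates voxel types: on a straight
    # one-voxel-wide line, an interior voxel sums itself plus two
    # neighbours (3 -> edge), an endpoint sums 2 (-> terminal), and a
    # fork sums more than 3 (-> node). For example:
    #
    #   skel = np.zeros((3, 3, 7), dtype=np.int8)
    #   skel[1, 1, 1:6] = 1
    #   m = scipy.ndimage.convolve(skel, np.ones([3, 3, 3])) * skel
    #   m[1, 1]  # -> [0, 2, 3, 3, 3, 2, 0]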
def __skeleton_nodes_aggregation(self, data3d_skel):
"""
aggregate near nodes
"""
method = "auto"
if self.aggregate_near_nodes_distance > 0:
# d1_dbg = data3d_skel.copy()
# sklabel_edg0, len_edg0 = scipy.ndimage.label(data3d_skel)
# print('generate structure')
structure = generate_binary_elipsoid(
self.aggregate_near_nodes_distance / np.asarray(self.voxelsize_mm)
)
# print('perform dilation ', data3d_skel.shape)
# import ipdb; ipdb.set_trace() # noqa BREAKPOINT
# TODO select best method
# old simple method
nd_dil = scipy.ndimage.binary_dilation(data3d_skel == 2, structure)
# per partes method even slower
# nd_dil = self.__skeleton_nodes_aggregation_per_each_node(data3d_skel==2, structure)
data3d_skel[nd_dil & data3d_skel > 0] = 2
sklabel_edg1, len_edg1 = scipy.ndimage.label(data3d_skel)
# import ipdb; ipdb.set_trace() # noqa BREAKPOINT
# import sed3
# ed = sed3.sed3(data3d_skel)
# ed.show()
return data3d_skel
def __skeleton_nodes_aggregation_per_each_node(self, data3d_skel2, structure):
node_list = np.nonzero(data3d_skel2)
nlz = zip(node_list[0], node_list[1], node_list[2])
for node_xyz in nlz:
data3d_skel2 = self.__node_dilatation(data3d_skel2, node_xyz, structure)
return data3d_skel2
def __node_dilatation(self, data3d_skel2, node_xyz, structure):
"""
this function is called for each node
"""
border = structure.shape
xlim = [
max(0, node_xyz[0] - border[0]),
min(data3d_skel2.shape[0], node_xyz[0] + border[0]),
]
ylim = [
max(0, node_xyz[1] - border[1]),
min(data3d_skel2.shape[1], node_xyz[1] + border[1]),
]
zlim = [
max(0, node_xyz[2] - border[2]),
min(data3d_skel2.shape[2], node_xyz[2] + border[2]),
]
# dilation on small box
nd_dil = scipy.ndimage.binary_dilation(
data3d_skel2[xlim[0] : xlim[1], ylim[0] : ylim[1], zlim[0] : zlim[1]] == 2,
structure,
)
# nd_dil = nd_dil * 2
data3d_skel2[xlim[0] : xlim[1], ylim[0] : ylim[1], zlim[0] : zlim[1]] = nd_dil
return data3d_skel2
def __label_edge_by_its_terminal(self, labeled_terminals):
import functools
import scipy
def max_or_zero(a):
return min(np.max(a), 0)
        fp = np.ones([3, 3, 3], dtype=int)
        max_filter = functools.partial(
            scipy.ndimage.generic_filter, function=np.max, footprint=fp
        )
        mf = max_filter(labeled_terminals)
for label in list(range(np.min(labeled_terminals), 0)):
neigh = np.min(mf[labeled_terminals == label])
labeled_terminals[labeled_terminals == neigh] = label
return labeled_terminals
def filter_small_objects(self, skel, threshold=4):
"""
Remove small objects from
terminals are connected to edges
"""
skeleton_nodes = self.__skeleton_nodes(skel)
logger.debug("skn 2 " + str(np.sum(skeleton_nodes == 2)))
logger.debug("skn 3 " + str(np.sum(skeleton_nodes == 3)))
# delete nodes
nodes = skeleton_nodes == 2
skeleton_nodes[nodes] = 0
# pe = ped.sed3(skeleton_nodes)
# pe.show()
labeled_terminals = self.__generate_sklabel(skeleton_nodes)
logger.debug("deleted nodes")
labeled_terminals = self.__label_edge_by_its_terminal(labeled_terminals)
# pe = ped.sed3(labeled_terminals)
# pe.show()
for i in list(range(np.min(labeled_terminals), 0)):
lti = labeled_terminals == i
if np.sum(lti) < threshold:
# delete small
labeled_terminals[lti] = 0
logger.debug("mazani %s %s" % (str(i), np.sum(lti)))
# bring nodes back
labeled_terminals[nodes] = 1
        return (labeled_terminals != 0).astype(int)
def __generate_sklabel(self, skelet_nodes):
sklabel_edg, len_edg = scipy.ndimage.label(
skelet_nodes == 1, structure=np.ones([3, 3, 3])
)
sklabel_nod, len_nod = scipy.ndimage.label(
skelet_nodes > 1, structure=np.ones([3, 3, 3])
)
sklabel = sklabel_edg - sklabel_nod
return sklabel
def get_branch_label(self):
"""
:return:
"""
if self.branch_label is None:
self.__generate_branch_label()
if self.volume_data is not None:
self.branch_label[self.volume_data == 0] = 0
return self.branch_label
def __generate_branch_label(self, ignore_nodes=True):
# if self.sklabel is None:
# sknodes = self.__skeleton_nodes(self.data3d_skel)
# self.sklabel = self.__generate_sklabel(skelet_nodes=sknodes)
import imma
import imma.image_manipulation
if ignore_nodes:
import copy
sklabel = self.sklabel.copy()
# delete nodes
sklabel[sklabel < 0] = 0
else:
sklabel = self.sklabel
self.branch_label = imma.image_manipulation.distance_segmentation(sklabel)
pass
def __edge_vectors(self, edg_number, edg_stats):
"""
| Return begin and end vector of edge.
| run after __edge_curve()
"""
# this edge
try:
curve_params = edg_stats["curve_params"]
vectorA = self.__get_vector_from_curve(0.25, 0, curve_params)
vectorB = self.__get_vector_from_curve(0.75, 1, curve_params)
except: # Exception as ex:
logger.warning(traceback.format_exc())
# print(ex)
return {}
return {"vectorA": vectorA.tolist(), "vectorB": vectorB.tolist()}
def __vectors_to_angle_deg(self, v1, v2):
"""
Return angle of two vectors in degrees
"""
# get normalised vectors
v1u = v1 / np.linalg.norm(v1)
v2u = v2 / np.linalg.norm(v2)
# print('v1u ', v1u, ' v2u ', v2u)
angle = np.arccos(np.dot(v1u, v2u))
# special cases
if np.isnan(angle):
if (v1u == v2u).all():
                angle = 0
else:
                angle = np.pi
angle_deg = np.degrees(angle)
# print('angl ', angle, ' angl_deg ', angle_deg)
return angle_deg
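    # --- Illustrative sketch (not part of the original class) ---
    # Orthogonal unit vectors give 90 degrees with the formula above:
    #
    #   np.degrees(np.arccos(np.dot([1, 0, 0], [0, 1, 0])))  # -> 90.0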
def __vector_of_connected_edge(self, edg_number, stats, edg_end, con_edg_order):
"""
| find common node with connected edge and its vector
| edg_end: Which end of edge you want (0 or 1)
| con_edg_order: Which edge of selected end of edge you want (0,1)
"""
if edg_end == "A":
connectedEdges = stats[edg_number]["connectedEdgesA"]
ndid = "nodeIdA"
elif edg_end == "B":
connectedEdges = stats[edg_number]["connectedEdgesB"]
ndid = "nodeIdB"
else:
logger.error("Wrong edg_end in __vector_of_connected_edge()")
if len(connectedEdges) <= con_edg_order:
return None
connected_edge_id = connectedEdges[con_edg_order]
if len(stats) < connected_edge_id:
logger.warning(
"Not found connected edge with ID: " + str(connected_edge_id)
)
return None
connectedEdgeStats = stats[connected_edge_id]
# import pdb; pdb.set_trace()
if stats[edg_number][ndid] == connectedEdgeStats["nodeIdA"]:
            # the neighbouring edge shares the node at its end 0 with the
            # current edge
vector = connectedEdgeStats["vectorA"]
elif stats[edg_number][ndid] == connectedEdgeStats["nodeIdB"]:
vector = connectedEdgeStats["vectorB"]
return vector
def perpendicular_to_two_vects(self, v1, v2):
# determinant
a = (v1[1] * v2[2]) - (v1[2] * v2[1])
b = -((v1[0] * v2[2]) - (v1[2] * v2[0]))
c = (v1[0] * v2[1]) - (v1[1] * v2[0])
return [a, b, c]
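    # --- Note (not part of the original class) ---
    # The determinant expansion above is exactly the cross product v1 x v2
    # and matches numpy's built-in:
    #
    #   np.cross([1, 0, 0], [0, 1, 0])  # -> array([0, 0, 1])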
def projection_of_vect_to_xy_plane(self, vect, xy1, xy2):
"""
        Return projection of vect onto the plane given by vectors xy1 and xy2
"""
norm = self.perpendicular_to_two_vects(xy1, xy2)
vect_proj = np.array(vect) - (
np.dot(vect, norm) / np.linalg.norm(norm) ** 2
) * np.array(norm)
return vect_proj
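    # --- Illustrative sketch (not part of the original class) ---
    # Projecting onto the plane spanned by the x- and y-axes removes the
    # z-component: with xy1=[1, 0, 0] and xy2=[0, 1, 0] the normal is
    # [0, 0, 1], so vect=[1, 1, 1] maps to [1., 1., 0.].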
def __connected_edge_angle_on_one_end(self, edg_number, stats, edg_end):
"""
creates phiXa, phiXb and phiXc.
:param edg_number: integer with edg_number
:param stats: dictionary with all statistics and computations
:param edg_end: letter 'A' or 'B'
See Schwen2012 : Analysis and algorithmic generation of hepatic vascular
system.
"""
out = {}
vector_key = "vector" + edg_end
vectorX0 = None
vectorX1 = None
vector = None
try:
vector = stats[edg_number][vector_key]
except: # Exception as e:
logger.debug(traceback.format_exc())
# try:
vectorX0 = self.__vector_of_connected_edge(edg_number, stats, edg_end, 0)
# phiXa = self.__vectors_to_angle_deg(vectorX0, vector)
# out.update({'phiA0' + edg_end + 'a': phiXa.tolist()})
# except: # Exception as e:
# logger.debug(traceback.format_exc())
# try:
vectorX1 = self.__vector_of_connected_edge(edg_number, stats, edg_end, 1)
# except: # Exception as e:
# logger.debug(traceback.format_exc())
if (vectorX0 is not None) and (vectorX1 is not None) and (vector is not None):
vect_proj = self.projection_of_vect_to_xy_plane(vector, vectorX0, vectorX1)
phiXa = self.__vectors_to_angle_deg(vectorX0, vectorX1)
phiXb = self.__vectors_to_angle_deg(vector, vect_proj)
vectorX01avg = np.array(vectorX0 / np.linalg.norm(vectorX0)) + np.array(
vectorX1 / np.linalg.norm(vectorX1)
)
phiXc = self.__vectors_to_angle_deg(vectorX01avg, vect_proj)
out.update(
{
"phi" + "a": phiXa.tolist(),
"phi" + "b": phiXb.tolist(),
"phi" + "c": phiXc.tolist(),
"vector" + "0": vectorX0,
"vector" + "1": vectorX1,
}
)
# out.update({
# 'phi' + edg_end + 'a': phiXa.tolist(),
# 'phi' + edg_end + 'b': phiXb.tolist(),
# 'phi' + edg_end + 'c': phiXc.tolist(),
# 'vector' + edg_end + '0': vectorX0,
# 'vector' + edg_end + '1': vectorX1,
# })
return out
# return phiXA, phiXb, phiXc, vectorX0, vectorX1
# except: # Exception as e:
# logger.warning(traceback.print_exc())
return None
def __connected_edge_angle(self, edg_number, stats):
"""
count angles betwen end vectors of edges
"""
def setAB(statsA, statsB):
stA = {}
stB = {}
edg_end = "A"
statstmp = statsA
if statsA is not None:
stA = {
"phi" + edg_end + "a": statstmp["phia"],
"phi" + edg_end + "b": statstmp["phib"],
"phi" + edg_end + "c": statstmp["phic"],
"vector" + edg_end + "0": statstmp["vector0"],
"vector" + edg_end + "1": statstmp["vector1"],
}
edg_end = "B"
statstmp = statsB
if statsB is not None:
stB = {
"phi" + edg_end + "a": statstmp["phia"],
"phi" + edg_end + "b": statstmp["phib"],
"phi" + edg_end + "c": statstmp["phic"],
"vector" + edg_end + "0": statstmp["vector0"],
"vector" + edg_end + "1": statstmp["vector1"],
}
return stA, stB
statsA = self.__connected_edge_angle_on_one_end(edg_number, stats, "A")
statsB = self.__connected_edge_angle_on_one_end(edg_number, stats, "B")
stA, stB = setAB(statsA, statsB)
out = {}
out.update(stA)
out.update(stB)
angleA0 = 0
return out
def __swapAB(self, edg_number, stats):
"""
Function can swap A and B node
:param edg_number:
:param stats:
:return:
"""
import copy
keys = stats[edg_number].keys()
# vector = stats[edg_number][vector_key]
for key in keys:
            idx = key.find("A")
            if idx == -1:
                continue
            k2 = key[:idx] + "B" + key[idx + 1:]
            if k2 in keys:
                tmp = stats[edg_number][key]
                stats[edg_number][key] = stats[edg_number][k2]
                stats[edg_number][k2] = tmp
def __get_vector_from_curve(self, t0, t1, curve_params):
return np.array(curve_model(t1, curve_params)) - np.array(
curve_model(t0, curve_params)
)
# def node_analysis(sklabel):
# pass
def __element_neighbors(self, el_number):
"""
Gives array of element neighbors numbers (edges+nodes/terminals)
| input:
| self.sklabel - original labeled data
| el_number - element label
| uses/creates:
| self.shifted_sklabel - all labels shifted to positive numbers
| self.shifted_zero - value of original 0
| returns:
| array of neighbor values
| - nodes for edge, edges for node
| element bounding box (with border)
"""
# check if we have shifted sklabel, if not create it.
# try:
# self.shifted_zero
# self.shifted_sklabel
# except AttributeError:
if (self.shifted_sklabel is None) or (self.shifted_zero is None):
logger.debug("Generating shifted sklabel...")
self.shifted_zero = abs(np.min(self.sklabel)) + 1
self.shifted_sklabel = self.sklabel + self.shifted_zero
el_number_shifted = el_number + self.shifted_zero
BOUNDARY_PX = 5
if el_number < 0:
# cant have max_label<0
box = scipy.ndimage.find_objects(
self.shifted_sklabel, max_label=el_number_shifted
)
else:
box = scipy.ndimage.find_objects(self.sklabel, max_label=el_number)
box = box[len(box) - 1]
d = max(0, box[0].start - BOUNDARY_PX)
u = min(self.sklabel.shape[0], box[0].stop + BOUNDARY_PX)
slice_z = slice(d, u)
d = max(0, box[1].start - BOUNDARY_PX)
u = min(self.sklabel.shape[1], box[1].stop + BOUNDARY_PX)
slice_y = slice(d, u)
d = max(0, box[2].start - BOUNDARY_PX)
u = min(self.sklabel.shape[2], box[2].stop + BOUNDARY_PX)
slice_x = slice(d, u)
box = (slice_z, slice_y, slice_x)
sklabelcr = self.sklabel[box]
# element crop
element = sklabelcr == el_number
dilat_element = scipy.ndimage.morphology.binary_dilation(
element, structure=np.ones([3, 3, 3])
)
neighborhood = sklabelcr * dilat_element
neighbors = np.unique(neighborhood)
neighbors = neighbors[neighbors != 0]
neighbors = neighbors[neighbors != el_number]
if el_number > 0: # elnumber is edge
neighbors = neighbors[neighbors < 0] # return nodes
elif el_number < 0: # elnumber is node
neighbors = neighbors[neighbors > 0] # return edge
else:
logger.warning("Element is zero!!")
neighbors = []
return neighbors, box
def __length_from_curve_spline(self, edg_stats, N=20, spline_order=3):
"""
Get length from list of points in edge stats.
:param edg_stats: dict with key "orderedPoints_mm"
:param N: Number of points used for reconstruction
:param spline_order: Order of spline
:return:
"""
pts_mm_ord = edg_stats["orderedPoints_mm"]
if len(pts_mm_ord[0]) <= spline_order:
return None
tck, u = scipy.interpolate.splprep(
pts_mm_ord, s=self.spline_smoothing, k=spline_order
)
t = np.linspace(0.0, 1.0, N)
x, y, z = scipy.interpolate.splev(t, tck)
length = self.__count_length(x, y, z, N)
return length
def __length_from_curve_poly(self, edg_stats, N=10):
px = np.poly1d(edg_stats["curve_params"]["fitParamsX"])
py = np.poly1d(edg_stats["curve_params"]["fitParamsY"])
pz = np.poly1d(edg_stats["curve_params"]["fitParamsZ"])
t = np.linspace(0.0, 1.0, N)
x = px(t)
y = py(t)
z = pz(t)
return self.__count_length(x, y, z, N)
def __count_length(self, x, y, z, N):
length = 0
for i in list(range(N - 1)):
p1 = np.asarray([x[i], y[i], z[i]])
p2 = np.asarray([x[i + 1], y[i + 1], z[i + 1]])
length += np.linalg.norm(p2 - p1)
return length
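    # --- Illustrative sketch (not part of the original class) ---
    # __count_length sums straight segments between consecutive samples:
    # x=[0, 3], y=[0, 4], z=[0, 0] with N=2 is a single 3-4-5 segment,
    # giving 5.0.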
def __edge_length(self, edg_number, edg_stats):
"""
Computes estimated length of edge, distance from end nodes and
tortosity.
| needs:
| edg_stats['nodeIdA']
| edg_stats['nodeIdB']
| edg_stats['nodeA_ZYX']
| edg_stats['nodeB_ZYX']
| output:
| 'lengthEstimation' - Estimated length of edge
| 'nodesDistance' - Distance between connected nodes
| 'tortuosity' - Tortuosity
"""
# test for needed data
try:
edg_stats["nodeIdA"]
edg_stats["nodeA_ZYX"]
except:
hasNodeA = False
else:
hasNodeA = True
try:
edg_stats["nodeIdB"]
edg_stats["nodeB_ZYX"]
except:
hasNodeB = False
else:
hasNodeB = True
if (not hasNodeA) and (not hasNodeB):
            logger.warning(
                "__edge_length does not have needed data!!! Using unreliable method."
            )
length = float(
np.sum(self.sklabel[self.elm_box[edg_number]] == edg_number) + 2
)
medium_voxel_length = (
self.voxelsize_mm[0] + self.voxelsize_mm[1] + self.voxelsize_mm[2]
) / 3.0
length = length * medium_voxel_length
stats = {
"lengthEstimation": float(length),
"nodesDistance": None,
"tortuosity": 1,
}
return stats
# crop used area
box = self.elm_box[edg_number]
sklabelcr = self.sklabel[box]
# get absolute position of nodes
if hasNodeA and not hasNodeB:
logger.warning("__edge_length has only one node!!! using one node mode.")
nodeA_pos_abs = edg_stats["nodeA_ZYX"]
one_node_mode = True
elif hasNodeB and not hasNodeA:
logger.warning("__edge_length has only one node!!! using one node mode.")
nodeA_pos_abs = edg_stats["nodeB_ZYX"]
one_node_mode = True
else:
nodeA_pos_abs = edg_stats["nodeA_ZYX"]
nodeB_pos_abs = edg_stats["nodeB_ZYX"]
one_node_mode = False
# get realtive position of nodes [Z,Y,X]
nodeA_pos = np.array(
[
nodeA_pos_abs[0] - box[0].start,
nodeA_pos_abs[1] - box[1].start,
nodeA_pos_abs[2] - box[2].start,
]
)
if not one_node_mode:
nodeB_pos = np.array(
[
nodeB_pos_abs[0] - box[0].start,
nodeB_pos_abs[1] - box[1].start,
nodeB_pos_abs[2] - box[2].start,
]
)
# get position in mm
nodeA_pos = nodeA_pos * self.voxelsize_mm
if not one_node_mode:
nodeB_pos = nodeB_pos * self.voxelsize_mm
else:
nodeB_pos = None
# get positions of edge points
# points = (sklabelcr == edg_number).nonzero()
# points_mm = [
# np.array(points[0] * self.voxelsize_mm[0]),
# np.array(points[1] * self.voxelsize_mm[1]),
# np.array(points[2] * self.voxelsize_mm[2])
# ]
#
# _, length_pixel = self.__ordered_points_mm(
# points_mm, nodeA_pos, nodeB_pos, one_node_mode)
# length_pixel = float(length_pixel)
length_pixel = edg_stats["lengthEstimationPixel"]
length = length_pixel
length_poly = None
length_spline = None
if not one_node_mode:
try:
length_poly = self.__length_from_curve_poly(edg_stats)
except:
logger.info("problem with length_poly")
try:
length_spline = self.__length_from_curve_spline(edg_stats)
except:
logger.info(traceback.format_exc())
logger.info("problem with length_spline")
logger.error("problem with spline")
if length_spline is not None:
length = length_spline
else:
pass
# get distance between nodes
pts_mm = np.asarray(edg_stats["orderedPoints_mm"])
nodes_distance = np.linalg.norm(pts_mm[:, 0] - pts_mm[:, -1])
stats = {
"lengthEstimationPoly": float_or_none(length_poly),
"lengthEstimationSpline": float_or_none(length_spline),
"lengthEstimation": float(length),
# 'lengthEstimationPixel': float(length_pixel),
"nodesDistance": float_or_none(nodes_distance),
"tortuosity": float(length / float(nodes_distance)),
}
return stats
def __ordered_points_with_pixel_length(self, edg_number, edg_stats):
box = self.elm_box[edg_number]
sklabelcr = self.sklabel[box]
# get positions of edge points
point0_mm = np.array(edg_stats["nodeA_ZYX_mm"])
point1_mm = np.array(edg_stats["nodeB_ZYX_mm"])
pts_mm_ord, pixel_length = get_ordered_points_mm_from_labeled_image(
sklabelcr,
edg_number,
self.voxelsize_mm,
point0_mm,
point1_mm,
offset_mm=box,
)
# edg_stats["orderedPoints_mm"]
edg_stats["orderedPoints_mm_X"] = pts_mm_ord[0]
edg_stats["orderedPoints_mm_Y"] = pts_mm_ord[1]
edg_stats["orderedPoints_mm_Z"] = pts_mm_ord[2]
edg_stats["orderedPoints_mm"] = pts_mm_ord
edg_stats["lengthEstimationPixel"] = pixel_length
return edg_stats
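# Note: get_ordered_points_mm_from_labeled_image is assumed to be defined
# elsewhere in this module; it orders the labeled skeleton voxels between
# the two node positions and returns (ordered points in mm, pixel length).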
def __edge_curve(self, edg_number, edg_stats):
"""
Return parameters of the fitted curve and its start and end locations.
| needs:
| edg_stats['nodeA_ZYX_mm']
| edg_stats['nodeB_ZYX_mm']
"""
retval = {}
if "orderedPoints_mm" not in edg_stats:
edg_stats = self.__ordered_points_with_pixel_length(edg_number, edg_stats)
pts_mm_ord = edg_stats["orderedPoints_mm"]
try:
point0_mm = np.array(edg_stats["nodeA_ZYX_mm"])
point1_mm = np.array(edg_stats["nodeB_ZYX_mm"])
t = np.linspace(0.0, 1.0, len(pts_mm_ord[0]))
fitParamsX = np.polyfit(t, pts_mm_ord[0], self.curve_order)
fitParamsY = np.polyfit(t, pts_mm_ord[1], self.curve_order)
fitParamsZ = np.polyfit(t, pts_mm_ord[2], self.curve_order)
# Spline
# s - smoothing
# w - weight
w = np.ones(len(pts_mm_ord[0]))
# first and last points get a large weight
w[0] = len(pts_mm_ord[0])
w[-1] = len(pts_mm_ord[0])
# tckl = np.asarray(tck).tolist()
retval = {
"curve_params": {
"start": list(point0_mm.tolist()),
"vector": list((point1_mm - point0_mm).tolist()),
"fitParamsX": list(fitParamsX.tolist()),
"fitParamsY": list(fitParamsY.tolist()),
"fitParamsZ": list(fitParamsZ.tolist()),
"fitCurveStrX": str(np.poly1d(fitParamsX)),
"fitCurveStrY": str(np.poly1d(fitParamsY)),
"fitCurveStrZ": str(np.poly1d(fitParamsZ)),
# 'fitParamsSpline': tck
}
}
except Exception:
logger.warning("Problem in __edge_curve()")
logger.warning(traceback.format_exc())
edg_stats.update(retval)
return edg_stats
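# Minimal sketch (hypothetical edg_stats) of sampling the fitted parametric
# polynomials with numpy.polyval at t in [0, 1]:
# t = np.linspace(0.0, 1.0, 100)
# cp = edg_stats["curve_params"]
# x = np.polyval(cp["fitParamsX"], t)
# y = np.polyval(cp["fitParamsY"], t)
# z = np.polyval(cp["fitParamsZ"], t)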
# def edge_analysis(sklabel, edg_number):
# element dilate * sklabel[sklabel < 0]
# pass
def __radius_analysis_init(self):
"""
Computes skeleton with distances from edge of volume.
| sklabel: skeleton or labeled skeleton
| volume_data: volumetric data with zeros and ones
"""
uq = np.unique(self.volume_data)
if len(uq) < 2:
logger.Error("labels 0 and 1 expected in volume data")
raise ValueError("Volumetric data are expected to be 0 and 1.")
return None
if (uq[0] == 0) & (uq[1] == 1):
dst = scipy.ndimage.morphology.distance_transform_edt(
self.volume_data, sampling=self.voxelsize_mm
)
# import ipdb; ipdb.set_trace() # BREAKPOINT
dst = dst * (self.sklabel != 0)
return dst
else:
logger.Error(
"__radius_analysis_init() error. Values are expected be 0 and 1"
)
raise ValueError("Volumetric data are expected to be 0 and 1.")
return None
def __radius_analysis(self, edg_number, skdst):
"""
Return smaller radius of tube
"""
# returns mean distance from skeleton to vessel border = vessel radius
edg_skdst = skdst * (self.sklabel == edg_number)
return np.mean(edg_skdst[edg_skdst != 0])
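# Hypothetical call sequence within this class:
# skdst = self.__radius_analysis_init()
# radius_mm = self.__radius_analysis(edg_number, skdst)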
def __connection_analysis(self, edg_number):
"""
Analysis of which edge is connected
"""
edg_neigh = self.elm_neigh[edg_number]
if len(edg_neigh) == 1:
logger.warning(
"Only one (%s) connected node in connection_analysis() for edge number %s",
edg_neigh,
edg_number,
)
# get edges connected to end nodes
connectedEdgesA = np.array(self.elm_neigh[edg_neigh[0]])
# remove edg_number from connectedEdges list
connectedEdgesA = connectedEdgesA[connectedEdgesA != edg_number]
# get pixel and mm position of end nodes
# node A
box0 = self.elm_box[edg_neigh[0]]
nd00, nd01, nd02 = (edg_neigh[0] == self.sklabel[box0]).nonzero()
point0_mean = [np.mean(nd00), np.mean(nd01), np.mean(nd02)]
point0 = np.array(
[
float(point0_mean[0] + box0[0].start),
float(point0_mean[1] + box0[1].start),
float(point0_mean[2] + box0[2].start),
]
)
# node position -> mm
point0_mm = point0 * self.voxelsize_mm
edg_stats = {
"id": edg_number,
"nodeIdA": int(edg_neigh[0]),
"connectedEdgesA": connectedEdgesA.tolist(),
"nodeA_ZYX": point0.tolist(),
"nodeA_ZYX_mm": point0_mm.tolist(),
}
elif len(edg_neigh) != 2:
logger.warning(
"Wrong number (%s) of connected nodes in connection_analysis() for edge number %s",
edg_neigh,
edg_number,
)
edg_stats = {"id": edg_number}
else:
# get edges connected to end nodes
connectedEdgesA = np.array(self.elm_neigh[edg_neigh[0]])
connectedEdgesB = np.array(self.elm_neigh[edg_neigh[1]])
# remove edg_number from connectedEdges list
connectedEdgesA = connectedEdgesA[connectedEdgesA != edg_number]
connectedEdgesB = connectedEdgesB[connectedEdgesB != edg_number]
# get pixel and mm position of end nodes
# node A
box0 = self.elm_box[edg_neigh[0]]
nd00, nd01, nd02 = (edg_neigh[0] == self.sklabel[box0]).nonzero()
point0_mean = [np.mean(nd00), np.mean(nd01), np.mean(nd02)]
point0 = np.array(
[
float(point0_mean[0] + box0[0].start),
float(point0_mean[1] + box0[1].start),
float(point0_mean[2] + box0[2].start),
]
)
# node B
box1 = self.elm_box[edg_neigh[1]]
nd10, nd11, nd12 = (edg_neigh[1] == self.sklabel[box1]).nonzero()
point1_mean = [np.mean(nd10), np.mean(nd11), np.mean(nd12)]
point1 = np.array(
[
float(point1_mean[0] + box1[0].start),
float(point1_mean[1] + box1[1].start),
float(point1_mean[2] + box1[2].start),
]
)
# node position -> mm
point0_mm = point0 * self.voxelsize_mm
point1_mm = point1 * self.voxelsize_mm
edg_stats = {
"id": edg_number,
"nodeIdA": int(edg_neigh[0]),
"nodeIdB": int(edg_neigh[1]),
"connectedEdgesA": connectedEdgesA.tolist(),
"connectedEdgesB": connectedEdgesB.tolist(),
"nodeA_ZYX": point0.tolist(),
"nodeB_ZYX": point1.tolist(),
"nodeA_ZYX_mm": point0_mm.tolist(),
"nodeB_ZYX_mm": point1_mm.tolist(),
}
return edg_stats
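# Illustrative two-node result (hypothetical values):
# {"id": 5, "nodeIdA": -2, "nodeIdB": -3, "connectedEdgesA": [7, 9],
#  "connectedEdgesB": [8], "nodeA_ZYX": [10.0, 4.0, 4.0],
#  "nodeA_ZYX_mm": [1.0, 0.4, 0.4], ...}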
def generate_binary_elipsoid(ndradius=(1, 1, 1)):
"""
Generate a binary ellipsoid shape with the given per-axis radii (in voxels).
"""
ndradius = np.asarray(ndradius).astype(np.double)
shape = ((ndradius * 2) + 1).astype(np.uint)
logger.debug("ellipsoid shape %s", str(shape))
x, y, z = np.indices(shape)
mask = (
((x - ndradius[0]) ** 2) / ndradius[0] ** 2
+ ((y - ndradius[1]) ** 2) / ndradius[1] ** 2
+ ((z - ndradius[2]) ** 2) / ndradius[2] ** 2
)
return mask < 1
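# Minimal usage sketch: per-axis radii of [2, 3, 3] voxels give a boolean
# volume of shape (5, 7, 7) that is True inside the ellipsoid:
# se = generate_binary_elipsoid([2, 3, 3])
# se.shape  # -> (5, 7, 7)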
def float_or_none(number):
if number is None:
return None
else:
return float(number)
def curve_model(t, params):
p0 = params["start"][0] + t * params["vector"][0]
p1 = params["start"][1] + t * params["vector"][1]
p2 = params["start"][2] + t * params["vector"][2]
return [p0, p1, p2]
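# Example: the parametric line starting at [0, 0, 0] with direction
# vector [2, 2, 2], evaluated at its midpoint t=0.5:
# curve_model(0.5, {"start": [0, 0, 0], "vector": [2, 2, 2]})  # -> [1.0, 1.0, 1.0]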
def get_ordered_points_mm(points_mm, nodeA_pos, nodeB_pos, one_node_mode=False):
"""
:param points_mm: list of not ordered points
:param nodeA_pos: start point
:param nodeB_pos: end point
:param one_node_mode: if no end point is given
:return:
"""
length = 0
startpoint = nodeA_pos
pt_mm = [[nodeA_pos[0]], [nodeA_pos[1]], [nodeA_pos[2]]]
while len(points_mm[0]) != 0:
# get closest point to startpoint
p_length = float("Inf")  # start from an infinite best distance
closest_num = -1
for p in range(len(points_mm[0])):
test_point = np.array([points_mm[0][p], points_mm[1][p], points_mm[2][p]])
p_length_new = np.linalg.norm(startpoint - test_point)
if p_length_new < p_length:
p_length = p_length_new
closest_num = p
closest = np.array(
[
points_mm[0][closest_num],
points_mm[1][closest_num],
points_mm[2][closest_num],
]
)
# add length
pt_mm[0].append(points_mm[0][closest_num])
pt_mm[1].append(points_mm[1][closest_num])
pt_mm[2].append(points_mm[2][closest_num])
length += np.linalg.norm(closest - startpoint)
# replace startpoint with used point
startpoint = closest
# remove used point from points
points_mm = [
np.delete(points_mm[0], closest_num),
np.delete(points_mm[1], closest_num),
np.delete(points_mm[2], closest_num),
]
# add length to nodeB
if not one_node_mode:
length += np.linalg.norm(nodeB_pos - startpoint)
pt_mm[0].append(nodeB_pos[0])
pt_mm[1].append(nodeB_pos[1])
pt_mm[2].append(nodeB_pos[2])
return np.asarray(pt_mm), length
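# Tiny sketch with hypothetical coordinates: two points at 1 and 2 along the
# first axis, between nodes at 0 and 3, order into a path of length 3.0 mm:
# pts = [np.array([1.0, 2.0]), np.array([0.0, 0.0]), np.array([0.0, 0.0])]
# ordered, length = get_ordered_points_mm(
#     pts, np.array([0.0, 0.0, 0.0]), np.array([3.0, 0.0, 0.0]))
# # length == 3.0; ordered.shape == (3, 4)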