# -*- coding: utf-8 -*-
"""Tests of GLSAR and diagnostics against Gretl
Created on Thu Feb 02 21:15:47 2012
Author: <NAME>
License: BSD-3
"""
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.stats.sandwich_covariance as sw
import statsmodels.stats.diagnostic as smsdia
import statsmodels.stats.outliers_influence as oi
def compare_ftest(contrast_res, other, decimal=(5,4)):
assert_almost_equal(contrast_res.fvalue, other[0], decimal=decimal[0])
assert_almost_equal(contrast_res.pvalue, other[1], decimal=decimal[1])
assert_equal(contrast_res.df_num, other[2])
assert_equal(contrast_res.df_denom, other[3])
assert_equal("f", other[4])
class TestGLSARGretl:
def test_all(self):
d = macrodata.load_pandas().data
#import datasetswsm.greene as g
#d = g.load('5-1')
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv'].values))
"""
Created on Thu Oct. 10 2019
Recent changes for the version 0.1.1:
1) Instead of giving the optical penetration depth as input, only the
complex refractive index "n" is given. This is a material parameter, so
the input is given in the simulation --> add_layer(.) command.
Now the "LB" and "TMM" sources are initialized almost in the same way.
2) One of the outputs of sim.run() is T. It is now a
3-dimensional array, with dim0 = time; dim1 = space; dim2 = subsystem
3) The input for the visual class in the v.contour() function should not be
a string but just numbers corresponding to different systems.
@author: <NAME>
<EMAIL>
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from bspline import Bspline
from bspline.splinelab import aptknt
import time
from matplotlib.animation import FuncAnimation as movie
from tqdm import tqdm #Progressbar
#==============================================================================
class temperature(object):
def __init__(self):
self.plt_points = 30 #number of points in x grid
self.length = np.array([0,0]) #length of x space,starting from 0
self.Left_BC_Type = 1 #Boundary conditions Default is Neumann
self.Right_BC_Type = 1 #1=> Neumann; 0=> Dirichlet
self.init = lambda x: 300+0*x # initial temperature of probe
self.n = np.array([1,1],dtype=complex) # Initial refractive index air|...|air
self.conductivity = [1] #This gets deleted after initialisation
self.heatCapacity = [1] #those values are just here to make space
self.rho = [1] #Actual values are given, when 'addLayer(length, conductivity,heatCapacity,rho)' is executed
self.collocpts = 12
self.setup = False #flag so the first-time setup is not done twice
def getProperties(self): #to depict the properties of the object
for i in (self.__dict__):
name = str(i)
value = str(self.__dict__[i])
print('{:<20}{:>10}'.format( name,value ))
def __repr__(self):
return('Temperature')
#for every layer, a function to calculate the derivative of k(T)
def diff_conductivity(self,phi,num_of_material):
eps =1e-9
dc = (self.conductivity[num_of_material](phi+eps)-self.conductivity[num_of_material](phi))/eps
return(dc)
#Creating the key matrices for B-splines. Those are A0,A1,A2
#A0 => Zero derivative; A1 => 1st order derivative....
#We create the matrices for every layer, with the respective length etc.
#then we put them together to Abig => Boundary and interface conditions are applied here.
def Msetup(self):
#Deleting the first element of the default initialization
#After creating the elements with 'addLayer' we don't need them anymore!
if not self.setup:
self.length = self.length[1:]
self.conductivity = self.conductivity[1:]
self.heatCapacity = self.heatCapacity[1:]
self.rho = self.rho[1:]
self.setup = True
#Length and number of grid points for each respective layer
length = self.length
plt_points = self.plt_points
num_of_points = self.collocpts #Number of points per layer used in the spline for collocation
order = 5 #order of the spline
x = np.array(np.zeros([np.size(length)-1,num_of_points]))
x_plt = np.array(np.zeros([np.size(length)-1,plt_points]))
knot_vector = np.array(np.zeros([np.size(length)-1,num_of_points+order+1]))
basis = np.array(np.zeros(np.size(length)-1))
A0h = []; A1h = []; A2h = []; Ch = [];
LayerMat = np.array([np.zeros((num_of_points,num_of_points))])
#Create all the big matrices A0,A1,A2 & C. C is used to map on a fine mesh in x-space.
#For every layer we set up splines between the boundaries
for i in range(0,np.size(length)-1):
x[i,:] = np.linspace(length[i], length[i+1] , num_of_points)
x_plt[i,:] = np.linspace(length[i], length[i+1] , plt_points)
knot_vector[i,:] = aptknt(x[i,:], order) #prepare for Spline matrix
basis = Bspline(knot_vector[i,:],order)
A0hinter = basis.collmat(x[i,:], deriv_order = 0); A0hinter[-1,-1] = 1
A1hinter = basis.collmat(x[i,:], deriv_order = 1); A1hinter[-1] = -np.flip(A1hinter[0],0)
A2hinter = basis.collmat(x[i,:], deriv_order = 2); A2hinter[-1,-1] = 1
Chinter = basis.collmat(x_plt[i,:], deriv_order = 0); Chinter[-1,-1] = 1
LayerMat = np.append(LayerMat,np.array([np.dot(A2hinter,np.linalg.inv(A0hinter))]),axis = 0)
A0h = np.append(A0h,A0hinter)
A1h = np.append(A1h,A1hinter)
A2h = np.append(A2h,A2hinter)
Ch = np.append(Ch,Chinter)
#Reshape the long string of appended Matrix, such that
#rows: x-points; columns: i-th basis spline
LayerMat = LayerMat[1:,:,:]
A0h = np.reshape(A0h, (-1,num_of_points))
A1h = np.reshape(A1h, (-1,num_of_points))
A2h = np.reshape(A2h, (-1,num_of_points))
Ch = np.reshape(Ch,(-1,num_of_points))
#Ch => More points in x, but same number of basis splines
#Clearing the interface points, to not double count
N = num_of_points
plp = plt_points
interfaces = np.shape(x)[0]-1
sizeA = np.shape(x)[0]*N-interfaces
sizeCb = np.shape(x)[0]*plp-interfaces
Abig = np.zeros([sizeA,sizeA])
A1b = np.zeros([sizeA,sizeA])
A2b = np.zeros([sizeA,sizeA])
Cb = np.zeros([sizeCb,sizeA])
#Clearing the double counts from the space grid
xflat = x.flatten()
x_plt_flat = x_plt.flatten()
#index of double counts
doublec = np.array([np.arange(1,len(length)-1)])*N
doublec_plt = np.array([np.arange(1,len(length)-1)])*plp
xflat = np.delete(xflat,doublec)
x_plt_flat = np.delete(x_plt_flat,doublec_plt)
#Filling the big matrices.
startA = 0; endA = N-1
startC = 0; endC = plp-1
for i in range(0,interfaces+1):
Abig[startA:endA,startA:endA+1] = A0h[startA+i:endA+i,:]
A1b[startA:endA+1,startA:endA+1] = A1h[startA+i:endA+i+1,:]
A2b[startA:endA+1,startA:endA+1] = A2h[startA+i:endA+i+1,:]
Cb[startC:endC+1,startA:endA+1] = Ch[startC+i:endC+i+1,:]
startA += N-1; endA += N-1
startC += plp-1; endC += plp-1
#Create A00 with no interface condition to correctly compute phi in loop
#The copy needs to be done before interface conditions are applied in Abig
A00 = Abig.copy()
A00[-1,-1] = 1;
#Here we make init, conductivity & capacity all functions, in case they are
# given as integers or floats. Also throw warnings if not every layer has a
# conductivity or capacity ============================================
#Making init a function, in case it is given as a scalar
if np.size(self.init) == 1 and isinstance(self.init,(int,float)):
dummy = self.init
self.init = lambda x: dummy + 0*x
if len(length) > 2: #multilayer case
if len(length)-1 != len(self.heatCapacity) or len(length)-1 != len(self.conductivity):
print('--------------------------------------------------------')
print('The number of different layers must match the number of ' \
'inputs for conductivity, heatCapacity and rho.')
print('--------------------------------------------------------')
if np.size(self.conductivity) != interfaces+1:
print('--------------------------------------------------------')
print('Not every layer has been given a conductivity function. ' \
'Adjust your input of the conductivity functions with respect to the layers.')
print('--------------------------------------------------------')
if np.size(self.heatCapacity) != interfaces+1:
print('--------------------------------------------------------')
print('Not every layer has been given a heatCapacity function. '\
'Adjust your input of the heatCapacity functions with respect to the layers.')
print('--------------------------------------------------------')
print('--------------------------------------------------------')
#Make Functions in case heat capacity/conductivity are given as variables
if (all(self.conductivity) or all(self.heatCapacity) or all(self.init)) == False:
print('No heatCapacity, conductivity or initial function given.')
print('--------------------------------------------------------')
#make the conductivity always a function
if len(length) >2 or np.size(self.conductivity)>=2:
for j in list(range (0,np.size(self.conductivity))):
if isinstance(self.conductivity[j],(int,float,list)) :
dummy3 = self.conductivity[j]
self.conductivity[j] = (lambda b: lambda a: b+0*a)(dummy3)
#make the heatCapacity always a function
for j in list(range (0,np.size(self.heatCapacity))):
if isinstance(self.heatCapacity[j],(int, float,list)) :
dummy4 = self.heatCapacity[j]
self.heatCapacity[j] = (lambda b: lambda a: b+0*a)(dummy4)
else :
if isinstance(self.conductivity[0],(int,float)):
dummy1 = self.conductivity[0]
self.conductivity = [lambda phi: dummy1 + 0*phi]
if isinstance(self.heatCapacity[0],(int,float)):
dummy2 = self.heatCapacity[0]
self.heatCapacity = [lambda phi: dummy2 + 0*phi]
#End of function creation for init(x), conductivity[l](phi), heatCapacity[l](phi)
# with respect to every layer 'l' =====================================
def interconditions(phi,interfaces):
N = num_of_points
end_i = N-1
intercondiL = np.zeros((interfaces,N))
intercondiR = np.zeros((interfaces,N))
for i in range(interfaces):
intercondiL[i] = self.conductivity[i](phi[end_i])*A1h[end_i+i]
intercondiR[i] = self.conductivity[i+1](phi[end_i])*A1h[end_i+i+1]
end_i += N-1
return(intercondiL,intercondiR)
#Initial Electron temperature
initphi = self.init(xflat)
initphi_large = self.init(x_plt_flat)
intercon = interconditions(initphi,interfaces)
#filling up Abig with the interface condition in the middle of the grid
start_i = 0; end_i = N-1
for i in range(0,interfaces):
Abig[end_i,start_i:end_i] = intercon[0][i][:-1]#Lhs interface flow
Abig[end_i,end_i+1:end_i+N] = -intercon[1][i][1:]#Rhs interface flow
Abig[end_i,end_i] = intercon[0][i][-1] -intercon[1][i][0]
start_i += N-1; end_i += N-1
Abig[-1,-1] = 1 #to correct Cox algorithm
#Now Matrix Abig is completed and interface condition is applied.
#Treating 2 types of boundary conditions: 0=> Dirichlet; 1=> Neumann,
# where 0´th and -1´th row need to be first order derivatives for flux.
neumannBL = A1b[0].copy();
neumannBR = A1b[-1].copy();
if self.Left_BC_Type == 1: Abig[0] = -neumannBL
if self.Right_BC_Type == 1: Abig[-1] = neumannBR
#Clear for BC! (first and last row need to be cleared to correctly apply BC)
A1b[0] = 0; A2b[0] = 0;
A1b[-1] = 0; A2b[-1] = 0;
#Get inital c coefficients for splines using init (=phi_init)
c = np.dot(np.linalg.inv(A00),self.init(xflat))
#Passed on properties to the simulation class
return(c,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large,interfaces,LayerMat,A1h)
def addLayer(self,L,refind,conductivity,heatCapacity,rho):
"""
Add the parameters of one layer:
(length, n = complex refractive index, conductivity, heatCapacity, density)
The units in SI are:
[length] = m
[n] = complex refractive index
[conductivity] = W/(mK)
[heatCapacity] = J/(m^3K^2)
[density] = kg/m^3
[Coupling] = W/(m^3K)
"""
self.length = np.append(self.length,self.length[-1]+L)
#Squeeze in the refractive index between two layers of air: air|...|...|air
self.n = np.concatenate((self.n[:-1],[refind],[self.n[-1]]))
self.conductivity.append(conductivity)
self.heatCapacity.append(heatCapacity)
self.rho = np.append(self.rho,rho)
#==============================================================================
class simulation(object):
def __init__(self,num_of_temp,source):
self.temp_data = temperature() #import the temperature object
self.num_of_temp = num_of_temp #1 if only electron temp. 2 if electron and lattice temp.
self.start_time = 0 #starting time (can be negative)
self.final_time = 10 #time when simulation stops
self.time_step = [] #can either be given or is automatically calculated in stability
self.left_BC = 0 #function or constant specifying the boundary condition
self.right_BC = 0 #on the left or right side of the problem
self.stability_lim = [270,3000]
self.temp_data_Lat = [] #Default case is without lattice temperature
self.temp_data_Spin = []
if num_of_temp >= 2: #if Lattice temp is considered
self.temp_data_Lat = temperature() #in case also a lattice module is given
self.coupling = [] #Coupling between Electron and Lattice system
self.left_BC_L = 0 #Setting the default to zero flux
self.right_BC_L = 0 #The BC type is indicated in the temperature class
if num_of_temp == 3: #In case spin coupling is also considered
self.temp_data_Spin = temperature()
self.coupling_LS = [] #Coupling between Lattice and Spin system
self.coupling_SE = [] #Coupling between Electron and Spin system
self.left_BC_S = 0 #Default zero flux Neumann boundary conditions
self.right_BC_S = 0 #On both sides
self.source = source #object source can be passed on
#to depict the properties of the object
def getProperties(self):
for i in (self.__dict__):
name = str(i)
value = str(self.__dict__[i])
print('{:<20}{:>10}'.format( name,value ))
def __repr__(self):
return('Simulation')
def changeInit(self,system,function):
"""
Change the initial condition of every system.
.changeInit(system,function) has 2 input arguments
system --> string "electron" or "lattice" or "spin"
function --> a function handle or a number defining the value of the
system at t=0 over the entire domain x.
"""
if (system == "electron") or (system == "Electron") or (system == 1):
self.temp_data.init = function
if (system == "lattice") or (system == "Lattice") or (system == 2):
self.temp_data_Lat.init = function
if (system == "spin") or (system == "Spin") or (system == 3):
self.temp_data_Spin.init = function
def changeBC_Type(self,system,side,BCType):
"""
Function to change the type of the boundary condition on the left and
right side of the material, for every system, "electron", "lattice", "spin"
respectively.
.changeBC_Type(system,side,BCType) has 3 input arguments.
system --> "electron" or "lattice" or "spin". Alternatively: 1, 2, 3
side --> "left" or "right"
BCType --> "dirichlet" fixing the value/ "neumann" fixing the flux.
"""
if (system == "electron") or (system == "Electron") or (system == 1):
if side == "left":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data.Left_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data.Left_BC_Type = 1
if side == "right":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data.Right_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data.Right_BC_Type = 1
if (system == "lattice") or (system == "Lattice") or (system == 2):
if side == "left":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Lat.Left_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Lat.Left_BC_Type = 1
if side == "right":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Lat.Right_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Lat.Right_BC_Type = 1
if (system == "spin") or (system == "Spin") or (system == 3):
if side == "left":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Spin.Left_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Spin.Left_BC_Type = 1
if side == "right":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Spin.Right_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Spin.Right_BC_Type = 1
def changeBC_Value(self,system,side,function):
"""
Function to change the value of the boundary condition on the left and
right side of the material, for every system, "electron", "lattice", "spin"
respectively.
.changeBC_Value(system,side,function) the first two are strings,
the last one is a function handle or a number.
system --> "electron" or "lattice" or "spin". Alternatively: 1, 2, 3
side --> "left" or "right"
function--> function or number fixing the value on the boundaries for all times.
"""
if (system == "electron") or (system == "Electron") or (system == 1):
if side == "left":
self.left_BC = function
if side == "right":
self.right_BC = function
if (system == "lattice") or (system == "Lattice") or (system == 2):
if side == "left":
self.left_BC_L = function
if side == "right":
self.right_BC_L = function
if (system == "spin") or (system == "Spin") or (system == 3):
if side == "left":
self.left_BC_S = function
if side == "right":
self.right_BC_S = function
def addSubstrate(self,name = "silicon"):
"""
Automatically creates a silicon substrate using input
parameters, mostly taken from:
Contribution of the electron-phonon interaction
to Lindhard energy partition at low energy in Ge and Si
detectors for astroparticle physics applications, by
<NAME> and <NAME>
Note: Refractive index for 400 nm light!
"""
if (name == "Silicon") or (name =="silicon") or (name =="Si"):
k_el_Si = 130#W/(m*K);
k_lat_Si = lambda T: np.piecewise(T,[T<=120.7,T>120.7],\
[lambda T: 100*(0.09*T**3*(0.016*np.exp(-0.05*T)+np.exp(-0.14*T))),
lambda T: 100*(13*1e3*T**(-1.6))])
rho_Si = 2.32e3#kg/(m**3)
C_el_Si = lambda Te: 150/rho_Si *Te
C_lat_Si = 1.6e6/rho_Si
G_Si = 1e17*18#W/(m**3*K)
#Set three layers of Silicon after each other.
#The space resolution on the Film|Substrate edge is high
#and decreases as one moves in bulk direction
if self.num_of_temp == 2:#Lattice only in the 2T
self.temp_data_Lat.addLayer(20e-9,5.5674+0.38612j,k_lat_Si,C_lat_Si,rho_Si)
self.coupling = np.append(self.coupling,G_Si)
self.temp_data_Lat.addLayer(100e-9,5.5674+0.38612j,k_lat_Si,C_lat_Si,rho_Si)
self.coupling = np.append(self.coupling,G_Si)
self.temp_data_Lat.addLayer(100000e-9,5.5674+0.38612j,k_lat_Si,C_lat_Si,rho_Si)
self.coupling = np.append(self.coupling,G_Si)
#In the 1 and 2 temperature case electron always gets appended
self.temp_data.addLayer(20e-9,5.5674+0.38612j,k_el_Si,C_el_Si,rho_Si)
self.temp_data.addLayer(100e-9,5.5674+0.38612j,k_el_Si,C_el_Si,rho_Si)
self.temp_data.addLayer(100000e-9,5.5674+0.38612j,k_el_Si,C_el_Si,rho_Si)
def addLayer(self,L,n,conductivity,heatCapacity,rho,coupling=0,*args):
"""
Add parameters of every layer:
(length,conductivity[electron,lattice,spin],heatCapacity[electron, lattice, spin],density, coupling[E-L,L-S,S-E])
The units in SI are:
[length] = m
[n] = complex refractive index
[conductivity] = W/(mK)
[heatCapacity] = J/(m^3K^2)
[density] = kg/m^3
[Coupling] = W/(m^3K)
"""
#check all input arguments and make them to lists, for the multi layer case
#make list when given as int or float
if not isinstance(conductivity,(list,np.ndarray)):
conductivity = [conductivity]
if not isinstance(heatCapacity,(list,np.ndarray)):
heatCapacity = [heatCapacity]
#do typecheck only for the lattice system in the 2TM-case
if self.num_of_temp == 2:
if np.size(conductivity) < 2 or np.size(heatCapacity) < 2:
print('Lattice parameters are missing.\n Add parameters for Lattice system.')
return(128)
self.temp_data_Lat.addLayer(L,n,conductivity[1],heatCapacity[1],rho)
#Only the electron-lattice coupling is under consideration in the 2TM case
self.coupling = np.append(self.coupling,coupling)
#do typecheck for the Lattice and the Spin system
if self.num_of_temp == 3:
if np.size(conductivity) < 3 or np.size(heatCapacity) < 3 or np.size(coupling) < 3:
print('Input parameters are missing.\n Add parameters for '\
'conductivity/heatCapacity or coupling for Lattice/Spin system.')
return(128)
self.temp_data_Lat.addLayer(L,n,conductivity[1],heatCapacity[1],rho)
self.temp_data_Spin.addLayer(L,n,conductivity[2],heatCapacity[2],rho)
#In the 3Tm case the coupling input arg is a vector of len 3. Unwrap them:
self.coupling = np.append(self.coupling,coupling[0])
self.coupling_LS = np.append(self.coupling_LS,coupling[1])
self.coupling_SE = np.append(self.coupling_SE,coupling[2])
#For the electronic system always add the parameters!
self.temp_data.addLayer(L,n,conductivity[0],heatCapacity[0],rho)
def interconditions(self,phi,interfaces,conductivity,N,A1h):
"""
A function which returns the interface conditions for the left and right
side of each interface. Gets called in the E.E.-loop.
"""
end_i = N-1
intercondiL = np.zeros((interfaces,N))
intercondiR = np.zeros((interfaces,N))
for i in range(interfaces):
intercondiL[i] = conductivity[i](phi[end_i])*A1h[end_i+i]
intercondiR[i] = conductivity[i+1](phi[end_i])*A1h[end_i+i+1]
end_i += N-1
return(intercondiL,intercondiR)
def sourceprofile(self,absorptionprofile,timeprofile,xflat,x0,t,N):
#Consider Lambert Beers law in space and different types in time
if (absorptionprofile == "LB") and (self.source.fluence != 0):
optical_penetration_depth = self.source.ref2delta(self.temp_data.n,self.source.lambda_vac)
if (timeprofile == "Gaussian"):
print('-----------------------------------------------------------')
print('Lambert Beer´s absorption law and a Gaussian time profile is applied as source.')
print('-----------------------------------------------------------')
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian)
if (timeprofile == "repGaussian") or (timeprofile == "RepGaussian"):
print('-----------------------------------------------------------')
print('Lambert Beer absorption profile and a repeated Gaussian time profile are taken into account for the source. '\
'The frequency of the pulse repetition has to be indicated via s.frequency = number (in 1/seconds).')
print('-----------------------------------------------------------')
self.source.multipulse = True
xmg, tmg = np.meshgrid(xflat,t)
if (self.source.frequency is not False):
time_range = tmg[-1,-1]-self.source.t0
pulses = int(round(time_range * self.source.frequency))
#Add up Gaussian pulses with different t0, according to the frequency given
#from t0 onwards, until the end of the time grid
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 + i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian,customtime)
if(self.source.frequency is not False) and (self.source.num_of_pulses is not False):
#Creating a certain number of pulses according to self.num_of_pulses
time_range = tmg[-1,-1]-self.source.t0
pulses = self.source.num_of_pulses
#If num_of_pulses is too big to fit in the time range [t0,t_end] throw a warning
if (pulses > int(round(time_range * self.source.frequency))):
pulses = int(round(time_range * self.source.frequency))
print('Number of pulses is too big to fit in the timerange under consideration. \n'\
'Adjust t_end or consider a smaller number of pulses.')
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 +i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian,customtime)
if(self.source.frequency is False) and (self.source.num_of_pulses is False):
print('-----------------------------------------------------------')
print('Assign the property s.frequency to consider a certain pulse frequency.\n'\
'If only a certain number of pulses should be considered, assign the value s.num_of_pulses = integer.')
print('-----------------------------------------------------------')
if (timeprofile == "custom") or (timeprofile == "Custom"):
[ttime,amplitude] = self.source.loadData
#To extract the custom time profile and the scaling factor
[sourcemat,customtime,scaling] = self.source.custom(t,xflat,ttime,amplitude,optical_penetration_depth[0])
#To get the space profile: Source with different optical penetration depth defined on the xflat gird
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian,customtime,scaling)
#Consider Transfer Matrix in space and different types in time
if (absorptionprofile == "TMM") and (self.source.fluence != 0):
"""
This implements a transfer matrix approach to the local absorption,
instead of using Lambert Beer´s law as considered in the Gaussian
source type.
"""
#Multiplying by 1e9, since the absorption() function in the source module only works if the length is given in units of nm!
x0m = x0*1e9#convert the length into nm
if len(x0) != (len(self.temp_data.n)-1):
print('-----------------------------------------------------------')
print('Number of considered layers does not match the given refractive indices.\n'\
'In ´temperature.n(Air|Film layer1|Film layer2|...|Air)´ only consider the film layers. \n'\
'The refractive index of the substrate gets added automatically later when \n'\
'`simulation.addSubstrate(\'name\')` gets called.')
print('-----------------------------------------------------------')
if (timeprofile == "Gaussian"):
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m)
print('-----------------------------------------------------------')
print('Transfer matrix absorption profile and a Gaussian time profile is taken into account for the source.\n'\
'Length of every layer has to be given in units of meter.')
print('-----------------------------------------------------------')
if (timeprofile == "custom") or (timeprofile == "Custom"):
print('-----------------------------------------------------------')
print('Transfer matrix absorption profile and a custom time profile are taken into account for the source.\n'\
'Length of every layer has to be given in units of meter.')
print('-----------------------------------------------------------')
if self.source.loadData is False:
print('-----------------------------------------------------------')
print('Import an array containing the data of the custom pulse: '\
'arr[0,:] = time; arr[1,:] = amplitude')
print('-----------------------------------------------------------')
[ttime,amplitude] = self.source.loadData
lam = 1#Lambda does not matter here since the spatial absorption is calculated via TMM
[sourceM,customtime,scaling] = self.source.custom(t,xflat,ttime,amplitude,lam)
#The createTMM(xgrid,timegrid,length,*args) has customtime as an optional argument
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m,customtime,scaling)
if (timeprofile == "RepGaussian") or (timeprofile== "repGaussian"):
print('-----------------------------------------------------------')
print('Transfer matrix absorption profile and a repeated Gaussian time profile are taken into account for the source. '\
'Length of every layer has to be given in units of meter.')
print('-----------------------------------------------------------')
self.source.multipulse = True
xmg, tmg = np.meshgrid(xflat,t)
if (self.source.frequency is not False):
time_range = tmg[-1,-1]-self.source.t0
pulses = int(round(time_range * self.source.frequency))
#Add up Gaussian pulses with different t0, according to the frequency given
#from t0 onwards, until the end of the time grid
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 + i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m,customtime)
if(self.source.frequency is not False) and (self.source.num_of_pulses is not False):
#Creating a certain number of pulses according to self.num_of_pulses
time_range = tmg[-1,-1]-self.source.t0
pulses = self.source.num_of_pulses
#If num_of_pulses is too big to fit in the time range [t0,t_end] throw a warning
if (pulses > int(round(time_range * self.source.frequency))):
pulses = int(round(time_range * self.source.frequency))
print('Number of pulses is too big to fit in the timerange under consideration. \n'\
'Adjust t_end or consider a smaller number of pulses.')
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 +i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m,customtime)
if(self.source.frequency is False) and (self.source.num_of_pulses is False):
print('-----------------------------------------------------------')
print('Assign the property s.frequency to consider a certain pulse frequency.\n'\
'If only a certain number of pulses should be considered, assign the value s.num_of_pulses = integer.')
print('-----------------------------------------------------------')
return(sourceM)
# This is the main Explicit Euler loop where the solution to T(x,t) is calculated.
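# In the two-temperature case the loop below integrates (explicit Euler in time,
# B-spline collocation in space) the coupled diffusion equations
#   C_E(T_E)*rho * dT_E/dt = d/dx( k_E(T_E)*dT_E/dx ) + G*(T_L - T_E) + S(x,t)
#   C_L(T_L)*rho * dT_L/dt = d/dx( k_L(T_L)*dT_L/dx ) + G*(T_E - T_L)
# and analogously with the additional G_LS and G_SE couplings in the three-temperature case.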
def run(self):
idealtimestep = self.stability()
if not self.time_step:
self.time_step = idealtimestep
print('-----------------------------------------------------------')
print('No specific time step has been indicated. \n '\
'The stability region has been calculated and an appropriate timestep has been chosen.\n '\
'Timestep = {idealtimestep:.2e} s'.format(idealtimestep=idealtimestep))
print('-----------------------------------------------------------')
if (self.time_step-idealtimestep)/idealtimestep > 0.1:
print('-----------------------------------------------------------')
print('The manually chosen time step of {time_step:.2e} s may be too large and could cause instabilities in the simulation.\n '\
'We suggest a timestep of {idealtimestep:.2e} s'.format(time_step=self.time_step,idealtimestep=idealtimestep))
print('-----------------------------------------------------------')
if(self.time_step-idealtimestep)/idealtimestep < -0.2:
print('-----------------------------------------------------------')
print('The manually chosen time step of {time_step:.2e} s is very small and may cause a long simulation time.\n'\
'We suggest a timestep of {idealtimestep:.2e} s'.format(time_step=self.time_step,idealtimestep=idealtimestep))
print('-----------------------------------------------------------')
#loading simulation relevant properties from the structural temperature object
[c_E,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large,interfaces,LayerMat,A1h] = self.temp_data.Msetup()
t = np.arange(self.start_time,self.final_time,self.time_step)
#only if the injection would make the time grid smaller, to not move into instable regime
if self.source.FWHM:
if (6*self.source.FWHM/200 < idealtimestep):
#inject 200 extra points around pulse to fully capture the shape of the pulse
tinj = np.linspace(self.source.t0 - 3*self.source.FWHM,self.source.t0 + 3*self.source.FWHM,200)
smaller = np.where(t<self.source.t0 - 3*self.source.FWHM)[0]
bigger = np.where(t>self.source.t0 + 3*self.source.FWHM)[0]
#new time grid with higher resolution
t = np.concatenate((t[smaller],tinj,t[bigger]),axis=0)
tstep = np.ones(len(t))
tstep[:-1] = np.diff(t); tstep[-1] = np.diff(t)[-1]
#If a more refined grid is chosen around t0, we inject a fine time grid around t0 to correctly capture the pulse shape
if self.source.adjusted_grid is not False:
if self.source.dt0 == False:
print('-----------------------------------------------------------')
print('The option for an adjusted grid is True, but no interval for a more refined grid has been given. '\
'Indicate dt0 (around which value the time grid should have higher resolution) in the source object.')
print('-----------------------------------------------------------')
if 2*self.source.dt0/self.source.extra_points < idealtimestep:
print('-----------------------------------------------------------')
print('A refined Grid around t0 has been applied')
print('-----------------------------------------------------------')
tinj = np.linspace(self.source.t0-self.source.dt0,self.source.t0+self.source.dt0,self.source.extra_points)
smaller = np.where(t<self.source.t0 - self.source.dt0)[0]
bigger = np.where(t>self.source.t0 + self.source.dt0)[0]
#new time grid with higher resolution
t = np.concatenate((t[smaller],tinj,t[bigger]),axis=0)
tstep = np.ones(len(t))
tstep[:-1] = np.diff(t); tstep[-1] = np.diff(t)[-1]
else:
print('-----------------------------------------------------------')
print('No refined time grid is applied. The timestep is already very small. ' \
'You can use the simulation class property self.time_step and '\
'assign it a smaller value than the current time step.')
print('-----------------------------------------------------------')
#Initialize the systems and load the matrices
if self.temp_data_Lat:
if self.temp_data.plt_points != self.temp_data_Lat.plt_points:
self.temp_data_Lat.plt_points = self.temp_data.plt_points
print('-----------------------------------------------------------')
print('The number of plotting points in the electron system \n'\
'is not the same as in the lattice system.\n'\
'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
if self.temp_data.collocpts != self.temp_data_Lat.collocpts:
self.temp_data_Lat.collocpts = self.temp_data.collocpts
print('-----------------------------------------------------------')
print('The number of collocation points in the electron system \n'\
'is not the same as in the lattice system.\n'\
'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
[c_L,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large_L,interfaces,LayerMat,A1h] = self.temp_data_Lat.Msetup()
if self.temp_data_Spin:
if self.temp_data.plt_points != self.temp_data_Spin.plt_points:
self.temp_data_Spin.plt_points = self.temp_data.plt_points
print('-----------------------------------------------------------')
print('The number of plotting points in the electron system \n'\
'is not the same as in the spin system.\n'\
'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
if self.temp_data.collocpts != self.temp_data_Spin.collocpts:
self.temp_data_Spin.collocpts = self.temp_data.collocpts
print('-----------------------------------------------------------')
print('The number of collocation points in the electron system \n'\
'is not the same as in the spin system.\n'\
'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
[c_S,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large_S,interfaces,LayerMat,A1h] = self.temp_data_Spin.Msetup()
if (self.source.fluence == 0):
print('-----------------------------------------------------------')
print('No source is applied.\n'\
'source.fluence = 0')
print('-----------------------------------------------------------')
xmg, tmg = np.meshgrid(xflat,t)
sourceM = np.zeros_like(xmg)
else:
sourceM = self.sourceprofile(self.source.spaceprofile,self.source.timeprofile,xflat,self.temp_data.length,t,N)
#Making the boundary conditions a function of t, in case they are given as scalars
if isinstance(self.left_BC,(int,float)):
dummy = self.left_BC
self.left_BC = lambda t: dummy + 0*t
if isinstance(self.right_BC,(int,float)):
dummy1 = self.right_BC
self.right_BC = lambda t: dummy1 + 0*t
#Making the boundary conditions a matrix for the electron case
BC_E = np.zeros((len(c_E),len(t)))
BC_E[0] = self.left_BC(t)
BC_E[-1] = self.right_BC(t)
#Checking the Lattice system boundary conditions
if self.temp_data_Lat:
if isinstance(self.left_BC_L,(int,float)):
dummy2 = self.left_BC_L
self.left_BC_L = lambda t: dummy2 + 0*t
if isinstance(self.right_BC_L,(int,float)):
dummy3 = self.right_BC_L
self.right_BC_L = lambda t: dummy3 + 0*t
#Making the boundary conditions a matrix for the lattice case
BC_L = np.zeros((len(c_L),len(t)))
BC_L[0] = self.left_BC_L(t)
BC_L[-1] = self.right_BC_L(t)
#Checking the Spin system boundary conditions
#This implies that all three temperature systems are considered under this "if-tree"
if self.temp_data_Spin:
if isinstance(self.left_BC_S,(int,float)):
dummy4 = self.left_BC_S
self.left_BC_S = lambda t: dummy4 + 0*t
if isinstance(self.right_BC_S,(int,float)):
dummy5 = self.right_BC_S
self.right_BC_S = lambda t: dummy5 + 0*t
#Making the boundary conditions a matrix for the Spin case
BC_S = np.zeros((len(c_S),len(t)))
BC_S[0] = self.left_BC_S(t)
BC_S[-1] = self.right_BC_S(t)
#Check if the Lattice/Spin and Spin/Electron coupling constants have the right size
if np.size(self.coupling_LS)<np.size(length)-1:
self.coupling_LS = self.coupling_LS*np.ones(np.size(self.temp_data.length)-1)
print('-----------------------------------------------------------')
print('Not every layer has a unique Lattice-Spin coupling constant \'G_LS\'.\n'\
'=> G_LS will be set to the value of the first layer = {coupling_LS[0]:.2e}\n for all other layers.'.format(coupling_LS=self.coupling_LS))
print('-----------------------------------------------------------')
if np.size(self.coupling_SE)<np.size(length)-1:
self.coupling_SE = self.coupling_SE*np.ones(np.size(self.temp_data.length)-1)
print('-----------------------------------------------------------')
print('Not every layer has a unique Spin-Electron coupling constant \'G_SE\'.\n'\
'=> G_SE will be set to the value of the first layer = {coupling_SE[0]:.2e}\n for all other layers.'.format(coupling_SE=self.coupling_SE))
print('-----------------------------------------------------------')
#If only the two temperature model is considered I only need to check one coupling constant
if np.size(self.coupling)<np.size(length)-1:
self.coupling = self.coupling*np.ones(np.size(self.temp_data.length)-1)
print('-----------------------------------------------------------')
print('Not every layer has a unique coupling constant \'G\'.\n'\
'=> G will be set to the value of the first layer = {coupling[0]:.2e}\n for all other layers.'.format(coupling=self.coupling))
print('-----------------------------------------------------------')
# The 3 Temperature Case is being considered
if self.temp_data_Spin:
#Setup arrays for electron temperature
phi_E = np.zeros((len(t),len(x_plt_flat))); phi_E[0] = initphi_large
Flow_1E = np.zeros(len(c_E))
Flow_2E = np.zeros(len(c_E))
dphi_E = np.zeros(len(c_E))
intphi_E = np.zeros(len(c_E))
#Setup arrays for lattice temperature
phi_L = np.zeros((len(t),len(x_plt_flat))); phi_L[0] = initphi_large_L #300*np.ones(len(phi_L[0]))
Flow_1L = np.zeros(len(c_L))
Flow_2L = np.zeros(len(c_L))
dphi_L = np.zeros(len(c_L))
intphi_L = np.zeros(len(c_L))
#Setup arrays for the spin temperature
phi_S = np.zeros((len(t),len(x_plt_flat))); phi_S[0] = initphi_large_S #300*np.ones(len(phi_L[0]))
Flow_1S = np.zeros(len(c_S))
Flow_2S = np.zeros(len(c_S))
dphi_S = np.zeros(len(c_S))
intphi_S = np.zeros(len(c_S))
#General setup for E.E. loop
condi = np.array([np.arange(1,len(length)-1)])*(N-1) #Index to apply interface condition
cnfill = np.array([np.arange(1,len(length)-1)])*(plp-1)#correct interface condition with real value for phi
A00[0] = 1; A00[-1] = 1 #Avoid division by 0 in dphi! Clear for BC before the intphi calculation.
Abig_E = np.copy(Abig) #Since Abig can change due to interconditions we split it here
Abig_L = np.copy(Abig) #The interface conditions are applied on every time step
Abig_S = np.copy(Abig) #Every system gets individual matrix
start_EL = time.time()
for i in tqdm(range(1,len(t)),position = 0):
#Calculate Solution at every time step and respective derivatives
phi0_E = np.dot(A00,c_E); phi1_E = np.dot(A1b,c_E); phi2_E = np.dot(A2b,c_E)
phi0_L = np.dot(A00,c_L); phi1_L = np.dot(A1b,c_L); phi2_L = np.dot(A2b,c_L)
phi0_S = np.dot(A00,c_S); phi1_S = np.dot(A1b,c_S); phi2_S = np.dot(A2b,c_S)
#Calculating interface conditions which are applied later
intercon_E = self.interconditions(phi_E[i-1],interfaces,self.temp_data.conductivity,N,A1h)
intercon_L = self.interconditions(phi_L[i-1],interfaces,self.temp_data_Lat.conductivity,N,A1h)
intercon_S = self.interconditions(phi_S[i-1],interfaces,self.temp_data_Spin.conductivity,N,A1h)
startf = 0;endf = N-1
#Construct all piecewise flows and piecewise dphi. Iterate over layers
for j in range(0,interfaces+1):
#electron: d/dx[k(phi) * d/dx(phi)]
Flow_1E[startf:endf] = self.temp_data.diff_conductivity(phi0_E[startf:endf],j)
Flow_2E[startf:endf] = self.temp_data.conductivity[j](phi0_E[startf:endf])
Flow_1E[startf:endf] *=phi1_E[startf:endf]**2
Flow_2E[startf:endf] *= phi2_E[startf:endf]
#lattice
Flow_1L[startf:endf] = self.temp_data_Lat.diff_conductivity(phi0_L[startf:endf],j)
Flow_2L[startf:endf] = self.temp_data_Lat.conductivity[j](phi0_L[startf:endf])
Flow_1L[startf:endf] *=phi1_L[startf:endf]**2
Flow_2L[startf:endf] *= phi2_L[startf:endf]
#Spin
Flow_1S[startf:endf] = self.temp_data_Spin.diff_conductivity(phi0_S[startf:endf],j)
Flow_2S[startf:endf] = self.temp_data_Spin.conductivity[j](phi0_S[startf:endf])
Flow_1S[startf:endf] *=phi1_S[startf:endf]**2
Flow_2S[startf:endf] *= phi2_S[startf:endf]
#calculate delta phi for electron, lattice and spin
#This is the core of the problem
dphi_E[startf:endf] = 1/(self.temp_data.heatCapacity[j](phi0_E)[startf:endf]*self.temp_data.rho[j])*\
(Flow_1E[startf:endf]+Flow_2E[startf:endf]+sourceM[i,startf:endf] +\
self.coupling[j]*(phi0_L[startf:endf]-phi0_E[startf:endf])+self.coupling_SE[j]*(phi0_S[startf:endf]-phi0_E[startf:endf]))
#Lattice time derivative
dphi_L[startf:endf] = 1/(self.temp_data_Lat.heatCapacity[j](phi0_L)[startf:endf]*self.temp_data_Lat.rho[j])*\
(Flow_1L[startf:endf]+Flow_2L[startf:endf] +\
self.coupling[j]*(phi0_E[startf:endf]-phi0_L[startf:endf])+self.coupling_LS[j]*(phi0_S[startf:endf]-phi0_L[startf:endf]))
#Spin system time derivative
dphi_S[startf:endf] = 1/(self.temp_data_Spin.heatCapacity[j](phi0_S)[startf:endf]*self.temp_data_Spin.rho[j])*\
(Flow_1S[startf:endf]+Flow_2S[startf:endf] +\
self.coupling_LS[j]*(phi0_L[startf:endf]-phi0_S[startf:endf])+self.coupling_SE[j]*(phi0_E[startf:endf]-phi0_S[startf:endf]))
startf += N-1; endf +=N-1 #Move one layer further
start_i = 0; end_i = N-1
#Apply interface conditions for all layers in every time step, i.e.:
#filling up Abig with the interface condition in the middle of the grid
for k in range(0,interfaces):
#for the electron system
Abig_E[end_i,start_i:end_i] = intercon_E[0][k][:-1]#Lhs interface flow
Abig_E[end_i,end_i+1:end_i+N] = -intercon_E[1][k][1:]#Rhs interface flow
Abig_E[end_i,end_i] = intercon_E[0][k][-1] -intercon_E[1][k][0]
#for the lattice system
Abig_L[end_i,start_i:end_i] = intercon_L[0][k][:-1]#Lhs interface flow
Abig_L[end_i,end_i+1:end_i+N] = -intercon_L[1][k][1:]#Rhs interface flow
Abig_L[end_i,end_i] = intercon_L[0][k][-1] -intercon_L[1][k][0]
#for the Spin system
Abig_S[end_i,start_i:end_i] = intercon_S[0][k][:-1]#Lhs interface flow
Abig_S[end_i,end_i+1:end_i+N] = -intercon_S[1][k][1:]#Rhs interface flow
Abig_S[end_i,end_i] = intercon_S[0][k][-1] -intercon_S[1][k][0]
start_i += N-1; end_i += N-1
#computing the flux for every time step at the boundaries
#If Neumann BC -> divide by k(T) since BC_Type = 1
#If Dirichlet BC -> divide by 1 since BC_Type = 0
Flux_E = BC_E[:,i]#Avoiding 0 in the denominator
Flux_E[0] /= self.temp_data.conductivity[0](c_E[0])**self.temp_data.Left_BC_Type + 1e-12
Flux_E[-1] /= self.temp_data.conductivity[-1](c_E[-1])**self.temp_data.Right_BC_Type + 1e-12
Flux_L = BC_L[:,i]
Flux_L[0] /= self.temp_data_Lat.conductivity[0](c_L[0])**self.temp_data_Lat.Left_BC_Type + 1e-12
Flux_L[-1] /= self.temp_data_Lat.conductivity[-1](c_L[-1])**self.temp_data_Lat.Right_BC_Type + 1e-12
Flux_S = BC_S[:,i]
Flux_S[0] /= self.temp_data_Spin.conductivity[0](c_S[0])**self.temp_data_Spin.Left_BC_Type + 1e-12
Flux_S[-1] /= self.temp_data_Spin.conductivity[-1](c_S[-1])**self.temp_data_Spin.Right_BC_Type + 1e-12
#Clear for boundary conditions at the edges of the grid
dphi_E[0] = 0; dphi_E[-1] = 0;
phi0_E[0] = 0; phi0_E[-1] = 0;
dphi_L[0] = 0; dphi_L[-1] = 0;
phi0_L[0] = 0; phi0_L[-1] = 0;
dphi_S[0] = 0; dphi_S[-1] = 0;
phi0_S[0] = 0; phi0_S[-1] = 0;
#intermediate phi with low resolution in space according to explicit euler
intphi_E = phi0_E + tstep[i] * dphi_E + Flux_E
intphi_L = phi0_L + tstep[i] * dphi_L + Flux_L
intphi_S = phi0_S + tstep[i] * dphi_S + Flux_S
#Interface condition: Setting the rhs to 0, such that the heat transported (flux = Q = k*d/dx phi)
#from left is what comes out at the right hand side Q_1 -> Q_2
intphi_E[condi] = 0 #Interface condition: Q_1 -Q_2 = 0
intphi_L[condi] = 0
intphi_S[condi] = 0
#electron: use c to map on high resolution x-grid
#since in Abig, k(T(t)) is inserted we have to solve the system for every step
c_E = np.linalg.solve(Abig_E,intphi_E) # c(t) for every timestep
phi_E[i] = np.dot(Cb,c_E) # map spline coefficients to fine Cb grid
phi_E[i,cnfill] = c_E[condi] #correct the values for phi at interface
#lattice
c_L = np.linalg.solve(Abig_L,intphi_L)
phi_L[i] = np.dot(Cb,c_L)
phi_L[i,cnfill] = c_L[condi]
#spin
c_S = np.linalg.solve(Abig_S,intphi_S)
phi_S[i] = np.dot(Cb,c_S)
phi_S[i,cnfill] = c_S[condi]
end_EL = time.time()
print('-----------------------------------------------------------')
print('Heat diffusion in a coupled electron-lattice-spin system has been simulated.')
print('Elapsed time in E.E. loop:', end_EL-start_EL)
print('-----------------------------------------------------------')
T = []
T.append(phi_E); T.append(phi_L); T.append(phi_S)
return(x_plt_flat,t,T)
#=======End 3 temp Case =================================
#The two temperature model is considered
if self.temp_data_Lat:
#Setup arrays for electron temperature
phi_E = np.zeros((len(t),len(x_plt_flat))); phi_E[0] = initphi_large
Flow_1E = np.zeros(len(c_E))
Flow_2E = np.zeros(len(c_E))
dphi_E = np.zeros(len(c_E))
intphi_E = np.zeros(len(c_E))
#Setup arrays for lattice temperature
phi_L = np.zeros((len(t),len(x_plt_flat))); phi_L[0] = initphi_large_L
Flow_1L = np.zeros(len(c_L))
Flow_2L = np.zeros(len(c_L))
dphi_L = np.zeros(len(c_L))
intphi_L = np.zeros(len(c_L))
#General setup for E.E. loop
condi = np.array([np.arange(1,len(length)-1)])*(N-1) #Index to apply interface condition
cnfill = np.array([np.arange(1,len(length)-1)])*(plp-1)#correct interface condition with real value for phi
A00[0] = 1; A00[-1] = 1 #Avoid division by 0 in dphi! Clear for BC before the intphi calculation.
Abig_E = np.copy(Abig) #Since Abig can change due to interconditions we split it here
Abig_L = np.copy(Abig) #The interface conditions are applied on every time step
start_EL = time.time()
for i in tqdm(range(1,len(t)),position = 0):
#Calculate Solution at every time step and respective derivatives
phi0_E = np.dot(A00,c_E); phi1_E = np.dot(A1b,c_E); phi2_E = np.dot(A2b,c_E)
phi0_L = np.dot(A00,c_L); phi1_L = np.dot(A1b,c_L); phi2_L = np.dot(A2b,c_L)
#Calculating interface conditions which are applied later
intercon_E = self.interconditions(phi_E[i-1],interfaces,self.temp_data.conductivity,N,A1h)
intercon_L = self.interconditions(phi_L[i-1],interfaces,self.temp_data_Lat.conductivity,N,A1h)
startf = 0;endf = N-1
#Construct all piecewise flows and piecewise dphi. Iterate over layers
for j in range(0,interfaces+1):
#electron
Flow_1E[startf:endf] = self.temp_data.diff_conductivity(phi0_E[startf:endf],j)
Flow_2E[startf:endf] = self.temp_data.conductivity[j](phi0_E[startf:endf])
Flow_1E[startf:endf] *=phi1_E[startf:endf]**2
Flow_2E[startf:endf] *= phi2_E[startf:endf]
#lattice
Flow_1L[startf:endf] = self.temp_data_Lat.diff_conductivity(phi0_L[startf:endf],j)
Flow_2L[startf:endf] = self.temp_data_Lat.conductivity[j](phi0_L[startf:endf])
Flow_1L[startf:endf] *=phi1_L[startf:endf]**2
Flow_2L[startf:endf] *= phi2_L[startf:endf]
#calculate delta phi for electron and lattice
#This is the core of the problem
dphi_E[startf:endf] = 1/(self.temp_data.heatCapacity[j](phi0_E)[startf:endf]*self.temp_data.rho[j])*\
(Flow_1E[startf:endf]+Flow_2E[startf:endf]+sourceM[i,startf:endf] + self.coupling[j]*(phi0_L[startf:endf]-phi0_E[startf:endf]))
dphi_L[startf:endf] = 1/(self.temp_data_Lat.heatCapacity[j](phi0_L)[startf:endf]*self.temp_data_Lat.rho[j])*\
(Flow_1L[startf:endf]+Flow_2L[startf:endf] + self.coupling[j]*(phi0_E[startf:endf]-phi0_L[startf:endf]))
startf += N-1; endf +=N-1
#filling up Abig with the interface condition in the middle of the grid
start_i = 0; end_i = N-1
for k in range(0,interfaces): #Apply interface conditions for all layers in every time step
#for the electron system
Abig_E[end_i,start_i:end_i] = intercon_E[0][k][:-1]#Lhs interface flow
Abig_E[end_i,end_i+1:end_i+N] = -intercon_E[1][k][1:]#Rhs interface flow
Abig_E[end_i,end_i] = intercon_E[0][k][-1] -intercon_E[1][k][0]
#for the lattice system
Abig_L[end_i,start_i:end_i] = intercon_L[0][k][:-1]#Lhs interface flow
Abig_L[end_i,end_i+1:end_i+N] = -intercon_L[1][k][1:]#Rhs interface flow
Abig_L[end_i,end_i] = intercon_L[0][k][-1] -intercon_L[1][k][0]
start_i += N-1; end_i += N-1
#computing the flux for every time step for the boundaries
Flux_E = BC_E[:,i]
Flux_E[0] /= self.temp_data.conductivity[0](c_E[0])**self.temp_data.Left_BC_Type + 1e-12
Flux_E[-1] /= self.temp_data.conductivity[-1](c_E[-1])**self.temp_data.Right_BC_Type + 1e-12
Flux_L = BC_L[:,i]
Flux_L[0] /= self.temp_data_Lat.conductivity[0](c_L[0])**self.temp_data_Lat.Left_BC_Type + 1e-12
Flux_L[-1] /= self.temp_data_Lat.conductivity[-1](c_L[-1])**self.temp_data_Lat.Right_BC_Type + 1e-12
#Clear for boundary conditions at the edges of the grid
dphi_E[0] = 0; dphi_E[-1] = 0; dphi_L[0] = 0; dphi_L[-1] = 0
phi0_E[0] = 0; phi0_E[-1] = 0; phi0_L[0] = 0; phi0_L[-1] = 0;
#intermediate phi with low resolution in space according to explicit euler
intphi_E = phi0_E + tstep[i] * dphi_E + Flux_E
intphi_E[condi] = 0
intphi_L = phi0_L + tstep[i] * dphi_L + Flux_L
intphi_L[condi] = 0 #Interface condition: Q_1 -Q_2 = 0
#electron: use c to map on high resolution x-grid
#since in Abig, k(T(t)) is inserted we have to solve the system for every step
c_E = np.linalg.solve(Abig_E,intphi_E) # c(t) for every timestep
phi_E[i] = np.dot(Cb,c_E) # map spline coefficients to fine Cb grid
phi_E[i,cnfill] = c_E[condi] #correct the values for phi at interface
#lattice
c_L = np.linalg.solve(Abig_L,intphi_L)
phi_L[i] = np.dot(Cb,c_L)
phi_L[i,cnfill] = c_L[condi]
end_EL = time.time()
print('-----------------------------------------------------------')
print('Heat diffusion in a coupled electron-lattice system has been simulated')
print('Elapsed time in E.E. loop:', end_EL-start_EL)
print('-----------------------------------------------------------')
T = []
T.append(phi_E); T.append(phi_L)
return(x_plt_flat,t,T)
#=============End 2Temp Case ========================
else: #this is the single temperature case. (Only electron temperature)
#prepare space to store phi solution on fine plt grid. And Flow_1,2 vectors
phi = np.zeros((len(t),len(x_plt_flat))); phi[0] = initphi_large
Flow_1 = np.zeros(len(c_E))
Flow_2 = np.zeros(len(c_E))
dphi = np.zeros(len(c_E))
intphi = np.zeros(len(c_E))
condi = np.array([np.arange(1,len(length)-1)])*(N-1) #Index to apply interface condition
cnfill = np.array([np.arange(1,len(length)-1)])*(plp-1) #correct interface condition with real value for phi
A00[0] = 1; A00[-1] = 1 #Avoid 1/0 division in dphi calculation. See E.E. loop
startE = time.time()
for i in tqdm(range(1,len(t)),position = 0):
phi0 = np.dot(A00,c_E); phi1 = np.dot(A1b,c_E); phi2 = np.dot(A2b,c_E)
intercon_E = self.interconditions(phi[i-1],interfaces,self.temp_data.conductivity,N,A1h) #get interface conditions for every time step
startf = 0;endf = N-1
#construct all piecewise flows and piecewise dphi
for j in range(0,interfaces+1):
Flow_1[startf:endf] = self.temp_data.diff_conductivity(phi0[startf:endf],j)
Flow_2[startf:endf] = self.temp_data.conductivity[j](phi0[startf:endf])
Flow_1[startf:endf] *=phi1[startf:endf]**2
Flow_2[startf:endf] *= phi2[startf:endf]
dphi[startf:endf] = 1/(self.temp_data.heatCapacity[j](phi0)[startf:endf]*self.temp_data.rho[j])*\
(Flow_1[startf:endf]+Flow_2[startf:endf]+sourceM[i,startf:endf])
startf += N-1; endf +=N-1
#filling up Abig with the interface condition in the middle of the grid
start_i = 0; end_i = N-1
for k in range(0,interfaces): #Apply interface conditions for all layers in every time step
Abig[end_i,start_i:end_i] = intercon_E[0][k][:-1]#Lhs interface flow
Abig[end_i,end_i+1:end_i+N] = -intercon_E[1][k][1:]#Rhs interface flow
Abig[end_i,end_i] = intercon_E[0][k][-1] -intercon_E[1][k][0]
start_i += N-1; end_i += N-1
#computing the flux for every time step for the boundaries
Flux_E = BC_E[:,i]
Flux_E[0] /= self.temp_data.conductivity[0](c_E[0])**self.temp_data.Left_BC_Type + 1e-12
Flux_E[-1] /= self.temp_data.conductivity[-1](c_E[-1])**self.temp_data.Right_BC_Type + 1e-12
#Make space for BC
dphi[0] = 0; dphi[-1] = 0
phi0[0] = 0; phi0[-1] = 0
intphi = phi0 + tstep[i] * dphi + Flux_E
intphi[condi] = 0 #Interface condition: Q_1 -Q_2 = 0
# c(t) for every timestep
#since in Abig k(T(t)) is inserted we have to solve the system for every step
c_E = np.linalg.solve(Abig,intphi) #this system has to be solved in every time step
phi[i] = np.dot(Cb,c_E) # map spline coefficients to fine Cb grid
phi[i,cnfill] = c_E[condi] #correct the values for phi at interface
endE = time.time()
print('-----------------------------------------------------------')
print('Electron temperature heat diffusion has been simulated.')
print('Elapsed time in E.E. loop:', endE-startE)
print('-----------------------------------------------------------')
return(x_plt_flat,t,phi)
def stability(self):
"""
If only the electron temperature system is under consideration, we only
compute the eigenvalues of lambda_i = k/(C*rho)*A00^-1*A2b. That is,
we consider the minimum eigenvalue for each layer to represent the time constant.
The time step for E.E. is then given by -2/min(lambda_i), which is
the stability criterion for the E.E. loop, to obtain convergence.
"""
[c,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large,interfaces,LayerMat,A1h] = self.temp_data.Msetup()
A00[0,0] = 1; A00[-1,-1] = 1
rho_E = self.temp_data.rho
conductivity_E = self.temp_data.conductivity
conductivity_E = np.asarray(conductivity_E)
typecheck = np.array([1])[0]
for i in range(0,len(conductivity_E)):
#In case conductivity is a function k(T) we compute a worst case scenario
#this is because we can only compare integers.
if not isinstance(conductivity_E[i],(int,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
conductivity_E[i] = max(conductivity_E[i](testT))
heatCapacity_E = self.temp_data.heatCapacity
heatCapacity_E = np.asarray(heatCapacity_E)
for i in range(0,len(heatCapacity_E)):
#In case heatCapacity is a function C(T) we compute a worst case scenario
#and take an integer value to compare
if not isinstance(heatCapacity_E[i],(int,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
heatCapacity_E[i] = min(heatCapacity_E[i](testT))
Eval = np.zeros(interfaces+1) #for each layer there will be an eigenvalue
koeff1 = conductivity_E/(heatCapacity_E*rho_E)
for i in range(0,interfaces+1):
Lambda = koeff1[i]*LayerMat[i]
Eval[i] = min(np.real(np.linalg.eig(Lambda)[0]))
tkonst_E = -1.9/Eval
if self.num_of_temp == 2:
"""
In the multi-temperature case, we also consider the lattice dynamics,
with the respective k_L/(C_L*rho_L) dynamics. In addition, we also have to
consider the coupling between those two systems, i.e. G_mat,
with coefficients koeff_2 = G/(heatCapacity*rho).
Therefore we compute the eigenvalues of the combined system:
lambda_i = eval(Lambda + G_mat) for each layer.
The time constant is again -2/min(lambda_i).
"""
if self.temp_data.collocpts != self.temp_data_Lat.collocpts:
self.temp_data_Lat.collocpts = self.temp_data.collocpts
print('-----------------------------------------------------------')
print('The number of collocation points in the electron system \n'\
'is not the same as in the lattice system.\n'\
'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
[c,A00_L,Abig,A1b,A2b_L,Cb,length,N,plp,xflat,x_plt_flat,initphi_large,interfaces,LayerMat,A1h] = self.temp_data_Lat.Msetup()
A00_L[0,0] = 1; A00_L[-1,-1] = 1
rho_L = self.temp_data_Lat.rho
G = self.coupling
G = np.asarray(G)
conductivity_L = self.temp_data_Lat.conductivity
conductivity_L = np.asarray(conductivity_L)
#In case conductivity is a function k(T) we compute a worst case scenario
for i in range(0,len(conductivity_L)):
if not isinstance(conductivity_L[i],(int ,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
conductivity_L[i] = max(conductivity_L[i](testT))
heatCapacity_L = self.temp_data_Lat.heatCapacity
heatCapacity_L = np.asarray(heatCapacity_L)
#In case heatCapacity is a function C(T) we compute a worst case scenario
for i in range(0,len(heatCapacity_L)):
if not isinstance(heatCapacity_L[i],(int,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
heatCapacity_L[i] = min(heatCapacity_L[i](testT))
#M: for every layer we load the respective matrix from the temperature class
M = np.shape(LayerMat)[1]
Lambda = np.zeros((2*M,2*M))
Eval = np.zeros(interfaces+1)
G_mat = np.zeros((2*M,2*M))
koeff1_E = conductivity_E/(heatCapacity_E*rho_E)
koeff1_L = conductivity_L/(heatCapacity_L*rho_L)
koeff2_E = G/(heatCapacity_E*rho_E)
koeff2_L = G/(heatCapacity_L*rho_L)
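            #Assemble the 2M x 2M coupled system: diffusion blocks on the diagonal of Lambda, coupling terms in G_mat.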
for i in range(0,interfaces+1):
Lambda[0:M,0:M] = koeff1_E[i]*LayerMat[i]
Lambda[M:,M:] = koeff1_L[i]*LayerMat[i]
G_mat[0:M,0:M] = -koeff2_E[i]*np.eye(M)
G_mat[0:M,M:] = koeff2_E[i]*np.eye(M)
G_mat[M:,0:M] = koeff2_L[i]*np.eye(M)
G_mat[M:,M:] = -koeff2_L[i]*np.eye(M)
Eval[i] = min(np.real(np.linalg.eig(Lambda+G_mat)[0]))
tkonst = -1.9/Eval
return(min(tkonst))
if self.num_of_temp == 3:
"""
            Consider the case of a three temperature model and follow the same
            procedure as in the two temperature case, except now take all the coupling
            constants into consideration!
"""
            if self.temp_data.collocpts != self.temp_data_Lat.collocpts:
self.temp_data_Lat.collocpts = self.temp_data.collocpts
print('-----------------------------------------------------------')
print('The number of collocation points in the electron system \n'\
'is not the same as in the lattice system.\n'\
                  'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
            if self.temp_data.collocpts != self.temp_data_Spin.collocpts:
self.temp_data_Spin.collocpts = self.temp_data.collocpts
print('-----------------------------------------------------------')
print('The number of collocation points in the electron system \n'\
'is not the same as in the spin system.\n'\
                  'They are set equal to avoid matrix dimension mismatch.')
print('-----------------------------------------------------------')
[c,A00_L,Abig,A1b,A2b_L,Cb,length,N,plp,xflat,x_plt_flat,initphi_large,interfaces,LayerMat,A1h] = self.temp_data_Lat.Msetup()
A00_L[0,0] = 1; A00_L[-1,-1] = 1
rho = self.temp_data_Lat.rho
#Load different coupling constants and make them arrays
G_EL = self.coupling; G_EL = np.asarray(G_EL)
G_LS = self.coupling_LS; G_LS = np.asarray(G_LS)
G_SE = self.coupling_SE; G_SE = np.asarray(G_SE)
conductivity_L = self.temp_data_Lat.conductivity
conductivity_L = np.asarray(conductivity_L)
conductivity_S = self.temp_data_Spin.conductivity
conductivity_S = np.asarray(conductivity_S)
heatCapacity_L = self.temp_data_Lat.heatCapacity
heatCapacity_L = np.asarray(heatCapacity_L)
heatCapacity_S = self.temp_data_Spin.heatCapacity
heatCapacity_S = np.asarray(heatCapacity_S)
#In case heatCapacity is a function C(T) we compute a worst case scenario
#That is we reduce the problem into a constant coefficient case
for i in range(0,len(conductivity_L)):
if not isinstance(conductivity_L[i],(int,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
conductivity_L[i] = max(conductivity_L[i](testT))
for i in range(0,len(conductivity_S)):
if not isinstance(conductivity_S[i],(int,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
conductivity_S[i] = max(conductivity_S[i](testT))
for i in range(0,len(heatCapacity_L)):
if not isinstance(heatCapacity_L[i],(int,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
heatCapacity_L[i] = min(heatCapacity_L[i](testT))
for i in range(0,len(heatCapacity_S)):
if not isinstance(heatCapacity_S[i],(int,float,type(typecheck))):
testT = np.linspace(self.stability_lim[0],self.stability_lim[1],50)
heatCapacity_S[i] = min(heatCapacity_S[i](testT))
#construct Matrices for the Kronecker product
K11 = np.array([[1,0,0],[0,0,0],[0,0,0]])
K12 = np.array([[0,1,0],[0,0,0],[0,0,0]])
K13 = np.array([[0,0,1],[0,0,0],[0,0,0]])
K21 = np.array([[0,0,0],[1,0,0],[0,0,0]])
K22 = np.array([[0,0,0],[0,1,0],[0,0,0]])
K23 = np.array([[0,0,0],[0,0,1],[0,0,0]])
K31 = np.array([[0,0,0],[0,0,0],[1,0,0]])
K32 = np.array([[0,0,0],[0,0,0],[0,1,0]])
K33 = np.array([[0,0,0],[0,0,0],[0,0,1]])
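            #np.kron(K_ij, A) places the matrix A in block-row i, block-column j of the 3M x 3M coupled system.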
            #Unity matrix for the Kronecker product on the RHS and place to store Eval
unity = np.eye(np.shape(LayerMat)[1])
Eval = np.zeros(interfaces+1)
#Compute the minimum eigenvalue for every layer
for i in range(0,interfaces+1):
coeff_E = conductivity_E[i]/(heatCapacity_E[i]*rho[i])
coeff_L = conductivity_L[i]/(heatCapacity_L[i]*rho[i])
coeff_S = conductivity_S[i]/(heatCapacity_S[i]*rho[i])
Lambda = np.kron(K11,coeff_E*LayerMat[i])
Lambda += np.kron(K22,coeff_L*LayerMat[i])
Lambda += np.kron(K33,coeff_S*LayerMat[i])
G_mat = np.kron(K11,-unity*coeff_E*(G_EL[i]+G_SE[i]))
G_mat += np.kron(K12,unity*coeff_E*G_EL[i])
G_mat += np.kron(K13,unity*coeff_E*G_SE[i])
G_mat += np.kron(K21,unity*coeff_L*G_EL[i])
G_mat += np.kron(K22,-unity*coeff_L*(G_EL[i]+G_LS[i]))
G_mat += np.kron(K23,unity*coeff_L*G_LS[i])
G_mat += np.kron(K31,unity*coeff_S*G_SE[i])
G_mat += np.kron(K32,unity*coeff_S*G_LS[i])
G_mat += np.kron(K33,-unity*coeff_S*(G_SE[i]+G_LS[i]))
#Compute the minimum eigenvalue for each layer
Eval[i] = min(np.real(np.linalg.eig(Lambda+G_mat)[0]))
tkonst = -1.9/Eval
#The global time constant will be guided by the fastest dynamics
#of all the layers!
return(min(tkonst))
else:
#if there is only electron temperature, only those dynamics will be
#considered, when time step for the E.E. loop is calculated.
return(min(tkonst_E))
class source(object):
def __init__(self):
self.spaceprofile = 'TMM'
self.timeprofile = 'Gaussian'
self.fluence = 0
self.t0 = 0
self.FWHM = False
self.loadData = False
self.multipulse = False
self.frequency = False
self.num_of_pulses = False
self.adjusted_grid = False
self.dt0 = False
self.extra_points = 200
self.theta_in = 0# 0 is perpendicular to surface/ pi/2 is grazing
self.lambda_vac = False
self.polarization = 'p'
def getProperties(self): # to depict the properties of the object
for i in (self.__dict__):
name = str(i)
value = str(self.__dict__[i])
print('{:<20}{:>10}'.format( name,value ))
def __repr__(self):
return('Source')
def ref2delta(self,refindex,lambdavac):
"""
        Use the refractive index to compute the optical penetration depth.
        This is used for Lambert-Beer's absorption law.
"""
lambdavac_m = lambdavac*1e-9
        #crop away the two layers of air and only consider the target film layers
refindex = refindex[1:-1]
#If there is no imaginary part we avoid dividing over 0
for i in range(0,len(refindex)):
if np.imag(refindex[i]) == 0:
refindex[i] = refindex[i] + 1e-9j
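        #delta_p = lambda_vac/(4*pi*Im(n)), i.e. the inverse of the absorption coefficient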
deltap = (4*np.pi/lambdavac_m*np.imag(refindex))**(-1)
return(deltap)
def Gaussian(self,xmg,tmg,lam,A,sigma2,x0,customtime = None):
if not (self.fluence or self.FWHM):
print('------------------------------------------------------------')
            print('Create a pulse by defining the pulse properties. \n ' +\
'.fluence, .optical_penetration_depth, .FWHM')
print('------------------------------------------------------------')
        if customtime is None:
#Create a source with respect to each lam of every layer. Use the init_G_source function
Gauss = A*np.exp(-(tmg-self.t0)**2/(2*sigma2)) #Gaussian in time
else:
Gauss = A*customtime#custom in time
Gauss *= lam*np.exp(-lam*(xmg-x0))#space profile: LB decay
return(Gauss)
def init_G_source(self,xflat,x0,t,opt_pen,N,func,customtime = None,scaling = 0):
"""
First an empty array 'sourceM' is created.
Then we iterate over the different layers and call
func --> Gaussian.
This will create a 2D (time, space) Gaussian source grid
with different lam[i].
        For each layer, the problem is reset, i.e. we have a new
Amplitude, new scope of the x-grid, new lambda. Only sigma stays the same.
"""
lam = 1/opt_pen
#create space for solution of source in matrix form
xmg, tmg = np.meshgrid(xflat,t)
sourceM = np.zeros(np.shape(xmg))
        if customtime is None:
#Convert the input, fluence & FWHM given in 'source' class to Amplitude and sigma
sigma2 = self.FWHM**2/(2*np.log(2))
A = self.fluence/np.sqrt(2*np.pi*sigma2)
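            #A is chosen such that the time-integrated Gaussian equals the fluence:
            #integral A*exp(-(t-t0)^2/(2*sigma2)) dt = A*sqrt(2*pi*sigma2) = fluence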
#loop over all layers and change lambda, Amplitude and scope of the x-grid
startL = 0; endL = N-1
for i in range(2,len(opt_pen)+2):
sourceM[:,startL:endL] = func(xmg[:,startL:endL],tmg[:,startL:endL],lam[i-2],A,sigma2,x0[i-2])
#at the end of each loop: the intensity of the end of previous layer
#will be the starting intensity of the next layer
A = A*np.exp(-(x0[i-1]-x0[i-2])*lam[i-2])
startL = endL; endL = i*N-i+1
else:#In the case of LB in space and custom in time
if (self.timeprofile== "RepGaussian") or (self.timeprofile=="repGaussian"):
sigma2 = self.FWHM**2/(2*np.log(2))
A = self.fluence/np.sqrt(2*np.pi*sigma2)
startL = 0; endL = N-1
for i in range(2,len(opt_pen)+2):
sourceM[:,startL:endL] = func(xmg[:,startL:endL],tmg[:,startL:endL],lam[i-2],A,sigma2,x0[i-2],customtime[:,startL:endL])
#at the end of each loop: the intensity of the end of previous layer
#will be the starting intensity of the next layer
A = A*np.exp(-(x0[i-1]-x0[i-2])*lam[i-2])
startL = endL; endL = i*N-i+1
if (self.timeprofile== "custom") or (self.timeprofile == "Custom"):
A = scaling#calculated in the custom() function
sigma2 = 0#It is not needed
startL = 0; endL = N-1
for i in range(2,len(opt_pen)+2):
sourceM[:,startL:endL] = func(xmg[:,startL:endL],tmg[:,startL:endL],lam[i-2],A,sigma2,x0[i-2],customtime[:,startL:endL])
#at the end of each loop: the intensity of the end of previous layer
#will be the starting intensity of the next layer
A = A*np.exp(-(x0[i-1]-x0[i-2])*lam[i-2])
startL = endL; endL = i*N-i+1
return(sourceM)
#mytime is the timegrid of the simulation
#time, amplitude are the timegrids of the inputdata collected from the lab
def custom(self,mytime,myspace,ttime,amplitude,opt_pen):
lam = 1/opt_pen
#Mapping the obtained data to the simulation time grid via interpolation
ampl1D = np.interp(mytime,ttime,amplitude**2)
        #Compute the right amplitude using the area under the curve
        integr = np.trapz(ampl1D,mytime,np.diff(mytime))
        #scaling factor to get the amplitude right
scaling = self.fluence/integr
xmg,tmg = np.meshgrid(myspace,mytime)
ampltime= np.interp(tmg,ttime,amplitude**2)
#ampltime *= scaling
ampl2D = ampltime*lam*np.exp(-lam*xmg)
return(ampl2D,ampltime,scaling)
def fresnel(self,theta_in,n_in,n_out,pol):
n_in = complex(n_in); n_out = complex(n_out)
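        #Snell's law; the angles are complex in general, which accounts for absorbing media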
theta_out = np.arcsin(n_in*np.sin(theta_in)/n_out)
if pol == 's':
rs = (n_in*np.cos(theta_in) - n_out*np.cos(theta_out))/\
(n_in*np.cos(theta_in) + n_out*np.cos(theta_out))
ts = 2*n_in*np.cos(theta_in)/(n_in*np.cos(theta_in)+n_out*np.cos(theta_out))
return(theta_out,rs,ts)
if pol == 'p':
rp = (n_out*np.cos(theta_in)-n_in*np.cos(theta_out))/\
(n_out*np.cos(theta_in)+n_in*np.cos(theta_out))
tp = 2* n_in*np.cos(theta_in)/(n_out*np.cos(theta_in)+n_in*np.cos(theta_out))
return(theta_out,rp,tp)
def TM(self,theta_in,lambda0,n_vec,d_vec,pol):
#create complex arrays for variables
theta = np.zeros(len(n_vec), dtype = complex); theta[0] = theta_in
phi = np.zeros(len(n_vec),dtype = complex)
rn = np.zeros(len(n_vec)-1, dtype = complex)
tn = np.zeros_like(rn,dtype = complex)
M_n = np.zeros((len(n_vec),2,2), dtype = complex)
M = np.eye(2,dtype = complex)
        for i in range(len(n_vec)-1): # to obtain all angles/rn/tn for each layer
[theta[i+1],rn[i],tn[i]] = self.fresnel(theta[i],n_vec[i],n_vec[i+1],pol)
#M = M0*M1*M2*M4*....
for k in range(1,len(n_vec)-1):#loop over all interfaces except 1st
phi[k] = 2*np.pi*n_vec[k]*np.cos(theta[k])*d_vec[k]/lambda0
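            #Tn propagates the field through layer k (phase phi[k], scaled by 1/tn); Pn matches the fields at the interface via rn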
Tn = np.array([[np.exp(-1j*phi[k]),0],[0,np.exp(1j*phi[k])]],dtype = complex)/tn[k]
Pn = np.array([[1,rn[k]],[rn[k],1]],dtype = complex)
M_n[k] = np.dot(Tn,Pn)
M = np.dot(M,M_n[k])
#compute for the first interface:
trans0 = np.array([[1,rn[0]],[rn[0],1]],dtype= complex)/tn[0]
M = np.dot(trans0,M)
#Complex transmission/reflection amplitude
t = 1/M[0,0]
r = M[1,0]/M[0,0]
#Fraction of power transmitted
if pol == 's': #s-polarized
T = np.abs(t)**2*np.real(n_vec[-1]*np.cos(theta[-1]))/\
np.real(n_vec[0]*np.cos(theta[0]))
elif pol == 'p': #p-polarized
T = np.abs(t)**2*np.real(n_vec[-1]*np.cos(np.conj(theta[-1])))/\
np.real(n_vec[0]*np.cos(np.conj(theta[0])))
#Fraction of power reflected
R = np.abs(r)**2
A = 1.-T-R
return(M,M_n,t,r,T,R,A,theta)
def layerAmpl(self,theta_in,lambda0,n_vec,d_vec,pol):
"""
After r & t have been calculated and all the respective matrices M_n
for each layer are known, we can go 'backwards', i.e. from the last to the
        first layer, and compute all the amplitudes for the forward v_n and
backward w_n traveling wave. -> [v_n,w_n].T = M_n @ [v_{n+1},w_{n+1}].T
"""
[M,M_n,t,r,T,R,A,theta] = self.TM(theta_in,lambda0,n_vec,d_vec,pol)
vw_list = np.zeros((len(n_vec),2),dtype = complex)
vw =np.array([[t],[0]])
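        #in the final medium only the forward (transmitted) wave with amplitude t exists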
vw_list[-1,:] = vw.T
for i in range(len(n_vec)-2,0,-1):
vw = np.dot(M_n[i],vw)
vw_list[i,:] = vw.T
return(vw_list,theta)
def absorption(self,theta_in,lambda0,n_vec,d_vec,pol,points):
#reload the forward and backward wave coefficients for every layer
[vw_n,theta] = self.layerAmpl(theta_in,lambda0,n_vec,d_vec,pol)
total_len = np.sum(d_vec[1:-1])
pointcount = 0
        #a is an array where the normalized absorption for the entire grid is stored
a = []
for i in range(1,len(n_vec)-1):
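            #z-component of the wave vector inside layer i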
kz = 2*np.pi*n_vec[i]*np.cos(theta[i])/lambda0
#How many points, out of the total 'points' does each layer get, with respect to
#the length of the layer
if i == 1:
points_per_layer = int(np.floor(points/(len(d_vec)-2)))+1
if len(n_vec)-1 == 2: #only one layer is considered
points_per_layer = points
else: #All other layers get 11 points because of interface cutting
points_per_layer = int(np.floor(points/(len(d_vec)-2)))
#for every layer, the space grid is reset. I.e. every layer starts at 0
layer = np.linspace(0,d_vec[i],points_per_layer)
v = vw_n[i,0]; w = vw_n[i,1];#complex wave amplitudes for every layer
Ef = v * np.exp(1j * kz * layer)#forward traveling wave
Eb = w * np.exp(-1j * kz *layer)#backward traveling wave
if pol == 'p':#p-polarized
a_layer = (n_vec[i]*np.conj(np.cos(theta[i]))*(kz*np.abs(Ef-Eb)**2-np.conj(kz)*np.abs(Ef+Eb)**2)).imag /\
(n_vec[0]*np.conj(np.cos(theta[0]))).real
if pol == 's':
a_layer = (np.abs(Ef+Eb)**2 *np.imag(kz*n_vec[i]*np.cos(theta[i])))/\
(np.real(n_vec[0]*np.cos(theta[0])))
            #for every layer calculate the absorption grid and append it to the total
a = np.append(a,a_layer)
#to get the right amount of points considered in the grid, since we round.
pointcount += points_per_layer
a = np.real(a)
grid = np.linspace(0,total_len,pointcount)
return(a,grid)
def createTMM(self,nvec,xflat,t,x0,timeprof = False,scaling=0):
#The distances and hence x0 have to be given in units of nm!
xmg, tmg = np.meshgrid(xflat,t)
#Adding infinite layer at the beginning and at the end of the distance vector
d_vec = np.diff(x0); d_vec = np.insert(d_vec,(0,len(d_vec)),np.inf)
#Calculating the 1D absorption profile according to TMM
[absorption,grid] = self.absorption(self.theta_in,self.lambda_vac,nvec,d_vec,self.polarization,np.shape(xflat)[0])
#Evaluate the Gaussian time profile
if np.any(timeprof) == False:
#Convert the input, fluence & FWHM given in 'source' class to Amplitude and sigma
sigma2 = self.FWHM**2/(2*np.log(2))
A = self.fluence/np.sqrt(2*np.pi*sigma2)
sourceM = A*np.exp(-(tmg-self.t0)**2/(2*sigma2))
if (self.timeprofile == "repGaussian") or (self.timeprofile == "RepGaussian"):
sigma2 = self.FWHM**2/(2*np.log(2))
A = self.fluence/np.sqrt(2*np.pi*sigma2)
sourceM = A*timeprof
if (self.timeprofile == "Custom") or (self.timeprofile == "custom"):
#Check the custom function in this class! It is already multiplied with a scaling factor
sourceM = scaling*timeprof
#Multiplying it with the absorption profile to obtain 2D (space-time source map)
sourceM *= absorption/1e-9
return(sourceM)
class visual(object):
def __init__(self,*args):
self.data = False
        typecheck = np.array([])
"""
A collection of utility functions not yet categorized.
"""
import os
from collections import OrderedDict
import json
import numpy as np
import scipy
import sympy
import qutip
import theano
import theano.tensor as T
def complexrandn(dim1, dim2):
    """Generates an array of pseudorandom, normally distributed complex numbers."""
big_matrix = np.random.randn(dim1, dim2, 2)
return big_matrix[:, :, 0] + 1.j * big_matrix[:, :, 1]
def isvector(arr):
"""Check if a numpy array is a vector-like object."""
# we are not using `arr.ndims` in case the input is a qutip object
ndims = len(arr.shape)
return (ndims == 1
or (ndims == 2 and (arr.shape[0] == 1 or arr.shape[1] == 1)))
def _complex2bigreal_vector(vector):
"""Convert a complex vector to big real notation."""
vector = vector.reshape((vector.shape[0], 1))
return np.concatenate((np.real(vector), np.imag(vector)), axis=0)
def _complex2bigreal_matrix(matrix):
"""Convert complex matrix to big real notation."""
first_row = np.concatenate((np.real(matrix), -np.imag(matrix)), axis=1)
second_row = np.concatenate((np.imag(matrix), np.real(matrix)), axis=1)
return np.concatenate((first_row, second_row), axis=0)
def complex2bigreal(arr):
"""Convert from complex to big real representation.
To avoid the problem of theano and similar libraries not properly
supporting the gradient of complex objects, we map every complex
nxn matrix U to a bigger 2nx2n real matrix defined as
[[Ur, -Ui], [Ui, Ur]], where Ur and Ui are the real and imaginary
parts of U.
The input argument can be either a qutip object representing a ket,
or a qutip object representing an operator (a density matrix).
"""
# if qutip object, extract numpy arrays from it
if isinstance(arr, qutip.Qobj):
arr = arr.data.toarray()
arr = np.asarray(arr).astype(np.complex)
# if `arr` is a vector (possibly of shape Nx1 or 1xN)
if isvector(arr):
outarr = _complex2bigreal_vector(arr)
else:
outarr = _complex2bigreal_matrix(arr)
return outarr
def bigreal2complex(arr):
"""Convert numpy array back into regular complex form.
NOTE: The output will always be a numpy.ndarray of complex dtype
"""
arr = np.asarray(arr)
if isvector(arr):
# `arr` may be a Nx1 or 1xN dimensional vector, or a flat vector
try:
arr_len = arr.shape[0] * arr.shape[1]
except IndexError:
arr_len = len(arr)
# make `arr` an Nx1 vector
arr = arr.reshape((arr_len, 1))
real_part = arr[:arr.shape[0] // 2]
imag_part = arr[arr.shape[0] // 2:]
return real_part + 1j * imag_part
else:
real_part = arr[:arr.shape[0] // 2, :arr.shape[1] // 2]
imag_part = arr[arr.shape[0] // 2:, :arr.shape[1] // 2]
return real_part + 1j * imag_part
def bigreal2qobj(arr):
"""Convert big real vector into corresponding qutip object."""
if arr.ndim == 1 or arr.shape[0] != arr.shape[1]:
arr = bigreal2complex(arr)
        num_qubits = np.log2(arr.shape[0]).astype(int)
return qutip.Qobj(arr, dims=[[2] * num_qubits, [1] * num_qubits])
elif arr.shape[0] == arr.shape[1]:
arr = bigreal2complex(arr)
        num_qubits = np.log2(arr.shape[0]).astype(int)
return qutip.Qobj(arr, dims=[[2] * num_qubits] * 2)
else:
raise ValueError('Not sure what to do with this here.')
def theano_matrix_grad(matrix, parameters):
    """Compute the gradient of every element of a theano matrix."""
shape = matrix.shape
num_elements = shape[0] * shape[1]
flattened_matrix = T.flatten(matrix)
def grad_element(i, arr):
return T.grad(arr[i], parameters)
flattened_grads, _ = theano.scan(fn=grad_element,
sequences=T.arange(num_elements),
non_sequences=flattened_matrix)
try:
        # if `parameters` is a theano vector, flattened_grads turns out to
        # be a matrix of shape Nx2
num_gradients = parameters.shape[0]
newshape = (num_gradients, shape[0], shape[1])
return T.reshape(flattened_grads.T, newshape)
except AttributeError:
# if `parameters` is a list of theano scalars, flattened_grads
# becomes a list of the corresponding gradients
if isinstance(flattened_grads, (list, tuple)):
return [T.reshape(grads_mat, shape) for grads_mat in flattened_grads]
else:
return T.reshape(flattened_grads, shape)
def get_sigmas_index(indices):
"""Takes a tuple and gives back a length-16 array with a single 1.
Parameters
----------
indices: a tuple of two integers, each one between 0 and 3.
Examples
--------
>>> get_sigmas_index((1, 0))
array([ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0.])
>>> get_sigmas_index((0, 3))
array([ 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0.])
"""
all_zeros = np.zeros(4 * 4)
all_zeros[indices[0] * 4 + indices[1]] = 1.
return all_zeros
def generate_ss_terms():
"""Returns the tensor products of every combination of two sigmas.
Generates a list in which each element is the tensor product of two
Pauli matrices, multiplied by the imaginary unit 1j and converted
into big real form using complex2bigreal.
The matrices are sorted in natural order, so that for example the
    3rd element is the tensor product of sigma_0 and sigma_3 and the
4th element is the tensor product of sigma_1 and sigma_0.
"""
sigmas = [qutip.qeye(2), qutip.sigmax(), qutip.sigmay(), qutip.sigmaz()]
sigma_pairs = []
for idx1 in range(4):
for idx2 in range(4):
term = qutip.tensor(sigmas[idx1], sigmas[idx2])
term = 1j * term.data.toarray()
sigma_pairs.append(complex2bigreal(term))
return np.asarray(sigma_pairs)
def pauli_matrix(n_modes, position, which_pauli):
sigmas = [qutip.qeye(2), qutip.sigmax(), qutip.sigmay(), qutip.sigmaz()]
indices = [0] * n_modes
indices[position] = which_pauli
return qutip.tensor(*tuple(sigmas[index] for index in indices))
def pauli_product(*pauli_indices):
n_modes = len(pauli_indices)
partial_product = qutip.tensor(*([qutip.qeye(2)] * n_modes))
for pos, pauli_index in enumerate(pauli_indices):
partial_product *= pauli_matrix(n_modes, pos, pauli_index)
return partial_product
def chars2pair(chars):
out_pair = []
for idx in range(len(chars)):
if chars[idx] == 'x':
out_pair.append(1)
elif chars[idx] == 'y':
out_pair.append(2)
elif chars[idx] == 'z':
out_pair.append(3)
else:
raise ValueError('chars must contain 2 characters, each of'
'which equal to either x, y, or z')
return tuple(out_pair)
def dm2ket(dm):
"""Converts density matrix to ket form, assuming it to be pure."""
outket = dm[:, 0] / dm[0, 0] * np.sqrt(np.abs(dm[0, 0]))
try:
return qutip.Qobj(outket, dims=[dm.dims[0], [1] * len(dm.dims[0])])
except AttributeError:
# `dm` could be a simple matrix, not a qutip.Qobj object. In
# this case just return the numpy array
return outket
def ket_normalize(ket):
return ket * np.exp(-1j * np.angle(ket[0, 0]))
def detensorize(bigm):
"""Assumes second matrix is 2x2."""
    out = np.zeros((bigm.shape[0] * bigm.shape[1], 2, 2), dtype=np.complex)
import pytest
from numpy import array, append, empty, zeros, int64
from numpy.testing import assert_allclose
from touvlo.supv.nn_clsf import (feed_forward, init_nn_weights,
back_propagation, cost_function,
grad, unravel_params, h)
class TestNeuralNetwork:
@pytest.fixture(scope="module")
def omicron(self):
return array([[0.35, 0.78, 0.13, 0.90],
[0.27, 0.66, 0.62, 0.20],
[0.64, 0.36, 0.76, 0.33],
[0.00, 0.70, 0.78, 0.85],
[0.55, 0.72, 0.24, 0.43]])
@pytest.fixture(scope="module")
def omega(self):
return array([[0.86, 0.77, 0.63, 0.35, 0.99, 0.11],
[0.84, 0.74, 0.11, 0.30, 0.49, 0.14],
[0.04, 0.31, 0.17, 0.65, 0.28, 0.99]])
@pytest.fixture(scope="module")
def kappa(self):
return array([[0.98, 0.6, 0.18, 0.47, 0.07, 1],
[0.9, 0.38, 0.38, 0.36, 0.52, 0.85],
[0.57, 0.23, 0.41, 0.45, 0.04, 0.24],
[0.46, 0.94, 0.03, 0.06, 0.19, 0.63],
[0.87, 0.4, 0.85, 0.07, 0.81, 0.76]])
@pytest.fixture(scope="module")
def upsilon(self):
return array([[0.9, 0.95, 0.05, 0.05, 0.65, 0.11],
[0.84, 0.57, 0.17, 0.62, 0.06, 0.36],
[0.36, 0.6, 0.54, 0.49, 0.3, 0.03],
[0.11, 0.49, 0.71, 0.43, 0.01, 0.92],
[0.02, 0.01, 0.94, 0.35, 0.69, 0.88]])
@pytest.fixture(scope="module")
def zeta(self):
return array([[0.91672, 0.85806, 0.81484],
[0.89115, 0.83518, 0.78010],
[0.93880, 0.88464, 0.84335]])
@pytest.fixture(scope="module")
def iota(self):
return array([[0.017, 0.422, 0.739, -0.121, 0.479],
[-0.346, 0.018, 0.37, -0.65, 0.148],
[0.781, 0.484, 0.9405, 0.5385, 0.7915]])
def test_cost_function1(self, omicron, omega):
X = array([[0.10, 0.30, -0.50], [-0.20, 0, -0.60], [0, 0.20, 0.45]])
y = array([[0], [2], [1]])
n_hidden_layers = 1
num_labels = 3
_lambda = 0
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = omega
assert_allclose(cost_function(X, y, theta, _lambda,
num_labels, n_hidden_layers),
4.2549,
rtol=0, atol=0.001, equal_nan=False)
def test_cost_function2(self, omicron, omega):
X = array([[0.10, 0.30, -0.50], [-0.20, 0, -0.60], [0, 0.20, 0.45]])
y = array([[0], [2], [1]])
n_hidden_layers = 1
num_labels = 3
_lambda = 1
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = omega
assert_allclose(cost_function(X, y, theta, _lambda,
num_labels, n_hidden_layers),
5.9738,
rtol=0, atol=0.001, equal_nan=False)
def test_cost_function3(self, omicron, omega):
X = array([[0.10, 0.30, -0.50], [-0.20, 0, -0.60], [0, 0.20, 0.45]])
y = array([[0], [2], [1]])
n_hidden_layers = 1
num_labels = 3
_lambda = 10
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = omega
assert_allclose(cost_function(X, y, theta, _lambda,
num_labels, n_hidden_layers),
21.443,
rtol=0, atol=0.001, equal_nan=False)
def test_cost_function4(self, omicron, omega, kappa, upsilon):
X = array([[0.10, 0.30, -0.50], [-0.20, 0, -0.60], [0, 0.20, 0.45]])
y = array([[0], [2], [1]])
n_hidden_layers = 3
num_labels = 3
_lambda = 0
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = kappa
theta[2] = upsilon
theta[3] = omega
assert_allclose(cost_function(X, y, theta, _lambda,
num_labels, n_hidden_layers),
5.6617,
rtol=0, atol=0.001, equal_nan=False)
def test_cost_function5(self, omicron, omega, kappa, upsilon):
X = array([[0.10, 0.30, -0.50], [-0.20, 0, -0.60], [0, 0.20, 0.45]])
y = array([[0], [2], [1]])
n_hidden_layers = 3
num_labels = 3
_lambda = 1
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = kappa
theta[2] = upsilon
theta[3] = omega
assert_allclose(cost_function(X, y, theta, _lambda,
num_labels, n_hidden_layers),
9.7443,
rtol=0, atol=0.001, equal_nan=False)
def test_cost_function6(self, omicron, omega, kappa, upsilon):
X = array([[0.10, 0.30, -0.50], [-0.20, 0, -0.60], [0, 0.20, 0.45]])
y = array([[0], [2], [1]])
n_hidden_layers = 3
num_labels = 3
_lambda = 10
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = kappa
theta[2] = upsilon
theta[3] = omega
assert_allclose(cost_function(X, y, theta, _lambda,
num_labels, n_hidden_layers),
46.488,
rtol=0, atol=0.001, equal_nan=False)
def test_feed_forward1(self, omicron, omega):
n_hidden_layers = 1
X = array([[1, 0.10, 0.30, -0.50]])
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = omega
z, a = feed_forward(X, theta, n_hidden_layers)
assert_allclose(a[0],
array([[1, 0.10, 0.30, -0.50]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(a[1],
array([[1, 0.50425, 0.60396, 0.67678,
0.46979, 0.61751]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(a[2],
array([[0.91672, 0.85806, 0.81484]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(z[1],
array([[0.017, 0.422, 0.739, -0.121, 0.479]]),
rtol=0, atol=0.001, equal_nan=False)
def test_feed_forward2(self, omicron, omega):
n_hidden_layers = 1
X = array([[1, -0.2, 0, -0.6]])
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = omega
z, a = feed_forward(X, theta, n_hidden_layers)
assert_allclose(a[0],
array([[1, -0.2, 0, -0.6]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(a[1],
array([[1, 0.41435, 0.5045, 0.59146,
0.34299, 0.53693]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(a[2],
array([[0.89115, 0.83518, 0.78010]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(z[1],
array([[-0.346, 0.018, 0.37, -0.65, 0.148]]),
rtol=0, atol=0.001, equal_nan=False)
def test_feed_forward3(self, omicron, omega):
n_hidden_layers = 1
X = array([[1, 0, 0.2, 0.45]])
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = omega
z, a = feed_forward(X, theta, n_hidden_layers)
assert_allclose(a[0],
array([[1, 0, 0.2, 0.45]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(a[1],
array([[1, 0.6859, 0.61869, 0.7192,
0.63146, 0.68815]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(a[2],
array([[0.9388, 0.88464, 0.84335]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(z[1],
array([[0.781, 0.484, 0.9405, 0.5385, 0.7915]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(z[2],
array([[2.73048142, 2.03713759, 1.68336723]]),
rtol=0, atol=0.001, equal_nan=False)
def test_feed_forward4(self, omicron, omega, kappa, upsilon):
n_hidden_layers = 3
X = array([[1, 0, 0.2, 0.45]])
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = kappa
theta[2] = upsilon
theta[3] = omega
z, a = feed_forward(X, theta, n_hidden_layers)
assert_allclose(a[0],
array([[1, 0, 0.2, 0.45]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(a[1],
array([[1, 0.6859, 0.61869,
0.7192, 0.63146, 0.68815]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(a[2],
array([[1, 0.92912, 0.92877,
0.8169, 0.84812, 0.9402]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(a[3],
array([[1, 0.92585, 0.91859,
0.89109, 0.92052, 0.93092]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(a[4],
array([[0.97003, 0.92236, 0.90394]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(z[1],
array([[0.781, 0.484, 0.9405, 0.5385, 0.7915]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(z[2],
array([[2.5733, 2.5679, 1.4955, 1.72, 2.7551]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(z[3],
array([[2.5247, 2.4233, 2.1019, 2.4494, 2.6008]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(z[4],
array([[3.4772, 2.4749, 2.2417]]),
rtol=0, atol=0.001, equal_nan=False)
def test_hypothesis1(self, omicron, omega):
n_hidden_layers = 1
X = array([[1, 0.10, 0.30, -0.50]])
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = omega
_h = h(X, theta, n_hidden_layers)
assert_allclose(_h,
array([[0.91672, 0.85806, 0.81484]]),
rtol=0, atol=0.001, equal_nan=False)
def test_hypothesis2(self, omicron, omega):
n_hidden_layers = 1
X = array([[1, -0.2, 0, -0.6]])
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = omega
_h = h(X, theta, n_hidden_layers)
assert_allclose(_h,
array([[0.89115, 0.83518, 0.78010]]),
rtol=0, atol=0.001, equal_nan=False)
def test_hypothesis3(self, omicron, omega):
n_hidden_layers = 1
X = array([[1, 0, 0.2, 0.45]])
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = omega
_h = h(X, theta, n_hidden_layers)
assert_allclose(_h,
array([[0.9388, 0.88464, 0.84335]]),
rtol=0, atol=0.001, equal_nan=False)
def test_hypothesis4(self, omicron, omega, kappa, upsilon):
n_hidden_layers = 3
X = array([[1, 0, 0.2, 0.45]])
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = kappa
theta[2] = upsilon
theta[3] = omega
_h = h(X, theta, n_hidden_layers)
assert_allclose(_h,
array([[0.97003, 0.92236, 0.90394]]),
rtol=0, atol=0.001, equal_nan=False)
def test_grad(self, omicron, omega):
X = array([[0.10, 0.30, -0.50], [-0.20, 0, -0.60], [0, 0.20, 0.45]])
y = array([[0], [2], [1]])
_lambda = 0
num_labels = 3
n_hidden_layers = 1
input_layer_size = 3
hidden_layer_size = 5
theta = empty((n_hidden_layers + 1), dtype=object)
theta[0] = omicron
theta[1] = omega
nn_params = append(theta[0].flatten(), theta[1].flatten())
for i in range(2, len(theta)):
nn_params = append(nn_params, theta[i].flatten())
theta_grad = grad(X, y, nn_params, _lambda, input_layer_size,
hidden_layer_size, num_labels, n_hidden_layers)
theta_grad = unravel_params(theta_grad, input_layer_size,
hidden_layer_size, num_labels,
n_hidden_layers)
assert_allclose(theta_grad[0],
array([[0.2331544, -0.0131348,
0.0334961, -0.0652458],
[0.1224948, -0.0088256,
0.0156733, -0.0124328],
[0.1457463, -0.0012316,
0.0279176, -0.0223957],
[0.2254230, -0.0137763,
0.0313083, -0.0402217],
[0.1379756, 0.0072703,
0.0348654, -0.0063072]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(theta_grad[1],
array([
[0.58222, 0.32373, 0.32671,
0.38197, 0.28645, 0.35770],
[0.52596, 0.23320, 0.28940,
0.33057, 0.20557, 0.29964],
[0.47943, 0.29941, 0.30099,
0.34265, 0.27997, 0.32182]]),
rtol=0, atol=0.001, equal_nan=False)
def test_back_prop_1(self, omega, zeta, iota):
num_labels = 3
y = array([[0]])
n_hidden_layers = 1
L = n_hidden_layers + 1 # last layer
theta = empty((n_hidden_layers + 1), dtype=object)
a = empty((n_hidden_layers + 2), dtype=object)
z = empty((n_hidden_layers + 2), dtype=object)
theta[1] = omega
a[2] = zeta
z[1] = iota
delta = back_propagation(y, theta, a, z, num_labels, n_hidden_layers)
assert_allclose(delta[L - 1],
array([[0.205846, 0.043161, 0.165794,
0.141024, 0.216743],
[0.188319, 0.038974, 0.173862,
0.117159, 0.218117],
[0.187209, 0.047684, 0.159976,
0.141731, 0.204305]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(delta[L],
array([[-0.083275, 0.858059, 0.814840],
[-0.108852, 0.835179, 0.780102],
[-0.061198, 0.884641, 0.843350]]),
rtol=0, atol=0.001, equal_nan=False)
def test_back_prop_2(self, omega, zeta, iota):
num_labels = 3
y = array([[2]])
n_hidden_layers = 1
L = n_hidden_layers + 1 # last layer
theta = empty((n_hidden_layers + 1), dtype=object)
a = empty((n_hidden_layers + 2), dtype=object)
z = empty((n_hidden_layers + 2), dtype=object)
theta[1] = omega
a[2] = zeta
z[1] = iota
delta = back_propagation(y, theta, a, z, num_labels, n_hidden_layers)
assert_allclose(delta[L - 1],
array([[0.32083735, 0.15318951, 0.10016938,
0.31787549, 0.00889491],
[0.29994511, 0.15396509, 0.10137133,
0.27715581, -0.00068307],
[0.28631281, 0.15620337, 0.09939030,
0.30696021, 0.01545845]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(delta[L],
array([[0.91672, 0.85806, -0.18516],
[0.89115, 0.83518, -0.21990],
[0.93880, 0.88464, -0.15665]]),
rtol=0, atol=0.001, equal_nan=False)
def test_back_prop_3(self, omega, zeta, iota):
num_labels = 3
y = array([[1]])
n_hidden_layers = 1
L = n_hidden_layers + 1 # last layer
theta = empty((n_hidden_layers + 1), dtype=object)
a = empty((n_hidden_layers + 2), dtype=object)
z = empty((n_hidden_layers + 2), dtype=object)
theta[1] = omega
a[2] = zeta
z[1] = iota
delta = back_propagation(y, theta, a, z, num_labels, n_hidden_layers)
assert_allclose(delta[L - 1],
array([[0.21335, 0.16754, 0.17673, 0.26557, 0.20966],
[0.19560, 0.16896, 0.18594, 0.22983, 0.21066],
[0.19367, 0.17036, 0.17007, 0.25809, 0.19787]]),
rtol=0, atol=0.001, equal_nan=False)
assert_allclose(delta[L],
array([[0.91672, -0.14194, 0.81484],
[0.89115, -0.16482, 0.78010],
[0.93880, -0.11536, 0.84335]]),
rtol=0, atol=0.001, equal_nan=False)
def test_back_prop_4(self, omicron, omega, kappa, upsilon):
num_labels = 3
y = array([[1]])
n_hidden_layers = 3
L = n_hidden_layers + 1 # last layer
theta = empty((n_hidden_layers + 1), dtype=object)
z = empty((n_hidden_layers + 2), dtype=object)
a = empty((n_hidden_layers + 2), dtype=object)
theta[0] = omicron
theta[1] = kappa
theta[2] = upsilon
theta[3] = omega
a[0] = array([[1, 0, 0.2, 0.45]])
a[1] = array([[1, 0.6859, 0.61869, 0.7192, 0.63146, 0.68815]])
a[2] = array([[1, 0.92912, 0.92877, 0.8169, 0.84812, 0.9402]])
a[3] = array([[1, 0.92585, 0.91859, 0.89109, 0.92052, 0.93092]])
a[4] = array([[0.97003, 0.92236, 0.90394]])
z[1] = array([[0.781, 0.484, 0.9405, 0.5385, 0.7915]])
z[2] = array([[2.5733, 2.5679, 1.4955, 1.72, 2.7551]])
        z[3] = array([[2.5247, 2.4233, 2.1019, 2.4494, 2.6008]])
import numpy as np
from torchvision import datasets, transforms
import random
from torch.utils.data import Subset, RandomSampler
def randomSplit(M, N, minV, maxV):
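    """Randomly split M items into N groups whose sizes all lie between minV and maxV."""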
res = []
while N > 0:
l = max(minV, M - (N-1)*maxV)
r = min(maxV, M - (N-1)*minV)
num = random.randint(l, r)
N -= 1
M -= num
res.append(num)
print(res)
return res
def uniform(N, k):
"""Uniform distribution of 'N' items into 'k' groups."""
dist = []
avg = N / k
# Make distribution
for i in range(k):
dist.append(int((i + 1) * avg) - int(i * avg))
# Return shuffled distribution
random.shuffle(dist)
return dist
def normal(N, k):
"""Normal distribution of 'N' items into 'k' groups."""
dist = []
# Make distribution
for i in range(k):
x = i - (k - 1) / 2
dist.append(int(N * (np.exp(-x) / (np.exp(-x) + 1)**2)))
# Add remainders
remainder = N - sum(dist)
dist = list(np.add(dist, uniform(remainder, k)))
# Return non-shuffled distribution
return dist
def data_organize(idxs_labels, labels):
data_dict = {}
labels = np.unique(labels, axis=0)
for one in labels:
data_dict[one] = []
for i in range(len(idxs_labels[1, :])):
data_dict[idxs_labels[1, i]].append(idxs_labels[0, i])
return data_dict
def data_partition(training_data, number_of_clients, non_iid_level):
idxs = np.arange(len(training_data))
if type(training_data.targets).__name__ == 'list':
labels = np.array(training_data.targets)
else: # type is Tensor
labels = training_data.train_labels.numpy()
# sort labels
idxs_labels = np.vstack((idxs, labels))
idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
labels = np.unique(labels, axis=0)
idxs = idxs_labels[0, :]
data_dict = data_organize(idxs_labels, labels)
if non_iid_level == 0:
num_items = int(len(training_data)/number_of_clients)
data_partition_profile, all_idxs = {}, [i for i in range(len(training_data))]
for i in range(number_of_clients):
data_partition_profile[i] = set(np.random.choice(all_idxs, num_items, replace=False))
all_idxs = list(set(all_idxs) - data_partition_profile[i])
else:
client_dict = {}
pref_dist = uniform(number_of_clients, len(labels))
print(pref_dist)
data_dist = randomSplit(len(training_data), number_of_clients, 500, 7000)
data_dist.sort(reverse=True)
print(data_dist)
client_list = list(range(number_of_clients))
for i in range(len(pref_dist)):
while pref_dist[i]>0:
client = np.random.choice(client_list, 1, replace=False)[0]
client_dict[client] = labels[i]
pref_dist[i] -= 1
client_list = list(set(client_list) - set([client]))
# for 6 users
# client_dict = {0: [1, 2], 1: [3, 4], 2: [5, 6], 3: [3, 4], 4: [1, 2], 5: [5, 6]}
# for 7 users
# client_dict = {0: [1, 2], 1: [3, 4], 2: [5, 6], 3: [1, 2], 4: [5, 6], 5: [3, 4], 6: [7, 8]}
# for 15 users
client_dict = {0: [1, 2], 1: [3, 4], 2: [1, 2], 3: [5, 6], 4: [1, 2], 5: [7, 8], 6: [3, 4], 7: [9, 0],
8: [5, 6], 9: [1, 2], 10: [9, 0], 11: [5, 6], 12: [3, 4], 13: [7, 8], 14: [3, 4]}
data_partition_profile, all_idxs = {}, [i for i in range(len(training_data))]
for i in range(number_of_clients):
pref_number1 = int(round(data_dist[i] * non_iid_level * 0.5))
pref_number2 = int(round(data_dist[i] * non_iid_level * 0.5))
if pref_number1 > len(data_dict[client_dict[i][0]]):
pref_number1 = len(data_dict[client_dict[i][0]])
if pref_number2 > len(data_dict[client_dict[i][1]]):
pref_number2 = len(data_dict[client_dict[i][1]])
data_dist[i] -= pref_number1
data_dist[i] -= pref_number2
tep1 = set(np.random.choice(data_dict[client_dict[i][0]], pref_number1, replace=False))
tep2 = set(np.random.choice(data_dict[client_dict[i][1]], pref_number2, replace=False))
data_partition_profile[i] = tep1.union(tep2)
all_idxs = list(set(all_idxs) - data_partition_profile[i])
data_dict[client_dict[i][0]] = list(set(data_dict[client_dict[i][0]]) - tep1)
data_dict[client_dict[i][1]] = list(set(data_dict[client_dict[i][1]]) - tep2)
for i in range(number_of_clients):
            rest_idxs = set(np.random.choice(all_idxs, data_dist[i], replace=False))
import tqdm
import numpy as np
from numpy import sin, cos
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
class DoublePendulum:
def __init__(self, m1=1.0, m2=1.0, l1=1.0, l2=1.0, g=9.81):
self.m1 = m1
self.m2 = m2
self.l1 = l1
self.l2 = l2
self.g = g
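    # State convention: q, r are the angles of the first and second pendulum, u, v the corresponding angular velocities.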
def q_prime(self, q, r, u, v, t):
val = u
return val
def r_prime(self, q, r, u, v, t):
val = v
return val
def u_prime(self, q, r, u, v, t):
m1, m2, l1, l2, g = self.m1, self.m2, self.l1, self.l2, self.g
num = 2 * g * m1 * sin(q) + g * m2 * sin(q) + g * m2 * sin(q - 2 * r)
num += l1 * m2 * u ** 2 * sin(2 * (q - r)) + 2 * l2 * m2 * v ** 2 * sin(q - r)
den = -2 * l1 * (m1 - m2 * cos(q - r) ** 2 + m2)
val = num / den
return val
def v_prime(self, q, r, u, v, t):
m1, m2, l1, l2, g = self.m1, self.m2, self.l1, self.l2, self.g
num = -(m1 + m2) * (g * sin(r) - l1 * u ** 2 * sin(q - r))
num += (cos(q - r)) * (g * m1 * sin(q) + g * m2 * sin(q) + l2 * m2 * v ** 2 * sin(q - r))
den = l2 * (m1 - m2 * cos(q - r) ** 2 + m2)
val = num / den
return val
def derivatives(self, q, r, u, v, t):
args = (q, r, u, v, t)
qruv_prime = np.array([
self.q_prime(*args),
self.r_prime(*args),
self.u_prime(*args),
self.v_prime(*args),
])
return qruv_prime
def x1(self, q, r):
val = +self.l1 * sin(q)
return val
def y1(self, q, r):
        val = -self.l1 * cos(q)
# -*- coding: utf-8 -*-
# @Organization : insightface.ai
# @Author : <NAME>
# @Time : 2021-05-04
# @Function :
from __future__ import division
import datetime
import numpy as np
import onnx
import onnxruntime
import os
import os.path as osp
import cv2
import sys
def softmax(z):
assert len(z.shape) == 2
s = np.max(z, axis=1)
s = s[:, np.newaxis] # necessary step to do broadcasting
e_x = np.exp(z - s)
div = np.sum(e_x, axis=1)
    div = div[:, np.newaxis] # ditto
return e_x / div
def distance2bbox(points, distance, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (n, 2), [x, y].
distance (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom).
max_shape (tuple): Shape of the image.
Returns:
Tensor: Decoded bboxes.
"""
x1 = points[:, 0] - distance[:, 0]
y1 = points[:, 1] - distance[:, 1]
x2 = points[:, 0] + distance[:, 2]
y2 = points[:, 1] + distance[:, 3]
if max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1])
y1 = y1.clamp(min=0, max=max_shape[0])
x2 = x2.clamp(min=0, max=max_shape[1])
y2 = y2.clamp(min=0, max=max_shape[0])
return np.stack([x1, y1, x2, y2], axis=-1)
def distance2kps(points, distance, max_shape=None):
    """Decode distance prediction to keypoints.
    Args:
        points (Tensor): Shape (n, 2), [x, y].
        distance (Tensor): Interleaved x/y offsets from the given point
            to each predicted keypoint.
        max_shape (tuple): Shape of the image.
    Returns:
        Tensor: Decoded keypoints.
"""
preds = []
for i in range(0, distance.shape[1], 2):
px = points[:, i%2] + distance[:, i]
py = points[:, i%2+1] + distance[:, i+1]
if max_shape is not None:
px = px.clamp(min=0, max=max_shape[1])
py = py.clamp(min=0, max=max_shape[0])
preds.append(px)
preds.append(py)
return np.stack(preds, axis=-1)
class SCRFD:
def __init__(self, model_file=None, session=None):
import onnxruntime
self.model_file = model_file
self.session = session
self.taskname = 'detection'
if self.session is None:
assert self.model_file is not None
assert osp.exists(self.model_file)
self.session = onnxruntime.InferenceSession(self.model_file, None)
self.center_cache = {}
self.nms_thresh = 0.4
self.det_thresh = 0.5
self._init_vars()
def _init_vars(self):
input_cfg = self.session.get_inputs()[0]
input_shape = input_cfg.shape
#print(input_shape)
if isinstance(input_shape[2], str):
self.input_size = None
else:
self.input_size = tuple(input_shape[2:4][::-1])
#print('image_size:', self.image_size)
input_name = input_cfg.name
self.input_shape = input_shape
outputs = self.session.get_outputs()
output_names = []
for o in outputs:
output_names.append(o.name)
self.input_name = input_name
self.output_names = output_names
self.input_mean = 127.5
self.input_std = 128.0
#print(self.output_names)
#assert len(outputs)==10 or len(outputs)==15
self.use_kps = False
self._anchor_ratio = 1.0
self._num_anchors = 1
if len(outputs)==6:
self.fmc = 3
self._feat_stride_fpn = [8, 16, 32]
self._num_anchors = 2
elif len(outputs)==9:
self.fmc = 3
self._feat_stride_fpn = [8, 16, 32]
self._num_anchors = 2
self.use_kps = True
elif len(outputs)==10:
self.fmc = 5
self._feat_stride_fpn = [8, 16, 32, 64, 128]
self._num_anchors = 1
elif len(outputs)==15:
self.fmc = 5
self._feat_stride_fpn = [8, 16, 32, 64, 128]
self._num_anchors = 1
self.use_kps = True
def prepare(self, ctx_id, **kwargs):
if ctx_id<0:
self.session.set_providers(['CPUExecutionProvider'])
nms_thresh = kwargs.get('nms_thresh', None)
if nms_thresh is not None:
self.nms_thresh = nms_thresh
det_thresh = kwargs.get('det_thresh', None)
if det_thresh is not None:
self.det_thresh = det_thresh
input_size = kwargs.get('input_size', None)
if input_size is not None:
if self.input_size is not None:
print('warning: det_size is already set in scrfd model, ignore')
else:
self.input_size = input_size
def forward(self, img, threshold):
scores_list = []
bboxes_list = []
kpss_list = []
input_size = tuple(img.shape[0:2][::-1])
blob = cv2.dnn.blobFromImage(img, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
net_outs = self.session.run(self.output_names, {self.input_name : blob})
input_height = blob.shape[2]
input_width = blob.shape[3]
fmc = self.fmc
for idx, stride in enumerate(self._feat_stride_fpn):
scores = net_outs[idx]
bbox_preds = net_outs[idx+fmc]
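            #distances are predicted in units of the feature-map stride; rescale them to pixels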
bbox_preds = bbox_preds * stride
if self.use_kps:
kps_preds = net_outs[idx+fmc*2] * stride
height = input_height // stride
width = input_width // stride
K = height * width
key = (height, width, stride)
if key in self.center_cache:
anchor_centers = self.center_cache[key]
else:
#solution-1, c style:
#anchor_centers = np.zeros( (height, width, 2), dtype=np.float32 )
#for i in range(height):
# anchor_centers[i, :, 1] = i
#for i in range(width):
# anchor_centers[:, i, 0] = i
#solution-2:
#ax = np.arange(width, dtype=np.float32)
#ay = np.arange(height, dtype=np.float32)
#xv, yv = np.meshgrid(np.arange(width), np.arange(height))
#anchor_centers = np.stack([xv, yv], axis=-1).astype(np.float32)
#solution-3:
anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
#print(anchor_centers.shape)
anchor_centers = (anchor_centers * stride).reshape( (-1, 2) )
if self._num_anchors>1:
anchor_centers = np.stack([anchor_centers]*self._num_anchors, axis=1).reshape( (-1,2) )
if len(self.center_cache)<100:
self.center_cache[key] = anchor_centers
pos_inds = np.where(scores>=threshold)[0]
bboxes = distance2bbox(anchor_centers, bbox_preds)
pos_scores = scores[pos_inds]
pos_bboxes = bboxes[pos_inds]
scores_list.append(pos_scores)
bboxes_list.append(pos_bboxes)
if self.use_kps:
kpss = distance2kps(anchor_centers, kps_preds)
#kpss = kps_preds
kpss = kpss.reshape( (kpss.shape[0], -1, 2) )
pos_kpss = kpss[pos_inds]
kpss_list.append(pos_kpss)
return scores_list, bboxes_list, kpss_list
def detect(self, img, input_size = None, max_num=0, metric='default'):
assert input_size is not None or self.input_size is not None
input_size = self.input_size if input_size is None else input_size
im_ratio = float(img.shape[0]) / img.shape[1]
model_ratio = float(input_size[1]) / input_size[0]
if im_ratio>model_ratio:
new_height = input_size[1]
new_width = int(new_height / im_ratio)
else:
new_width = input_size[0]
new_height = int(new_width * im_ratio)
det_scale = float(new_height) / img.shape[0]
resized_img = cv2.resize(img, (new_width, new_height))
det_img = np.zeros( (input_size[1], input_size[0], 3), dtype=np.uint8 )
det_img[:new_height, :new_width, :] = resized_img
scores_list, bboxes_list, kpss_list = self.forward(det_img, self.det_thresh)
scores = np.vstack(scores_list)
scores_ravel = scores.ravel()
order = scores_ravel.argsort()[::-1]
        bboxes = np.vstack(bboxes_list)
import numpy as np
from scipy.stats import norm
from itertools import product
from wzk.numpy2 import shape_wrapper, axis_wrapper, insert
from wzk.dicts_lists_tuples import atleast_tuple
# golden ratio: (a+b)/a = a/b = (sqrt(5)+1)/2
golden_ratio = (np.sqrt(5.0) + 1) / 2
def number2digits(num):
return [int(x) for x in str(num)]
def sin_cos(x):
# https: // github.com / numpy / numpy / issues / 2626
return np.sin(x), np.cos(x)
# Normalize
def normalize_01(x, low=None, high=None, axis=None):
if low is None:
low = np.min(x, axis=axis, keepdims=True)
if high is None:
high = np.max(x, axis=axis, keepdims=True)
return (x-low) / (high-low)
def denormalize_01(x, low, high):
return x * (high - low) + low
def normalize_11(x, low, high):
"""
Normalize [low, high] to [-1, 1]
low and high should either be scalars or have the same dimension as the last dimension of x
"""
return 2 * (x - low) / (high - low) - 1
def denormalize_11(x, low, high):
"""
Denormalize [-1, 1] to [low, high]
low and high should either be scalars or have the same dimension as the last dimension of x
"""
return (x + 1) * (high - low)/2 + low
def euclidean_norm(arr, axis=-1, squared=False):
if squared:
return (arr**2).sum(axis=axis)
else:
return np.sqrt((arr**2).sum(axis=axis))
def discretize(x, step):
if np.isinf(step) or np.isnan(step):
return x
difference = x % step # distance to the next discrete value
if isinstance(x, (int, float)):
if difference > step / 2:
return x - (difference - step)
else:
return x - difference
else:
difference[difference > step / 2] -= step # round correctly
return x - difference
def d_linalg_norm__d_x(x, return_norm=False):
"""
Last dimension is normalized.
Calculate Jacobian
xn = x * (x^2 + y^2 + z^2)^(-1/2)
d xn / d x = (y^2 + z^2) * (x^2 + y^2 + z^2)^(-3/2)
d yn / d y = (x^2 + y^2) * (x^2 + y^2 + z^2)^(-3/2)
d zn / d z= (x^2 + z^2) * (x^2 + y^2 + z^2)^(-3/2)
Pattern of numerator
X123
0X23
01X3
012X
d xn / d y = -(x*y) * (x^2 + y^2 + z^2)^(-3/2)
d xn / d z = -(x*z) * (x^2 + y^2 + z^2)^(-3/2)
jac = [[dxn/dx, dxn/dy, dxn/dz]
[dyn/dx, dyn/dy, dyn/dz]
[dzn/dx, dzn/dy, dzn/dz]
"""
n = x.shape[-1]
off_diag_idx = [[j for j in range(n) if i != j] for i in range(n)]
jac = np.empty(x.shape + x.shape[-1:])
x_squared = x**2
# Diagonal
jac[:, np.arange(n), np.arange(n)] = x_squared[..., off_diag_idx].sum(axis=-1)
# Off-Diagonal
jac[:, np.arange(n)[:, np.newaxis], off_diag_idx] = -x[..., np.newaxis] * x[:, off_diag_idx]
jac *= (x_squared.sum(axis=-1, keepdims=True)**(-3/2))[..., np.newaxis]
if return_norm:
x /= np.sqrt(x_squared.sum(axis=-1, keepdims=True))
return x, jac
else:
return jac
# Smooth
def smooth_step(x):
"""https://en.wikipedia.org/wiki/Smoothstep
Interpolation which has zero 1st-order derivatives at x = 0 and x = 1,
~ cubic Hermite interpolation with clamping.
"""
res = -2 * x**3 + 3 * x**2
return np.clip(res, 0, 1)
def smoother_step(x):
"""https://en.wikipedia.org/wiki/Smoothstep+
<NAME> suggests an improved version of the smooth step function,
which has zero 1st- and 2nd-order derivatives at x = 0 and x = 1"""
res = +6 * x**5 - 15 * x**4 + 10 * x**3
return np.clip(res, 0, 1)
# Divisors
def divisors(n, with_1_and_n=False):
"""
https://stackoverflow.com/questions/171765/what-is-the-best-way-to-get-all-the-divisors-of-a-number#171784
"""
# Get factors and their counts
factors = {}
nn = n
i = 2
while i*i <= nn:
while nn % i == 0:
if i not in factors:
factors[i] = 0
factors[i] += 1
nn //= i
i += 1
if nn > 1:
factors[nn] = 1
primes = list(factors.keys())
# Generates factors from primes[k:] subset
def generate(k):
if k == len(primes):
yield 1
else:
rest = generate(k+1)
prime = primes[k]
for _factor in rest:
prime_to_i = 1
# Prime_to_i iterates prime**o values, o being all possible exponents
for _ in range(factors[prime] + 1):
yield _factor * prime_to_i
prime_to_i *= prime
if with_1_and_n:
return list(generate(0))
else:
return list(generate(0))[1:-1]
def get_mean_divisor_pair(n):
"""
Calculate the 'mean' pair of divisors. The two divisors should be as close as possible to the sqrt(n).
The smaller divisor is the first value of the output pair
10 -> 2, 5
20 -> 4, 5
24 -> 4, 6
25 -> 5, 5
30 -> 5, 6
40 -> 5, 8
"""
assert isinstance(n, int)
assert n >= 1
div = divisors(n)
if n >= 3 and len(div) == 0: # Prime number -> make at least even
return 1, n
div.sort()
# if numbers of divisors is odd -> n = o * o : power number
if len(div) % 2 == 1:
idx_center = len(div) // 2
return div[idx_center], div[idx_center]
# else get the two numbers at the center
else:
idx_center_plus1 = len(div) // 2
idx_center_minus1 = idx_center_plus1 - 1
return div[idx_center_minus1], div[idx_center_plus1]
def get_divisor_safe(numerator, denominator):
divisor = numerator / denominator
divisor_int = int(divisor)
assert divisor_int == divisor
return divisor_int
def doubling_factor(small, big):
return np.log2(big / small)
def modulo(x, low, high):
return (x - low) % (high - low) + low
def angle2minuspi_pluspi(x):
return modulo(x=x, low=-np.pi, high=+np.pi)
# modulo is faster for larger arrays, for small ones they are similar but arctan is faster in this region
# -> as always you have to make an trade-off
# return np.arctan2(np.sin(x), np.cos(x))
# Derivative
def numeric_derivative(*, fun, x, eps=1e-5, axis=-1,
**kwargs_fun):
"""
Use central difference scheme to calculate the
numeric derivative of fun at point x.
Axis indicates the dimensions of the free variables.
The result has the shape f(x).shape + (x.shape)[axis]
"""
axis = axis_wrapper(axis=axis, n_dim=x.ndim)
fun_shape = np.shape(fun(x, **kwargs_fun))
var_shape = atleast_tuple(np.array(np.shape(x))[axis])
derv = np.empty(fun_shape + var_shape)
eps_mat = np.empty_like(x, dtype=float)
def update_eps_mat(_idx):
eps_mat[:] = 0
insert(eps_mat, val=eps, idx=_idx, axis=axis)
for idx in product(*(range(s) for s in var_shape)):
update_eps_mat(_idx=idx)
derv[(Ellipsis,) + idx] = (fun(x + eps_mat, **kwargs_fun) - fun(x - eps_mat, **kwargs_fun)) / (2 * eps)
return derv
# Statistics for distribution of number of obstacles
def p_normal_skew(x, loc=0.0, scale=1.0, a=0.0):
t = (x - loc) / scale
return 2 * norm.pdf(t) * norm.cdf(a*t)
def normal_skew_int(loc=0.0, scale=1.0, a=0.0, low=None, high=None, size=1):
if low is None:
low = loc-10*scale
if high is None:
high = loc+10*scale+1
p_max = p_normal_skew(x=loc, loc=loc, scale=scale, a=a)
samples = np.zeros(np.prod(size))
for i in range(int(np.prod(size))):
while True:
x = np.random.randint(low=low, high=high)
if np.random.rand() <= p_normal_skew(x, loc=loc, scale=scale, a=a) / p_max:
samples[i] = x
break
samples = samples.astype(int)
if size == 1:
samples = samples[0]
return samples
def random_uniform_ndim(*, low, high, shape=None):
n_dim = np.shape(low)[0]
x = np.zeros(shape_wrapper(shape) + (n_dim,))
for i in range(n_dim):
x[..., i] = np.random.uniform(low=low[i], high=high[i], size=shape)
return x
def get_stats(x, axis=None, return_array=False):
stats = {'mean': np.mean(x, axis=axis),
'std': np.std(x, axis=axis),
'median': np.median(x, axis=axis),
'min': np.min(x, axis=axis),
'max': np.max(x, axis=axis)}
if return_array:
return np.array([stats['mean'], stats['std'], stats['median'], stats['min'], stats['max']])
return stats
# Magic
def magic(n):
"""
Equivalent of the MATLAB function:
M = magic(n) returns an n-by-n matrix constructed from the integers 1 through n2 with equal row and column sums.
https://stackoverflow.com/questions/47834140/numpy-equivalent-of-matlabs-magic
"""
n = int(n)
if n < 1:
raise ValueError('Size must be at least 1')
if n == 1:
return np.array([[1]])
elif n == 2:
return np.array([[1, 3], [4, 2]])
elif n % 2 == 1:
p = np.arange(1, n+1)
return n*np.mod(p[:, None] + p - (n+3)//2, n) + np.mod(p[:, None] + 2*p-2, n) + 1
elif n % 4 == 0:
j = np.mod(np.arange(1, n+1), 4) // 2
k = j[:, None] == j
m = np.arange(1, n*n+1, n)[:, None] + np.arange(n)
m[k] = n*n + 1 - m[k]
else:
p = n//2
m = magic(p)
m = np.block([[m, m+2*p*p], [m+3*p*p, m+p*p]])
i = np.arange(p)
k = (n-2)//4
j = np.concatenate((np.arange(k), np.arange(n-k+1, n)))
        m[np.ix_(np.concatenate((i, i+p)), j)] = m[np.ix_(np.concatenate((i+p, i)), j)]
import lmfit
import numpy as np
from numpy.linalg import inv
import scipy as sp
import itertools
import matplotlib as mpl
from collections import OrderedDict, defaultdict
from pycqed.utilities import timer as tm_mod
from sklearn.mixture import GaussianMixture as GM
from sklearn.tree import DecisionTreeClassifier as DTC
from pycqed.analysis import fitting_models as fit_mods
from pycqed.analysis import analysis_toolbox as a_tools
import pycqed.analysis_v2.base_analysis as ba
import pycqed.analysis_v2.readout_analysis as roa
from pycqed.analysis_v2.readout_analysis import \
Singleshot_Readout_Analysis_Qutrit as SSROQutrit
import pycqed.analysis_v2.tomography_qudev as tomo
from pycqed.analysis.tools.plotting import SI_val_to_msg_str
from copy import deepcopy
from pycqed.measurement.sweep_points import SweepPoints
from pycqed.measurement.calibration.calibration_points import CalibrationPoints
import matplotlib.pyplot as plt
from pycqed.analysis.three_state_rotation import predict_proba_avg_ro
import logging
from pycqed.utilities import math
from pycqed.utilities.general import find_symmetry_index
import pycqed.measurement.waveform_control.segment as seg_mod
import datetime as dt
log = logging.getLogger(__name__)
try:
import qutip as qtp
except ImportError as e:
log.warning('Could not import qutip, tomography code will not work')
class AveragedTimedomainAnalysis(ba.BaseDataAnalysis):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.single_timestamp = True
self.params_dict = {
'value_names': 'value_names',
'measured_values': 'measured_values',
'measurementstring': 'measurementstring',
'exp_metadata': 'exp_metadata'}
self.numeric_params = []
if kwargs.get('auto', True):
self.run_analysis()
def process_data(self):
self.metadata = self.raw_data_dict.get('exp_metadata', {})
if self.metadata is None:
self.metadata = {}
cal_points = self.metadata.get('cal_points', None)
cal_points = self.options_dict.get('cal_points', cal_points)
cal_points_list = roa.convert_channel_names_to_index(
cal_points, len(self.raw_data_dict['measured_values'][0]),
self.raw_data_dict['value_names'])
self.proc_data_dict['cal_points_list'] = cal_points_list
measured_values = self.raw_data_dict['measured_values']
cal_idxs = self._find_calibration_indices()
scales = [np.std(x[cal_idxs]) for x in measured_values]
observable_vectors = np.zeros((len(cal_points_list),
len(measured_values)))
observable_vector_stds = np.ones_like(observable_vectors)
for i, observable in enumerate(cal_points_list):
for ch_idx, seg_idxs in enumerate(observable):
x = measured_values[ch_idx][seg_idxs] / scales[ch_idx]
if len(x) > 0:
observable_vectors[i][ch_idx] = np.mean(x)
if len(x) > 1:
observable_vector_stds[i][ch_idx] = np.std(x)
Omtx = (observable_vectors[1:] - observable_vectors[0]).T
d0 = observable_vectors[0]
corr_values = np.zeros(
(len(cal_points_list) - 1, len(measured_values[0])))
for i in range(len(measured_values[0])):
d = np.array([x[i] / scale for x, scale in zip(measured_values,
scales)])
corr_values[:, i] = inv(Omtx.T.dot(Omtx)).dot(Omtx.T).dot(d - d0)
self.proc_data_dict['corr_values'] = corr_values
def measurement_operators_and_results(self):
"""
Converts the calibration points to measurement operators. Assumes that
the calibration points are ordered the same as the basis states for
the tomography calculation (e.g. for two qubits |gg>, |ge>, |eg>, |ee>).
Also assumes that each calibration in the passed cal_points uses
different segments.
Returns:
A tuple of
the measured values without the calibration points;
the measurement operators corresponding to each channel;
and the expected covariance matrix between the operators.
"""
d = len(self.proc_data_dict['cal_points_list'])
cal_point_idxs = [set() for _ in range(d)]
for i, idxs_lists in enumerate(self.proc_data_dict['cal_points_list']):
for idxs in idxs_lists:
cal_point_idxs[i].update(idxs)
cal_point_idxs = [sorted(list(idxs)) for idxs in cal_point_idxs]
cal_point_idxs = np.array(cal_point_idxs)
raw_data = self.raw_data_dict['measured_values']
means = [None] * d
residuals = [list() for _ in raw_data]
for i, cal_point_idx in enumerate(cal_point_idxs):
means[i] = [np.mean(ch_data[cal_point_idx]) for ch_data in raw_data]
for j, ch_residuals in enumerate(residuals):
ch_residuals += list(raw_data[j][cal_point_idx] - means[i][j])
means = np.array(means)
residuals = np.array(residuals)
Fs = [np.diag(ms) for ms in means.T]
Omega = residuals.dot(residuals.T) / len(residuals.T)
data_idxs = np.setdiff1d(np.arange(len(raw_data[0])),
cal_point_idxs.flatten())
data = np.array([ch_data[data_idxs] for ch_data in raw_data])
return data, Fs, Omega
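# Usage sketch (illustrative): the returned quantities are intended for the
# tomography calculation mentioned in the docstring, e.g.
# >>> data, Fs, Omega = self.measurement_operators_and_results()
# where each Fs[i] is a diagonal measurement operator for channel i and Omega
# is the channel-noise covariance estimated from the calibration segments.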
def _find_calibration_indices(self):
cal_indices = set()
cal_points = self.options_dict['cal_points']
nr_segments = self.raw_data_dict['measured_values'].shape[-1]
for observable in cal_points:
if isinstance(observable, (list, np.ndarray)):
for idxs in observable:
cal_indices.update({idx % nr_segments for idx in idxs})
else: # assume dictionaries
for idxs in observable.values():
cal_indices.update({idx % nr_segments for idx in idxs})
return list(cal_indices)
def all_cal_points(d, nr_ch, reps=1):
"""
Generates a list of calibration points for a Hilbert space of dimension d,
with nr_ch channels and reps repetitions of each calibration point.
"""
return [[list(range(-reps*i, -reps*(i-1)))]*nr_ch for i in range(d, 0, -1)]
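# Example (illustrative): two cal states, one channel, two repetitions each:
# >>> all_cal_points(2, 1, reps=2)
# [[[-4, -3]], [[-2, -1]]]
# i.e. the last four segments of the sequence are used as calibration points.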
class Single_Qubit_TimeDomainAnalysis(ba.BaseDataAnalysis):
def process_data(self):
"""
This takes care of rotating and normalizing the data if required.
This should work for several input types.
- I/Q values (2 quadratures + cal points)
- weight functions (1 quadrature + cal points)
- counts (no cal points)
There are several options possible to specify the normalization
using the options dict.
cal_points (tuple) of indices of the calibration points
zero_coord, one_coord
"""
cal_points = self.options_dict.get('cal_points', None)
zero_coord = self.options_dict.get('zero_coord', None)
one_coord = self.options_dict.get('one_coord', None)
if cal_points is None:
# default for all standard Timedomain experiments
cal_points = [list(range(-4, -2)), list(range(-2, 0))]
if len(self.raw_data_dict['measured_values']) == 1:
# if only one weight function is used rotation is not required
self.proc_data_dict['corr_data'] = a_tools.rotate_and_normalize_data_1ch(
self.raw_data_dict['measured_values'][0],
cal_zero_points=cal_points[0],
cal_one_points=cal_points[1])
else:
self.proc_data_dict['corr_data'], zero_coord, one_coord = \
a_tools.rotate_and_normalize_data(
data=self.raw_data_dict['measured_values'][0:2],
zero_coord=zero_coord,
one_coord=one_coord,
cal_zero_points=cal_points[0],
cal_one_points=cal_points[1])
# This should be added to the hdf5 datafile but cannot because of the
# way that the "new" analysis works.
# self.add_dataset_to_analysisgroup('Corrected data',
# self.proc_data_dict['corr_data'])
class MultiQubit_TimeDomain_Analysis(ba.BaseDataAnalysis):
"""
Base class for multi-qubit time-domain analyses.
Parameters that can be specified in the options dict:
- rotation_type: type of rotation to be done on the raw data.
Types of rotations supported by this class:
- 'cal_states' (default, no need to specify): rotation based on
CalibrationPoints for 1D and TwoD data. Supports 2 and 3 cal states
per qubit
- 'fixed_cal_points' (only for TwoD, with 2 cal states):
does PCA on the columns corresponding to the highest cal state
to find the indices of that cal state in the columns, then uses
those to get the data points for the other cal state. Does
rotation using the mean of the data points corresponding to the
two cal states as the zero and one coordinates to rotate
the data.
- 'PCA': ignores cal points and does pca; in the case of TwoD data it
does PCA row by row
- 'column_PCA': ignores cal points and does pca; in the case of TwoD data it
does PCA column by column
- 'global_PCA' (only for TwoD): does PCA on the whole 2D array
- main_sp (default: None): dict with keys qb_name used to specify which
sweep parameter should be used as axis label in plot
- functionality to split measurements with tiled sweep_points:
- split_params (default: None): list of strings with sweep parameters
names expected to be found in SweepPoints. Groups data by these
parameters and stores it in proc_data_dict['split_data_dict'].
- select_split (default: None): dict with keys qb_names and values
a tuple (sweep_param_name, value) or (sweep_param_name, index).
The selected value is appended to self.measurement_strings, which
specifies the plot title. The selected parameter must also be part
of the split_params for that qubit.
"""
def __init__(self,
qb_names: list=None, label: str='',
t_start: str=None, t_stop: str=None, data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True,
params_dict=None, numeric_params=None, **kwargs):
super().__init__(t_start=t_start, t_stop=t_stop, label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only,
do_fitting=do_fitting, **kwargs)
self.qb_names = qb_names
self.params_dict = params_dict
if self.params_dict is None:
self.params_dict = {}
self.numeric_params = numeric_params
self.measurement_strings = {}
if self.numeric_params is None:
self.numeric_params = []
if not hasattr(self, "job"):
self.create_job(qb_names=qb_names, t_start=t_start, t_stop=t_stop,
label=label, data_file_path=data_file_path,
do_fitting=do_fitting, options_dict=options_dict,
extract_only=extract_only, params_dict=params_dict,
numeric_params=numeric_params, **kwargs)
if auto:
self.run_analysis()
def extract_data(self):
super().extract_data()
if self.qb_names is None:
self.qb_names = self.get_param_value('ro_qubits')
if self.qb_names is None:
raise ValueError('Provide the "qb_names."')
self.measurement_strings = {
qbn: self.raw_data_dict['measurementstring'] for qbn in
self.qb_names}
self.channel_map = self.get_param_value('meas_obj_value_names_map')
if self.channel_map is None:
# if the new name meas_obj_value_names_map is not found, try with
# the old name channel_map
self.channel_map = self.get_param_value('channel_map')
if self.channel_map is None:
value_names = self.raw_data_dict['value_names']
if np.ndim(value_names) > 0:
value_names = value_names
if 'w' in value_names[0]:
self.channel_map = a_tools.get_qb_channel_map_from_hdf(
self.qb_names, value_names=value_names,
file_path=self.raw_data_dict['folder'])
else:
self.channel_map = {}
for qbn in self.qb_names:
self.channel_map[qbn] = value_names
if len(self.channel_map) == 0:
raise ValueError('No qubit RO channels have been found.')
# creates self.sp
self.get_sweep_points()
def get_sweep_points(self):
self.sp = self.get_param_value('sweep_points')
if self.sp is not None:
self.sp = SweepPoints(self.sp)
def create_sweep_points_dict(self):
sweep_points_dict = self.get_param_value('sweep_points_dict')
hard_sweep_params = self.get_param_value('hard_sweep_params')
if self.sp is not None:
self.mospm = self.get_param_value('meas_obj_sweep_points_map')
main_sp = self.get_param_value('main_sp')
if self.mospm is None:
raise ValueError('When providing "sweep_points", '
'"meas_obj_sweep_points_map" has to be '
'provided in addition.')
if main_sp is not None:
self.proc_data_dict['sweep_points_dict'] = {}
for qbn, p in main_sp.items():
dim = self.sp.find_parameter(p)
if dim == 1:
log.warning(f"main_sp is only implemented for sweep "
f"dimension 0, but {p} is in dimension 1.")
self.proc_data_dict['sweep_points_dict'][qbn] = \
{'sweep_points': self.sp.get_sweep_params_property(
'values', dim, p)}
else:
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': self.sp.get_sweep_params_property(
'values', 0, self.mospm[qbn])[0]}
for qbn in self.qb_names}
elif sweep_points_dict is not None:
# assumed to be of the form {qbn1: swpts_array1, qbn2: swpts_array2}
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': sweep_points_dict[qbn]}
for qbn in self.qb_names}
elif hard_sweep_params is not None:
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': list(hard_sweep_params.values())[0][
'values']} for qbn in self.qb_names}
else:
self.proc_data_dict['sweep_points_dict'] = \
{qbn: {'sweep_points': self.data_filter(
self.raw_data_dict['hard_sweep_points'])}
for qbn in self.qb_names}
def create_sweep_points_2D_dict(self):
soft_sweep_params = self.get_param_value('soft_sweep_params')
if self.sp is not None:
self.proc_data_dict['sweep_points_2D_dict'] = OrderedDict()
for qbn in self.qb_names:
self.proc_data_dict['sweep_points_2D_dict'][qbn] = \
OrderedDict()
for pn in self.mospm[qbn]:
if pn in self.sp[1]:
self.proc_data_dict['sweep_points_2D_dict'][qbn][
pn] = self.sp[1][pn][0]
elif soft_sweep_params is not None:
self.proc_data_dict['sweep_points_2D_dict'] = \
{qbn: {pn: soft_sweep_params[pn]['values'] for
pn in soft_sweep_params}
for qbn in self.qb_names}
else:
if len(self.raw_data_dict['soft_sweep_points'].shape) == 1:
self.proc_data_dict['sweep_points_2D_dict'] = \
{qbn: {self.raw_data_dict['sweep_parameter_names'][1]:
self.raw_data_dict['soft_sweep_points']} for
qbn in self.qb_names}
else:
sspn = self.raw_data_dict['sweep_parameter_names'][1:]
self.proc_data_dict['sweep_points_2D_dict'] = \
{qbn: {sspn[i]: self.raw_data_dict['soft_sweep_points'][i]
for i in range(len(sspn))} for qbn in self.qb_names}
if self.get_param_value('percentage_done', 100) < 100:
# This indicates an interrupted measurement.
# Remove non-measured sweep points in that case.
# raw_data_dict['soft_sweep_points'] is obtained in
# BaseDataAnalysis.add_measured_data(), and its length should
# always correspond to the actual number of measured soft sweep
# points.
ssl = len(self.raw_data_dict['soft_sweep_points'])
for sps in self.proc_data_dict['sweep_points_2D_dict'].values():
for k, v in sps.items():
sps[k] = v[:ssl]
def create_meas_results_per_qb(self):
measured_RO_channels = list(self.raw_data_dict['measured_data'])
meas_results_per_qb_raw = {}
meas_results_per_qb = {}
for qb_name, RO_channels in self.channel_map.items():
meas_results_per_qb_raw[qb_name] = {}
meas_results_per_qb[qb_name] = {}
if isinstance(RO_channels, str):
meas_ROs_per_qb = [RO_ch for RO_ch in measured_RO_channels
if RO_channels in RO_ch]
for meas_RO in meas_ROs_per_qb:
meas_results_per_qb_raw[qb_name][meas_RO] = \
self.raw_data_dict[
'measured_data'][meas_RO]
meas_results_per_qb[qb_name][meas_RO] = \
self.data_filter(
meas_results_per_qb_raw[qb_name][meas_RO])
elif isinstance(RO_channels, list):
for qb_RO_ch in RO_channels:
meas_ROs_per_qb = [RO_ch for RO_ch in measured_RO_channels
if qb_RO_ch in RO_ch]
for meas_RO in meas_ROs_per_qb:
meas_results_per_qb_raw[qb_name][meas_RO] = \
self.raw_data_dict[
'measured_data'][meas_RO]
meas_results_per_qb[qb_name][meas_RO] = \
self.data_filter(
meas_results_per_qb_raw[qb_name][meas_RO])
else:
raise TypeError('The RO channels for {} must either be a list '
'or a string.'.format(qb_name))
self.proc_data_dict['meas_results_per_qb_raw'] = \
meas_results_per_qb_raw
self.proc_data_dict['meas_results_per_qb'] = \
meas_results_per_qb
def process_data(self):
super().process_data()
self.data_filter = self.get_param_value('data_filter')
prep_params = self.get_param_value('preparation_params',
default_value=dict())
self.data_with_reset = False
if self.data_filter is None:
if 'active' in prep_params.get('preparation_type', 'wait'):
reset_reps = prep_params.get('reset_reps', 1)
self.data_filter = lambda x: x[reset_reps::reset_reps+1]
self.data_with_reset = True
elif "preselection" in prep_params.get('preparation_type', 'wait'):
self.data_filter = lambda x: x[1::2] # filter preselection RO
if self.data_filter is None:
self.data_filter = lambda x: x
self.create_sweep_points_dict()
self.create_meas_results_per_qb()
# temporary fix for appending calibration points to x values but
# without breaking sequences not yet using this interface.
self.rotate = self.get_param_value('rotate', default_value=False)
cal_points = self.get_param_value('cal_points')
last_ge_pulses = self.get_param_value('last_ge_pulses',
default_value=False)
try:
self.cp = CalibrationPoints.from_string(cal_points)
# for now assuming the same for all qubits.
self.cal_states_dict = self.cp.get_indices(
self.qb_names, prep_params)[self.qb_names[0]]
cal_states_rots = self.cp.get_rotations(last_ge_pulses,
self.qb_names[0])[self.qb_names[0]] if self.rotate \
else None
self.cal_states_rotations = self.get_param_value(
'cal_states_rotations', default_value=cal_states_rots)
sweep_points_w_calpts = \
{qbn: {'sweep_points': self.cp.extend_sweep_points(
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'], qbn)} for qbn in self.qb_names}
self.proc_data_dict['sweep_points_dict'] = sweep_points_w_calpts
except TypeError as e:
log.error(e)
log.warning("Failed retrieving cal point objects or states. "
"Please update measurement to provide cal point object "
"in metadata. Trying to get them using the old way ...")
self.cal_states_rotations = self.get_param_value(
'cal_states_rotations', default_value=None) \
if self.rotate else None
self.cal_states_dict = self.get_param_value('cal_states_dict',
default_value={})
if self.get_param_value('global_PCA') is not None:
log.warning('Parameter "global_PCA" is deprecated. Please set '
'rotation_type="global_PCA" instead.')
self.rotation_type = self.get_param_value(
'rotation_type',
default_value='cal_states' if self.rotate else 'no_rotation')
# create projected_data_dict
self.data_to_fit = deepcopy(self.get_param_value('data_to_fit'))
if self.data_to_fit is None:
# If we have cal points, but data_to_fit is not specified,
# choose a reasonable default value. In cases with only two cal
# points, this decides which projected plot is generated. (In
# cases with three cal points, we will anyways get all three
# projected plots.)
if 'e' in self.cal_states_dict.keys():
self.data_to_fit = {qbn: 'pe' for qbn in self.qb_names}
elif 'g' in self.cal_states_dict.keys():
self.data_to_fit = {qbn: 'pg' for qbn in self.qb_names}
else:
self.data_to_fit = {}
# TODO: Steph 15.09.2020
# This is a hack to allow list inside data_to_fit. These lists are
# currently only supported by MultiCZgate_CalibAnalysis
for qbn in self.data_to_fit:
if isinstance(self.data_to_fit[qbn], (list, tuple)):
self.data_to_fit[qbn] = self.data_to_fit[qbn][0]
if self.rotate or self.rotation_type == 'global_PCA':
self.cal_states_analysis()
else:
# this assumes data obtained with classifier detector!
# ie pg, pe, pf are expected to be in the value_names
self.proc_data_dict['projected_data_dict'] = OrderedDict()
for qbn, data_dict in self.proc_data_dict[
'meas_results_per_qb'].items():
self.proc_data_dict['projected_data_dict'][qbn] = OrderedDict()
for state_prob in ['pg', 'pe', 'pf']:
self.proc_data_dict['projected_data_dict'][qbn].update(
{state_prob: data for key, data in data_dict.items()
if state_prob in key})
if self.cal_states_dict is None:
self.cal_states_dict = {}
self.num_cal_points = np.array(list(
self.cal_states_dict.values())).flatten().size
# correct probabilities given calibration matrix
if self.get_param_value("correction_matrix") is not None:
self.proc_data_dict['projected_data_dict_corrected'] = OrderedDict()
for qbn, data_dict in self.proc_data_dict[
'meas_results_per_qb'].items():
self.proc_data_dict['projected_data_dict_corrected'][qbn] = OrderedDict()
probas_raw = np.asarray([data_dict[k] for k in data_dict
for state_prob in ['pg', 'pe', 'pf'] if
state_prob in k])
corr_mtx = self.get_param_value("correction_matrix")[qbn]
probas_corrected = np.linalg.inv(corr_mtx).T @ probas_raw
self.proc_data_dict['projected_data_dict_corrected'][qbn].update(
{state_prob: data for state_prob, data in
zip(["pg", "pe", "pf"], probas_corrected)})
# get data_to_fit
self.proc_data_dict['data_to_fit'] = OrderedDict()
for qbn, prob_data in self.proc_data_dict[
'projected_data_dict'].items():
if qbn in self.data_to_fit:
self.proc_data_dict['data_to_fit'][qbn] = prob_data[
self.data_to_fit[qbn]]
# create msmt_sweep_points, sweep_points, cal_points_sweep_points
for qbn in self.qb_names:
if self.num_cal_points > 0:
self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'] = \
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][:-self.num_cal_points]
self.proc_data_dict['sweep_points_dict'][qbn][
'cal_points_sweep_points'] = \
self.proc_data_dict['sweep_points_dict'][qbn][
'sweep_points'][-self.num_cal_points::]
else:
self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points'] = self.proc_data_dict[
'sweep_points_dict'][qbn]['sweep_points']
self.proc_data_dict['sweep_points_dict'][qbn][
'cal_points_sweep_points'] = []
if self.options_dict.get('TwoD', False):
self.create_sweep_points_2D_dict()
# handle data splitting if needed
self.split_data()
def split_data(self):
def unique(l):
try:
return np.unique(l, return_inverse=True)
except Exception:
h = [repr(a) for a in l]
_, i, j = np.unique(h, return_index=True, return_inverse=True)
return l[i], j
split_params = self.get_param_value('split_params', [])
if not len(split_params):
return
pdd = self.proc_data_dict
pdd['split_data_dict'] = {}
for qbn in self.qb_names:
pdd['split_data_dict'][qbn] = {}
for p in split_params:
dim = self.sp.find_parameter(p)
sv = self.sp.get_sweep_params_property(
'values', param_names=p, dimension=dim)
usp, ind = unique(sv)
if len(usp) <= 1:
continue
svs = [self.sp.subset(ind == i, dim) for i in
range(len(usp))]
[s.remove_sweep_parameter(p) for s in svs]
sdd = {}
pdd['split_data_dict'][qbn][p] = sdd
for i in range(len(usp)):
subset = (np.concatenate(
[ind == i,
[True] * len(pdd['sweep_points_dict'][qbn][
'cal_points_sweep_points'])]))
sdd[i] = {}
sdd[i]['value'] = usp[i]
sdd[i]['sweep_points'] = svs[i]
d = pdd['sweep_points_dict'][qbn]
if dim == 0:
sdd[i]['sweep_points_dict'] = {
'sweep_points': d['sweep_points'][subset],
'msmt_sweep_points':
d['msmt_sweep_points'][ind == i],
'cal_points_sweep_points':
d['cal_points_sweep_points'],
}
sdd[i]['sweep_points_2D_dict'] = pdd[
'sweep_points_2D_dict'][qbn]
else:
sdd[i]['sweep_points_dict'] = \
pdd['sweep_points_dict'][qbn]
sdd[i]['sweep_points_2D_dict'] = {
k: v[ind == i] for k, v in pdd[
'sweep_points_2D_dict'][qbn].items()}
for d in ['projected_data_dict', 'data_to_fit']:
if isinstance(pdd[d][qbn], dict):
if dim == 0:
sdd[i][d] = {k: v[:, subset] for
k, v in pdd[d][qbn].items()}
else:
sdd[i][d] = {k: v[ind == i, :] for
k, v in pdd[d][qbn].items()}
else:
if dim == 0:
sdd[i][d] = pdd[d][qbn][:, subset]
else:
sdd[i][d] = pdd[d][qbn][ind == i, :]
select_split = self.get_param_value('select_split')
if select_split is not None:
for qbn, select in select_split.items():
p, v = select
if p not in pdd['split_data_dict'][qbn]:
log.warning(f"Split parameter {p} for {qbn} not "
f"found. Ignoring this selection.")
try:
ind = [a['value'] for a in pdd['split_data_dict'][
qbn][p].values()].index(v)
except ValueError:
ind = v
try:
pdd['split_data_dict'][qbn][p][ind]
except ValueError:
log.warning(f"Value {v} for split parameter {p} "
f"of {qbn} not found. Ignoring this "
f"selection.")
continue
for d in ['projected_data_dict', 'data_to_fit',
'sweep_points_dict', 'sweep_points_2D_dict']:
pdd[d][qbn] = pdd['split_data_dict'][qbn][p][ind][d]
self.measurement_strings[qbn] += f' ({p}: {v})'
def get_cal_data_points(self):
self.num_cal_points = np.array(list(
self.cal_states_dict.values())).flatten().size
do_PCA = self.rotation_type == 'PCA' or \
self.rotation_type == 'column_PCA'
self.cal_states_dict_for_rotation = OrderedDict()
states = False
cal_states_rotations = self.cal_states_rotations
for key in cal_states_rotations.keys():
if key == 'g' or key == 'e' or key == 'f':
states = True
for qbn in self.qb_names:
self.cal_states_dict_for_rotation[qbn] = OrderedDict()
if states:
cal_states_rot_qb = cal_states_rotations
else:
cal_states_rot_qb = cal_states_rotations[qbn]
for i in range(len(cal_states_rot_qb)):
cal_state = \
[k for k, idx in cal_states_rot_qb.items()
if idx == i][0]
self.cal_states_dict_for_rotation[qbn][cal_state] = \
None if do_PCA and self.num_cal_points != 3 else \
self.cal_states_dict[cal_state]
def cal_states_analysis(self):
self.get_cal_data_points()
self.proc_data_dict['projected_data_dict'] = OrderedDict(
{qbn: '' for qbn in self.qb_names})
for qbn in self.qb_names:
cal_states_dict = self.cal_states_dict_for_rotation[qbn]
if len(cal_states_dict) not in [0, 2, 3]:
raise NotImplementedError('Calibration states rotation is '
'currently only implemented for 0, '
'2, or 3 cal states per qubit.')
data_mostly_g = self.get_param_value('data_mostly_g',
default_value=True)
if self.get_param_value('TwoD', default_value=False):
if self.rotation_type == 'global_PCA':
self.proc_data_dict['projected_data_dict'].update(
self.global_pca_TwoD(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.data_to_fit,
data_mostly_g=data_mostly_g))
elif len(cal_states_dict) == 3:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data_3_cal_states_TwoD(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map,
self.cal_states_dict_for_rotation))
elif self.rotation_type == 'fixed_cal_points':
rotated_data_dict, zero_coord, one_coord = \
self.rotate_data_TwoD_same_fixed_cal_idxs(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.cal_states_dict_for_rotation,
self.data_to_fit)
self.proc_data_dict['projected_data_dict'].update(
rotated_data_dict)
self.proc_data_dict['rotation_coordinates'] = \
[zero_coord, one_coord]
else:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data_TwoD(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.cal_states_dict_for_rotation,
self.data_to_fit, data_mostly_g=data_mostly_g,
column_PCA=self.rotation_type == 'column_PCA'))
else:
if len(cal_states_dict) == 3:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data_3_cal_states(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map,
self.cal_states_dict_for_rotation))
else:
self.proc_data_dict['projected_data_dict'].update(
self.rotate_data(
qbn, self.proc_data_dict['meas_results_per_qb'],
self.channel_map, self.cal_states_dict_for_rotation,
self.data_to_fit, data_mostly_g=data_mostly_g))
@staticmethod
def rotate_data_3_cal_states(qb_name, meas_results_per_qb, channel_map,
cal_states_dict):
# FOR 3 CAL STATES
rotated_data_dict = OrderedDict()
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict[qb_name] = OrderedDict()
cal_pts_idxs = list(cal_states_dict[qb_name].values())
cal_points_data = np.zeros((len(cal_pts_idxs), 2))
if list(meas_res_dict) == channel_map[qb_name]:
raw_data = np.array([v for v in meas_res_dict.values()]).T
for i, cal_idx in enumerate(cal_pts_idxs):
cal_points_data[i, :] = np.mean(raw_data[cal_idx, :],
axis=0)
rotated_data = predict_proba_avg_ro(raw_data, cal_points_data)
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'] = rotated_data[:, i]
else:
raise NotImplementedError('Calibration states rotation with 3 '
'cal states only implemented for '
'2 readout channels per qubit.')
return rotated_data_dict
@staticmethod
def rotate_data(qb_name, meas_results_per_qb, channel_map,
cal_states_dict, data_to_fit, data_mostly_g=True):
# ONLY WORKS FOR 2 CAL STATES
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict = OrderedDict()
if len(cal_states_dict[qb_name]) == 0:
cal_zero_points = None
cal_one_points = None
else:
cal_zero_points = list(cal_states_dict[qb_name].values())[0]
cal_one_points = list(cal_states_dict[qb_name].values())[1]
rotated_data_dict[qb_name] = OrderedDict()
if len(meas_res_dict) == 1:
# one RO channel per qubit
if cal_zero_points is None and cal_one_points is None:
data = meas_res_dict[list(meas_res_dict)[0]]
data = (data - np.min(data))/(np.max(data) - np.min(data))
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]] = data
else:
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
a_tools.rotate_and_normalize_data_1ch(
data=meas_res_dict[list(meas_res_dict)[0]],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
elif list(meas_res_dict) == channel_map[qb_name]:
# two RO channels per qubit
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=np.array([v for v in meas_res_dict.values()]),
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]] = data
else:
# multiple readouts per qubit per channel
if isinstance(channel_map[qb_name], str):
qb_ro_ch0 = channel_map[qb_name]
else:
qb_ro_ch0 = channel_map[qb_name][0]
ro_suffixes = [s[len(qb_ro_ch0)+1::] for s in
list(meas_res_dict) if qb_ro_ch0 in s]
for i, ro_suf in enumerate(ro_suffixes):
if len(ro_suffixes) == len(meas_res_dict):
# one RO ch per qubit
if cal_zero_points is None and cal_one_points is None:
data = meas_res_dict[list(meas_res_dict)[i]]
data = (data - np.min(data))/(np.max(data) - np.min(data))
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf] = data
else:
rotated_data_dict[qb_name][ro_suf] = \
a_tools.rotate_and_normalize_data_1ch(
data=meas_res_dict[list(meas_res_dict)[i]],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
else:
# two RO ch per qubit
keys = [k for k in meas_res_dict if ro_suf in k]
correct_keys = [k for k in keys
if k[len(qb_ro_ch0)+1::] == ro_suf]
data_array = np.array([meas_res_dict[k]
for k in correct_keys])
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf] = data
return rotated_data_dict
@staticmethod
def rotate_data_3_cal_states_TwoD(qb_name, meas_results_per_qb,
channel_map, cal_states_dict):
# FOR 3 CAL STATES
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict = OrderedDict()
rotated_data_dict[qb_name] = OrderedDict()
cal_pts_idxs = list(cal_states_dict[qb_name].values())
cal_points_data = np.zeros((len(cal_pts_idxs), 2))
if list(meas_res_dict) == channel_map[qb_name]:
# two RO channels per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'] = np.zeros(
raw_data_arr.shape)
for col in range(raw_data_arr.shape[1]):
raw_data = np.concatenate([
v[:, col].reshape(len(v[:, col]), 1) for
v in meas_res_dict.values()], axis=1)
for i, cal_idx in enumerate(cal_pts_idxs):
cal_points_data[i, :] = np.mean(raw_data[cal_idx, :],
axis=0)
# rotated data is (raw_data_arr.shape[0], 3)
rotated_data = predict_proba_avg_ro(
raw_data, cal_points_data)
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'][:, col] = \
rotated_data[:, i]
else:
raise NotImplementedError('Calibration states rotation with 3 '
'cal states only implemented for '
'2 readout channels per qubit.')
# transpose data
for i, state in enumerate(list(cal_states_dict[qb_name])):
rotated_data_dict[qb_name][f'p{state}'] = \
rotated_data_dict[qb_name][f'p{state}'].T
return rotated_data_dict
@staticmethod
def global_pca_TwoD(qb_name, meas_results_per_qb, channel_map,
data_to_fit, data_mostly_g=True):
meas_res_dict = meas_results_per_qb[qb_name]
if list(meas_res_dict) != channel_map[qb_name]:
raise NotImplementedError('Global PCA is only implemented '
'for two-channel RO!')
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rotated_data_dict = OrderedDict({qb_name: OrderedDict()})
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
deepcopy(raw_data_arr.transpose())
data_array = np.array(
[v.T.flatten() for v in meas_res_dict.values()])
rot_flat_data, _, _ = \
a_tools.rotate_and_normalize_data_IQ(
data=data_array)
data = np.reshape(rot_flat_data, raw_data_arr.T.shape)
data = a_tools.set_majority_sign(data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]] = data
return rotated_data_dict
@staticmethod
def rotate_data_TwoD(qb_name, meas_results_per_qb, channel_map,
cal_states_dict, data_to_fit,
column_PCA=False, data_mostly_g=True):
meas_res_dict = meas_results_per_qb[qb_name]
rotated_data_dict = OrderedDict()
if len(cal_states_dict[qb_name]) == 0:
cal_zero_points = None
cal_one_points = None
else:
cal_zero_points = list(cal_states_dict[qb_name].values())[0]
cal_one_points = list(cal_states_dict[qb_name].values())[1]
rotated_data_dict[qb_name] = OrderedDict()
if len(meas_res_dict) == 1:
# one RO channel per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
deepcopy(raw_data_arr.transpose())
if column_PCA:
for row in range(raw_data_arr.shape[0]):
data = a_tools.rotate_and_normalize_data_1ch(
data=raw_data_arr[row, :],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]][
:, row] = data
else:
for col in range(raw_data_arr.shape[1]):
data = a_tools.rotate_and_normalize_data_1ch(
data=raw_data_arr[:, col],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]][col] = data
elif list(meas_res_dict) == channel_map[qb_name]:
# two RO channels per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
deepcopy(raw_data_arr.transpose())
if column_PCA:
for row in range(raw_data_arr.shape[0]):
data_array = np.array(
[v[row, :] for v in meas_res_dict.values()])
data, _, _ = \
a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][data_to_fit[qb_name]][
:, row] = data
else:
for col in range(raw_data_arr.shape[1]):
data_array = np.array(
[v[:, col] for v in meas_res_dict.values()])
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][
data_to_fit[qb_name]][col] = data
else:
# multiple readouts per qubit per channel
if isinstance(channel_map[qb_name], str):
qb_ro_ch0 = channel_map[qb_name]
else:
qb_ro_ch0 = channel_map[qb_name][0]
ro_suffixes = [s[len(qb_ro_ch0)+1::] for s in
list(meas_res_dict) if qb_ro_ch0 in s]
for i, ro_suf in enumerate(ro_suffixes):
if len(ro_suffixes) == len(meas_res_dict):
# one RO ch per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[i]]
rotated_data_dict[qb_name][ro_suf] = \
deepcopy(raw_data_arr.transpose())
for col in range(raw_data_arr.shape[1]):
data = a_tools.rotate_and_normalize_data_1ch(
data=raw_data_arr[:, col],
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf][col] = data
else:
# two RO ch per qubit
raw_data_arr = meas_res_dict[list(meas_res_dict)[i]]
rotated_data_dict[qb_name][ro_suf] = \
deepcopy(raw_data_arr.transpose())
for col in range(raw_data_arr.shape[1]):
data_array = np.array(
[v[:, col] for k, v in meas_res_dict.items()
if ro_suf in k])
data, _, _ = a_tools.rotate_and_normalize_data_IQ(
data=data_array,
cal_zero_points=cal_zero_points,
cal_one_points=cal_one_points)
if cal_zero_points is None:
data = a_tools.set_majority_sign(
data, -1 if data_mostly_g else 1)
rotated_data_dict[qb_name][ro_suf][col] = data
return rotated_data_dict
@staticmethod
def rotate_data_TwoD_same_fixed_cal_idxs(qb_name, meas_results_per_qb,
channel_map, cal_states_dict,
data_to_fit):
meas_res_dict = meas_results_per_qb[qb_name]
if list(meas_res_dict) != channel_map[qb_name]:
raise NotImplementedError('rotate_data_TwoD_same_fixed_cal_idxs '
'only implemented for two-channel RO!')
if len(cal_states_dict[qb_name]) == 0:
cal_zero_points = None
cal_one_points = None
else:
cal_zero_points = list(cal_states_dict[qb_name].values())[0]
cal_one_points = list(cal_states_dict[qb_name].values())[1]
# do pca on the one cal states
raw_data_arr = meas_res_dict[list(meas_res_dict)[0]]
rot_dat_e = np.zeros(raw_data_arr.shape[1])
for row in cal_one_points:
rot_dat_e += a_tools.rotate_and_normalize_data_IQ(
data=np.array([v[row, :] for v in meas_res_dict.values()]),
cal_zero_points=None, cal_one_points=None)[0]
rot_dat_e /= len(cal_one_points)
# find the values of the zero and one cal points
col_idx = np.argmax(np.abs(rot_dat_e))
zero_coord = [np.mean([v[r, col_idx] for r in cal_zero_points])
for v in meas_res_dict.values()]
one_coord = [np.mean([v[r, col_idx] for r in cal_one_points])
for v in meas_res_dict.values()]
# rotate all data based on the fixed zero_coord and one_coord
rotated_data_dict = OrderedDict({qb_name: OrderedDict()})
rotated_data_dict[qb_name][data_to_fit[qb_name]] = \
deepcopy(raw_data_arr.transpose())
for col in range(raw_data_arr.shape[1]):
data_array = np.array(
[v[:, col] for v in meas_res_dict.values()])
rotated_data_dict[qb_name][
data_to_fit[qb_name]][col], _, _ = \
a_tools.rotate_and_normalize_data_IQ(
data=data_array,
zero_coord=zero_coord,
one_coord=one_coord)
return rotated_data_dict, zero_coord, one_coord
def get_xaxis_label_unit(self, qb_name):
hard_sweep_params = self.get_param_value('hard_sweep_params')
sweep_name = self.get_param_value('sweep_name')
sweep_unit = self.get_param_value('sweep_unit')
if self.sp is not None:
main_sp = self.get_param_value('main_sp', None)
if main_sp is not None and qb_name in main_sp:
param_names = [main_sp[qb_name]]
else:
param_names = self.mospm[qb_name]
_, xunit, xlabel = self.sp.get_sweep_params_description(
param_names=param_names, dimension=0)[0]
elif hard_sweep_params is not None:
xlabel = list(hard_sweep_params)[0]
xunit = list(hard_sweep_params.values())[0][
'unit']
elif (sweep_name is not None) and (sweep_unit is not None):
xlabel = sweep_name
xunit = sweep_unit
else:
xlabel = self.raw_data_dict['sweep_parameter_names']
xunit = self.raw_data_dict['sweep_parameter_units']
if np.ndim(xlabel) > 0:
xlabel = xlabel[0]
if np.ndim(xunit) > 0:
xunit = xunit[0]
return xlabel, xunit
@staticmethod
def get_cal_state_color(cal_state_label):
if cal_state_label == 'g' or cal_state_label == r'$|g\rangle$':
return 'k'
elif cal_state_label == 'e' or cal_state_label == r'$|e\rangle$':
return 'gray'
elif cal_state_label == 'f' or cal_state_label == r'$|f\rangle$':
return 'C8'
else:
return 'C4'
@staticmethod
def get_latex_prob_label(prob_label):
if '$' in prob_label:
return prob_label
elif 'p' in prob_label.lower():
return r'$|{}\rangle$'.format(prob_label[-1])
else:
return r'$|{}\rangle$'.format(prob_label)
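# Example (illustrative): get_latex_prob_label('pe') returns r'$|e\rangle$',
# used for the population axis labels in prepare_plots below.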
def prepare_plots(self):
if self.get_param_value('plot_proj_data', default_value=True):
select_split = self.get_param_value('select_split')
fig_name_suffix = self.get_param_value('fig_name_suffix', '')
title_suffix = self.get_param_value('title_suffix', '')
for qb_name, corr_data in self.proc_data_dict[
'projected_data_dict'].items():
fig_name = f'projected_plot_{qb_name}'
title_suf = title_suffix
if select_split is not None:
param, idx = select_split[qb_name]
# remove qb_name from param
p = '_'.join([e for e in param.split('_') if e != qb_name])
# create suffix
suf = f'({p}, {str(np.round(idx, 3))})'
# add suffix
fig_name += f'_{suf}'
title_suf = f'{suf}_{title_suf}' if \
len(title_suf) else suf
if isinstance(corr_data, dict):
for data_key, data in corr_data.items():
if not self.rotate:
data_label = data_key
plot_name_suffix = data_key
plot_cal_points = False
data_axis_label = 'Population'
else:
fn = f'{fig_name}_{data_key}'
data_label = 'Data'
plot_name_suffix = ''
tf = f'{data_key}_{title_suf}' if \
len(title_suf) else data_key
plot_cal_points = (
not self.options_dict.get('TwoD', False))
data_axis_label = \
'Strongest principal component (arb.)' if \
'pca' in self.rotation_type.lower() else \
'{} state population'.format(
self.get_latex_prob_label(data_key))
self.prepare_projected_data_plot(
fn, data, qb_name=qb_name,
data_label=data_label,
title_suffix=tf,
plot_name_suffix=plot_name_suffix,
fig_name_suffix=fig_name_suffix,
data_axis_label=data_axis_label,
plot_cal_points=plot_cal_points)
else:
fig_name = 'projected_plot_' + qb_name
self.prepare_projected_data_plot(
fig_name, corr_data, qb_name=qb_name,
plot_cal_points=(
not self.options_dict.get('TwoD', False)))
if self.get_param_value('plot_raw_data', default_value=True):
self.prepare_raw_data_plots(plot_filtered=False)
if 'preparation_params' in self.metadata:
if 'active' in self.metadata['preparation_params'].get(
'preparation_type', 'wait'):
self.prepare_raw_data_plots(plot_filtered=True)
def prepare_raw_data_plots(self, plot_filtered=False):
if plot_filtered or not self.data_with_reset:
key = 'meas_results_per_qb'
suffix = 'filtered' if self.data_with_reset else ''
func_for_swpts = lambda qb_name: self.proc_data_dict[
'sweep_points_dict'][qb_name]['sweep_points']
else:
key = 'meas_results_per_qb_raw'
suffix = ''
func_for_swpts = lambda qb_name: self.raw_data_dict[
'hard_sweep_points']
for qb_name, raw_data_dict in self.proc_data_dict[key].items():
if qb_name not in self.qb_names:
continue
sweep_points = func_for_swpts(qb_name)
if len(raw_data_dict) == 1:
numplotsx = 1
numplotsy = 1
elif len(raw_data_dict) == 2:
numplotsx = 1
numplotsy = 2
else:
numplotsx = 2
numplotsy = len(raw_data_dict) // 2 + len(raw_data_dict) % 2
plotsize = self.get_default_plot_params(set=False)['figure.figsize']
fig_title = (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring'] +
'\nRaw data ' + suffix + ' ' + qb_name)
plot_name = 'raw_plot_' + qb_name + suffix
xlabel, xunit = self.get_xaxis_label_unit(qb_name)
for ax_id, ro_channel in enumerate(raw_data_dict):
if self.get_param_value('TwoD', default_value=False):
if self.sp is None:
soft_sweep_params = self.get_param_value(
'soft_sweep_params')
if soft_sweep_params is not None:
yunit = list(soft_sweep_params.values())[0]['unit']
else:
yunit = self.raw_data_dict[
'sweep_parameter_units'][1]
if np.ndim(yunit) > 0:
yunit = yunit[0]
for pn, ssp in self.proc_data_dict['sweep_points_2D_dict'][
qb_name].items():
ylabel = pn
if self.sp is not None:
yunit = self.sp.get_sweep_params_property(
'unit', dimension=1, param_names=pn)
ylabel = self.sp.get_sweep_params_property(
'label', dimension=1, param_names=pn)
self.plot_dicts[f'{plot_name}_{ro_channel}_{pn}'] = {
'fig_id': plot_name + '_' + pn,
'ax_id': ax_id,
'plotfn': self.plot_colorxy,
'xvals': sweep_points,
'yvals': ssp,
'zvals': raw_data_dict[ro_channel].T,
'xlabel': xlabel,
'xunit': xunit,
'ylabel': ylabel,
'yunit': yunit,
'numplotsx': numplotsx,
'numplotsy': numplotsy,
'plotsize': (plotsize[0]*numplotsx,
plotsize[1]*numplotsy),
'title': fig_title,
'clabel': '{} (Vpeak)'.format(ro_channel)}
else:
self.plot_dicts[plot_name + '_' + ro_channel] = {
'fig_id': plot_name,
'ax_id': ax_id,
'plotfn': self.plot_line,
'xvals': sweep_points,
'xlabel': xlabel,
'xunit': xunit,
'yvals': raw_data_dict[ro_channel],
'ylabel': '{} (Vpeak)'.format(ro_channel),
'yunit': '',
'numplotsx': numplotsx,
'numplotsy': numplotsy,
'plotsize': (plotsize[0]*numplotsx,
plotsize[1]*numplotsy),
'title': fig_title}
if len(raw_data_dict) == 1:
self.plot_dicts[
plot_name + '_' + list(raw_data_dict)[0]]['ax_id'] = None
def prepare_projected_data_plot(
self, fig_name, data, qb_name, title_suffix='', sweep_points=None,
plot_cal_points=True, plot_name_suffix='', fig_name_suffix='',
data_label='Data', data_axis_label='', do_legend_data=True,
do_legend_cal_states=True):
if len(fig_name_suffix):
fig_name = f'{fig_name}_{fig_name_suffix}'
if data_axis_label == '':
data_axis_label = 'Strongest principal component (arb.)' if \
'pca' in self.rotation_type.lower() else \
'{} state population'.format(self.get_latex_prob_label(
self.data_to_fit[qb_name]))
plotsize = self.get_default_plot_params(set=False)['figure.figsize']
plotsize = (plotsize[0], plotsize[0]/1.25)
if sweep_points is None:
sweep_points = self.proc_data_dict['sweep_points_dict'][qb_name][
'sweep_points']
plot_names_cal = []
if plot_cal_points and self.num_cal_points != 0:
yvals = data[:-self.num_cal_points]
xvals = sweep_points[:-self.num_cal_points]
# plot cal points
for i, cal_pts_idxs in enumerate(
self.cal_states_dict.values()):
plot_dict_name_cal = fig_name + '_' + \
list(self.cal_states_dict)[i] + '_' + \
plot_name_suffix
plot_names_cal += [plot_dict_name_cal]
self.plot_dicts[plot_dict_name_cal] = {
'fig_id': fig_name,
'plotfn': self.plot_line,
'plotsize': plotsize,
'xvals': self.proc_data_dict['sweep_points_dict'][qb_name][
'cal_points_sweep_points'][cal_pts_idxs],
'yvals': data[cal_pts_idxs],
'setlabel': list(self.cal_states_dict)[i],
'do_legend': do_legend_cal_states,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left',
'linestyle': 'none',
'line_kws': {'color': self.get_cal_state_color(
list(self.cal_states_dict)[i])}}
self.plot_dicts[plot_dict_name_cal+'_line'] = {
'fig_id': fig_name,
'plotsize': plotsize,
'plotfn': self.plot_hlines,
'y': np.mean(data[cal_pts_idxs]),
'xmin': self.proc_data_dict['sweep_points_dict'][qb_name][
'sweep_points'][0],
'xmax': self.proc_data_dict['sweep_points_dict'][qb_name][
'sweep_points'][-1],
'colors': 'gray'}
else:
yvals = data
xvals = sweep_points
title = (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring'])
title += '\n' + f'{qb_name}_{title_suffix}' if len(title_suffix) else \
' ' + qb_name
plot_dict_name = f'{fig_name}_{plot_name_suffix}'
xlabel, xunit = self.get_xaxis_label_unit(qb_name)
if self.get_param_value('TwoD', default_value=False):
if self.sp is None:
soft_sweep_params = self.get_param_value(
'soft_sweep_params')
if soft_sweep_params is not None:
yunit = list(soft_sweep_params.values())[0]['unit']
else:
yunit = self.raw_data_dict['sweep_parameter_units'][1]
if np.ndim(yunit) > 0:
yunit = yunit[0]
for pn, ssp in self.proc_data_dict['sweep_points_2D_dict'][
qb_name].items():
ylabel = pn
if self.sp is not None:
yunit = self.sp.get_sweep_params_property(
'unit', dimension=1, param_names=pn)
ylabel = self.sp.get_sweep_params_property(
'label', dimension=1, param_names=pn)
self.plot_dicts[f'{plot_dict_name}_{pn}'] = {
'plotfn': self.plot_colorxy,
'fig_id': fig_name + '_' + pn,
'xvals': xvals,
'yvals': ssp,
'zvals': yvals,
'xlabel': xlabel,
'xunit': xunit,
'ylabel': ylabel,
'yunit': yunit,
'zrange': self.get_param_value('zrange', None),
'title': title,
'clabel': data_axis_label}
else:
self.plot_dicts[plot_dict_name] = {
'plotfn': self.plot_line,
'fig_id': fig_name,
'plotsize': plotsize,
'xvals': xvals,
'xlabel': xlabel,
'xunit': xunit,
'yvals': yvals,
'ylabel': data_axis_label,
'yunit': '',
'setlabel': data_label,
'title': title,
'linestyle': 'none',
'do_legend': do_legend_data,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
# add plot_params to each plot dict
plot_params = self.get_param_value('plot_params', default_value={})
for plt_name in self.plot_dicts:
self.plot_dicts[plt_name].update(plot_params)
if len(plot_names_cal) > 0:
if do_legend_data and not do_legend_cal_states:
for plot_name in plot_names_cal:
plot_dict_cal = self.plot_dicts.pop(plot_name)
self.plot_dicts[plot_name] = plot_dict_cal
class Idling_Error_Rate_Analyisis(ba.BaseDataAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
post_sel_th = self.options_dict.get('post_sel_th', 0.5)
raw_shots = self.raw_data_dict['measured_values'][0][0]
post_sel_shots = raw_shots[::2]
data_shots = raw_shots[1::2]
data_shots[np.where(post_sel_shots > post_sel_th)] = np.nan
states = ['0', '1', '+']
self.proc_data_dict['xvals'] = np.unique(self.raw_data_dict['xvals'])
for i, state in enumerate(states):
self.proc_data_dict['shots_{}'.format(state)] = data_shots[i::3]
self.proc_data_dict['yvals_{}'.format(state)] = \
np.nanmean(np.reshape(self.proc_data_dict['shots_{}'.format(state)],
(len(self.proc_data_dict['xvals']), -1),
order='F'), axis=1)
def prepare_plots(self):
# assumes that value names are unique in an experiment
states = ['0', '1', '+']
for i, state in enumerate(states):
yvals = self.proc_data_dict['yvals_{}'.format(state)]
xvals = self.proc_data_dict['xvals']
self.plot_dicts['Prepare in {}'.format(state)] = {
'ax_id': 'main',
'plotfn': self.plot_line,
'xvals': xvals,
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': yvals,
'ylabel': 'Counts',
'yrange': [0, 1],
'xrange': self.options_dict.get('xrange', None),
'yunit': 'frac',
'setlabel': 'Prepare in {}'.format(state),
'do_legend':True,
'title': (self.raw_data_dict['timestamps'][0]+' - ' +
self.raw_data_dict['timestamps'][-1] + '\n' +
self.raw_data_dict['measurementstring'][0]),
'legend_pos': 'upper right'}
if self.do_fitting:
for state in ['0', '1', '+']:
self.plot_dicts['fit_{}'.format(state)] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['fit {}'.format(state)]['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'fit |{}>'.format(state),
'do_legend': True,
'legend_pos': 'upper right'}
self.plot_dicts['fit_text']={
'ax_id':'main',
'box_props': 'fancy',
'xpos':1.05,
'horizontalalignment':'left',
'plotfn': self.plot_text,
'text_string': self.proc_data_dict['fit_msg']}
def analyze_fit_results(self):
fit_msg = ''
states = ['0', '1', '+']
for state in states:
fr = self.fit_res['fit {}'.format(state)]
N1 = fr.params['N1'].value, fr.params['N1'].stderr
N2 = fr.params['N2'].value, fr.params['N2'].stderr
fit_msg += ('Prep |{}> : \n\tN_1 = {:.2g} $\pm$ {:.2g}'
'\n\tN_2 = {:.2g} $\pm$ {:.2g}\n').format(
state, N1[0], N1[1], N2[0], N2[1])
self.proc_data_dict['fit_msg'] = fit_msg
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
states = ['0', '1', '+']
for i, state in enumerate(states):
yvals = self.proc_data_dict['yvals_{}'.format(state)]
xvals = self.proc_data_dict['xvals']
mod = lmfit.Model(fit_mods.idle_error_rate_exp_decay)
mod.guess = fit_mods.idle_err_rate_guess.__get__(mod, mod.__class__)
# Done here explicitly so that I can overwrite a specific guess
guess_pars = mod.guess(N=xvals, data=yvals)
vary_N2 = self.options_dict.get('vary_N2', True)
if not vary_N2:
guess_pars['N2'].value = 1e21
guess_pars['N2'].vary = False
self.fit_dicts['fit {}'.format(states[i])] = {
'model': mod,
'fit_xvals': {'N': xvals},
'fit_yvals': {'data': yvals},
'guess_pars': guess_pars}
# Allows fixing the double exponential coefficient
class Grovers_TwoQubitAllStates_Analysis(ba.BaseDataAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
self.proc_data_dict = OrderedDict()
normalize_to_cal_points = self.options_dict.get('normalize_to_cal_points', True)
cal_points = [
[[-4, -3], [-2, -1]],
[[-4, -2], [-3, -1]],
]
for idx in [0,1]:
yvals = list(self.raw_data_dict['measured_data'].values())[idx][0]
self.proc_data_dict['ylabel_{}'.format(idx)] = \
self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
if normalize_to_cal_points:
yvals = a_tools.rotate_and_normalize_data_1ch(yvals,
cal_zero_points=cal_points[idx][0],
cal_one_points=cal_points[idx][1])
self.proc_data_dict['yvals_{}'.format(idx)] = yvals
y0 = self.proc_data_dict['yvals_0']
y1 = self.proc_data_dict['yvals_1']
p_success = ((y0[0]*y1[0]) +
(1-y0[1])*y1[1] +
(y0[2])*(1-y1[2]) +
(1-y0[3])*(1-y1[3]) )/4
self.proc_data_dict['p_success'] = p_success
def prepare_plots(self):
# assumes that value names are unique in an experiment
for i in [0, 1]:
yvals = self.proc_data_dict['yvals_{}'.format(i)]
xvals = self.raw_data_dict['xvals'][0]
ylabel = self.proc_data_dict['ylabel_{}'.format(i)]
self.plot_dicts['main_{}'.format(ylabel)] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['xvals'][0],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_{}'.format(i)],
'ylabel': ylabel,
'yunit': self.proc_data_dict['yunit'],
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': False,
'legend_pos': 'upper right'}
self.plot_dicts['limit_text']={
'ax_id':'main_{}'.format(ylabel),
'box_props': 'fancy',
'xpos':1.05,
'horizontalalignment':'left',
'plotfn': self.plot_text,
'text_string': 'P success = {:.3f}'.format(self.proc_data_dict['p_success'])}
class FlippingAnalysis(Single_Qubit_TimeDomainAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = True
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'measurementstring': 'measurementstring',
'sweep_points': 'sweep_points',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
# This analysis makes a hardcoded assumption on the calibration points
self.options_dict['cal_points'] = [list(range(-4, -2)),
list(range(-2, 0))]
self.numeric_params = []
if auto:
self.run_analysis()
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
# Even though we expect an exponentially damped oscillation we use
# a simple cosine as this gives more reliable fitting and we are only
# interested in extracting the frequency of the oscillation
cos_mod = lmfit.Model(fit_mods.CosFunc)
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.raw_data_dict['sweep_points'][:-4],
data=self.proc_data_dict['corr_data'][:-4])
# This enforces the oscillation to start at the equator
# and ensures that any over/under rotation is absorbed in the
# frequency
guess_pars['amplitude'].value = 0.5
guess_pars['amplitude'].vary = False
guess_pars['offset'].value = 0.5
guess_pars['offset'].vary = False
self.fit_dicts['cos_fit'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.raw_data_dict['sweep_points'][:-4]},
'fit_yvals': {'data': self.proc_data_dict['corr_data'][:-4]},
'guess_pars': guess_pars}
# In the case there are very few periods we fall back on a small
# angle approximation to extract the drive detuning
poly_mod = lmfit.models.PolynomialModel(degree=1)
# the detuning can be estimated using on a small angle approximation
# c1 = d/dN (cos(2*pi*f N) ) evaluated at N = 0 -> c1 = -2*pi*f
poly_mod.set_param_hint('frequency', expr='-c1/(2*pi)')
guess_pars = poly_mod.guess(x=self.raw_data_dict['sweep_points'][:-4],
data=self.proc_data_dict['corr_data'][:-4])
# Constraining the line ensures that it will only give a good fit
# if the small angle approximation holds
guess_pars['c0'].vary = False
guess_pars['c0'].value = 0.5
self.fit_dicts['line_fit'] = {
'model': poly_mod,
'fit_xvals': {'x': self.raw_data_dict['sweep_points'][:-4]},
'fit_yvals': {'data': self.proc_data_dict['corr_data'][:-4]},
'guess_pars': guess_pars}
def analyze_fit_results(self):
sf_line = self._get_scale_factor_line()
sf_cos = self._get_scale_factor_cos()
self.proc_data_dict['scale_factor'] = self.get_scale_factor()
msg = 'Scale fact. based on '
if self.proc_data_dict['scale_factor'] == sf_cos:
msg += 'cos fit\n'
else:
msg += 'line fit\n'
msg += 'cos fit: {:.4f}\n'.format(sf_cos)
msg += 'line fit: {:.4f}'.format(sf_line)
self.raw_data_dict['scale_factor_msg'] = msg
# TODO: save scale factor to file
def get_scale_factor(self):
"""
Returns the scale factor that should correct for the error in the
pulse amplitude.
"""
# Model selection based on the Bayesian Information Criterion (BIC)
# as calculated by lmfit
if (self.fit_dicts['line_fit']['fit_res'].bic <
self.fit_dicts['cos_fit']['fit_res'].bic):
scale_factor = self._get_scale_factor_line()
else:
scale_factor = self._get_scale_factor_cos()
return scale_factor
def _get_scale_factor_cos(self):
# 1/period of the oscillation corresponds to the (fractional)
# over/under rotation error per gate
frequency = self.fit_dicts['cos_fit']['fit_res'].params['frequency']
# the square is needed to account for the difference between
# power and amplitude
scale_factor = (1+frequency)**2
phase = np.rad2deg(self.fit_dicts['cos_fit']['fit_res'].params['phase']) % 360
# phase ~90 indicates an under rotation so the scale factor
# has to be larger than 1. A phase ~270 indicates an over
# rotation so then the scale factor has to be smaller than one.
if phase > 180:
scale_factor = 1/scale_factor
return scale_factor
def _get_scale_factor_line(self):
# 1/period of the oscillation corresponds to the (fractional)
# over/under rotation error per gate
frequency = self.fit_dicts['line_fit']['fit_res'].params['frequency']
scale_factor = (1+frequency)**2
# no phase sign check is needed here as this is contained in the
# sign of the coefficient
return scale_factor
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['sweep_points'],
'xlabel': self.raw_data_dict['xlabel'],
'xunit': self.raw_data_dict['xunit'], # does not do anything yet
'yvals': self.proc_data_dict['corr_data'],
'ylabel': 'Excited state population',
'yunit': '',
'setlabel': 'data',
'title': (self.raw_data_dict['timestamp'] + ' ' +
self.raw_data_dict['measurementstring']),
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'line fit',
'do_legend': True,
'legend_pos': 'upper right'}
self.plot_dicts['cos_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'cos fit',
'do_legend': True,
'legend_pos': 'upper right'}
self.plot_dicts['text_msg'] = {
'ax_id': 'main',
'ypos': 0.15,
'plotfn': self.plot_text,
'box_props': 'fancy',
'text_string': self.raw_data_dict['scale_factor_msg']}
class Intersect_Analysis(Single_Qubit_TimeDomainAnalysis):
"""
    Analysis to extract the intersect (crossing point) of two datasets.
    relevant options_dict parameters
        ch_idx_A (int) specifies the first channel used for the intersect
        ch_idx_B (int) specifies the second channel used for the intersect;
            if it is the same as the first, the data is assumed to have
            been taken interleaved.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xvals': 'sweep_points',
'xunit': 'sweep_unit',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
selects the relevant acq channel based on "ch_idx_A" and "ch_idx_B"
specified in the options dict. If ch_idx_A and ch_idx_B are the same
it will unzip the data.
"""
self.proc_data_dict = deepcopy(self.raw_data_dict)
# The channel containing the data must be specified in the options dict
ch_idx_A = self.options_dict.get('ch_idx_A', 0)
ch_idx_B = self.options_dict.get('ch_idx_B', 0)
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][ch_idx_A]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][ch_idx_A]
if ch_idx_A == ch_idx_B:
yvals = list(self.raw_data_dict['measured_data'].values())[ch_idx_A][0]
self.proc_data_dict['xvals_A'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_B'] = self.raw_data_dict['xvals'][0][1::2]
self.proc_data_dict['yvals_A'] = yvals[::2]
self.proc_data_dict['yvals_B'] = yvals[1::2]
else:
self.proc_data_dict['xvals_A'] = self.raw_data_dict['xvals'][0]
self.proc_data_dict['xvals_B'] = self.raw_data_dict['xvals'][0]
self.proc_data_dict['yvals_A'] = list(self.raw_data_dict
['measured_data'].values())[ch_idx_A][0]
self.proc_data_dict['yvals_B'] = list(self.raw_data_dict
['measured_data'].values())[ch_idx_B][0]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
self.fit_dicts['line_fit_A'] = {
'model': lmfit.models.PolynomialModel(degree=2),
'fit_xvals': {'x': self.proc_data_dict['xvals_A']},
'fit_yvals': {'data': self.proc_data_dict['yvals_A']}}
self.fit_dicts['line_fit_B'] = {
'model': lmfit.models.PolynomialModel(degree=2),
'fit_xvals': {'x': self.proc_data_dict['xvals_B']},
'fit_yvals': {'data': self.proc_data_dict['yvals_B']}}
def analyze_fit_results(self):
fr_0 = self.fit_res['line_fit_A'].best_values
fr_1 = self.fit_res['line_fit_B'].best_values
c0 = (fr_0['c0'] - fr_1['c0'])
c1 = (fr_0['c1'] - fr_1['c1'])
c2 = (fr_0['c2'] - fr_1['c2'])
poly_coeff = [c0, c1, c2]
poly = np.polynomial.polynomial.Polynomial([fr_0['c0'],
fr_0['c1'], fr_0['c2']])
ic = np.polynomial.polynomial.polyroots(poly_coeff)
self.proc_data_dict['intersect_L'] = ic[0], poly(ic[0])
self.proc_data_dict['intersect_R'] = ic[1], poly(ic[1])
        if (np.min(self.proc_data_dict['xvals']) < ic[0] and
                ic[0] < np.max(self.proc_data_dict['xvals'])):
            self.proc_data_dict['intersect'] = self.proc_data_dict['intersect_L']
        else:
            self.proc_data_dict['intersect'] = self.proc_data_dict['intersect_R']
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_A'],
'xlabel': self.proc_data_dict['xlabel'][0],
'xunit': self.proc_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_A'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'A',
'title': (self.proc_data_dict['timestamps'][0] + ' \n' +
self.proc_data_dict['measurementstring'][0]),
'do_legend': True,
'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_B'],
'xlabel': self.proc_data_dict['xlabel'][0],
'xunit': self.proc_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_B'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'B',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit_A'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_A']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit A',
'do_legend': True}
self.plot_dicts['line_fit_B'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_B']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit B',
'do_legend': True}
ic, ic_unit = SI_val_to_msg_str(
self.proc_data_dict['intersect'][0],
self.proc_data_dict['xunit'][0][0], return_type=float)
self.plot_dicts['intercept_message'] = {
'ax_id': 'main',
'plotfn': self.plot_line,
'xvals': [self.proc_data_dict['intersect'][0]],
'yvals': [self.proc_data_dict['intersect'][1]],
'line_kws': {'alpha': .5, 'color':'gray',
'markersize':15},
'marker': 'o',
'setlabel': 'Intercept: {:.1f} {}'.format(ic, ic_unit),
'do_legend': True}
def get_intersect(self):
return self.proc_data_dict['intersect']
class CZ_1QPhaseCal_Analysis(ba.BaseDataAnalysis):
"""
Analysis to extract the intercept for a single qubit phase calibration
experiment
N.B. this is a less generic version of "Intersect_Analysis" and should
be deprecated (MAR Dec 2017)
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
        selects the relevant acq channel based on "ch_idx" in the options
        dict and then splits the data into the CZ off and CZ on cases
"""
self.proc_data_dict = OrderedDict()
# The channel containing the data must be specified in the options dict
ch_idx = self.options_dict['ch_idx']
yvals = list(self.raw_data_dict['measured_data'].values())[ch_idx][0]
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][ch_idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][ch_idx]
self.proc_data_dict['xvals_off'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_on'] = self.raw_data_dict['xvals'][0][1::2]
self.proc_data_dict['yvals_off'] = yvals[::2]
self.proc_data_dict['yvals_on'] = yvals[1::2]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
self.fit_dicts['line_fit_off'] = {
'model': lmfit.models.PolynomialModel(degree=1),
'fit_xvals': {'x': self.proc_data_dict['xvals_off']},
'fit_yvals': {'data': self.proc_data_dict['yvals_off']}}
self.fit_dicts['line_fit_on'] = {
'model': lmfit.models.PolynomialModel(degree=1),
'fit_xvals': {'x': self.proc_data_dict['xvals_on']},
'fit_yvals': {'data': self.proc_data_dict['yvals_on']}}
def analyze_fit_results(self):
fr_0 = self.fit_res['line_fit_off'].best_values
fr_1 = self.fit_res['line_fit_on'].best_values
ic = -(fr_0['c0'] - fr_1['c0'])/(fr_0['c1'] - fr_1['c1'])
self.proc_data_dict['zero_phase_diff_intersect'] = ic
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_off'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_on'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['line_fit_off'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_off']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ off',
'do_legend': True}
self.plot_dicts['line_fit_on'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['line_fit_on']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ on',
'do_legend': True}
ic, ic_unit = SI_val_to_msg_str(
self.proc_data_dict['zero_phase_diff_intersect'],
self.raw_data_dict['xunit'][0][0], return_type=float)
self.plot_dicts['intercept_message'] = {
'ax_id': 'main',
'plotfn': self.plot_line,
'xvals': [self.proc_data_dict['zero_phase_diff_intersect']],
'yvals': [np.mean(self.proc_data_dict['xvals_on'])],
'line_kws': {'alpha': 0},
'setlabel': 'Intercept: {:.1f} {}'.format(ic, ic_unit),
'do_legend': True}
def get_zero_phase_diff_intersect(self):
return self.proc_data_dict['zero_phase_diff_intersect']
class Oscillation_Analysis(ba.BaseDataAnalysis):
"""
Very basic analysis to determine the phase of a single oscillation
that has an assumed period of 360 degrees.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
label: str='',
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
self.proc_data_dict = OrderedDict()
idx = 1
self.proc_data_dict['yvals'] = list(self.raw_data_dict['measured_data'].values())[idx][0]
self.proc_data_dict['ylabel'] = self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.raw_data_dict['xvals'][0],
data=self.proc_data_dict['yvals'], freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts['cos_fit'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.raw_data_dict['xvals'][0]},
'fit_yvals': {'data': self.proc_data_dict['yvals']},
'guess_pars': guess_pars}
def analyze_fit_results(self):
fr = self.fit_res['cos_fit'].best_values
self.proc_data_dict['phi'] = np.rad2deg(fr['phase'])
def prepare_plots(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.raw_data_dict['xvals'][0],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['cos_fit'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit',
'do_legend': True}
class Conditional_Oscillation_Analysis(ba.BaseDataAnalysis):
"""
    Analysis to extract quantities from a conditional oscillation, such as
    the conditional phase, the oscillation amplitudes and offsets, and the
    missing fraction on the spectator qubit.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
data_file_path: str=None,
label: str='',
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'xvals': 'sweep_points',
'measurementstring': 'measurementstring',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
selects the relevant acq channel based on "ch_idx_osc" and
"ch_idx_spec" in the options dict and then splits the data for the
off and on cases
"""
self.proc_data_dict = OrderedDict()
# The channel containing the data must be specified in the options dict
ch_idx_spec = self.options_dict.get('ch_idx_spec', 0)
ch_idx_osc = self.options_dict.get('ch_idx_osc', 1)
normalize_to_cal_points = self.options_dict.get('normalize_to_cal_points', True)
cal_points = [
[[-4, -3], [-2, -1]],
[[-4, -2], [-3, -1]],
]
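        # Assumed structure of the hardcoded calibration points above:
        # one [cal_zero_points, cal_one_points] pair per channel, the first
        # for the oscillating qubit and the second for the spectator qubit,
        # matching the loop order over [ch_idx_osc, ch_idx_spec] below.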
i = 0
for idx, type_str in zip([ch_idx_osc, ch_idx_spec], ['osc', 'spec']):
yvals = list(self.raw_data_dict['measured_data'].values())[idx][0]
self.proc_data_dict['ylabel_{}'.format(type_str)] = self.raw_data_dict['value_names'][0][idx]
self.proc_data_dict['yunit'] = self.raw_data_dict['value_units'][0][idx]
if normalize_to_cal_points:
yvals = a_tools.rotate_and_normalize_data_1ch(yvals,
cal_zero_points=cal_points[i][0],
cal_one_points=cal_points[i][1])
i +=1
self.proc_data_dict['yvals_{}_off'.format(type_str)] = yvals[::2]
self.proc_data_dict['yvals_{}_on'.format(type_str)] = yvals[1::2]
self.proc_data_dict['xvals_off'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_on'] = self.raw_data_dict['xvals'][0][1::2]
else:
self.proc_data_dict['yvals_{}_off'.format(type_str)] = yvals[::2]
self.proc_data_dict['yvals_{}_on'.format(type_str)] = yvals[1::2]
self.proc_data_dict['xvals_off'] = self.raw_data_dict['xvals'][0][::2]
self.proc_data_dict['xvals_on'] = self.raw_data_dict['xvals'][0][1::2]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.proc_data_dict['xvals_off'][:-2],
data=self.proc_data_dict['yvals_osc_off'][:-2],
freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts['cos_fit_off'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.proc_data_dict['xvals_off'][:-2]},
'fit_yvals': {'data': self.proc_data_dict['yvals_osc_off'][:-2]},
'guess_pars': guess_pars}
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.proc_data_dict['xvals_on'][:-2],
data=self.proc_data_dict['yvals_osc_on'][:-2],
freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts['cos_fit_on'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.proc_data_dict['xvals_on'][:-2]},
'fit_yvals': {'data': self.proc_data_dict['yvals_osc_on'][:-2]},
'guess_pars': guess_pars}
def analyze_fit_results(self):
fr_0 = self.fit_res['cos_fit_off'].params
fr_1 = self.fit_res['cos_fit_on'].params
phi0 = np.rad2deg(fr_0['phase'].value)
phi1 = np.rad2deg(fr_1['phase'].value)
phi0_stderr = np.rad2deg(fr_0['phase'].stderr)
phi1_stderr = np.rad2deg(fr_1['phase'].stderr)
self.proc_data_dict['phi_0'] = phi0, phi0_stderr
self.proc_data_dict['phi_1'] = phi1, phi1_stderr
phi_cond_stderr = (phi0_stderr**2+phi1_stderr**2)**.5
self.proc_data_dict['phi_cond'] = (phi1 -phi0), phi_cond_stderr
        osc_amp = np.mean([fr_0['amplitude'], fr_1['amplitude']])
        osc_amp_stderr = np.sqrt(fr_0['amplitude'].stderr**2 +
                                 fr_1['amplitude'].stderr**2)/2
self.proc_data_dict['osc_amp_0'] = (fr_0['amplitude'].value,
fr_0['amplitude'].stderr)
self.proc_data_dict['osc_amp_1'] = (fr_1['amplitude'].value,
fr_1['amplitude'].stderr)
self.proc_data_dict['osc_offs_0'] = (fr_0['offset'].value,
fr_0['offset'].stderr)
self.proc_data_dict['osc_offs_1'] = (fr_1['offset'].value,
fr_1['offset'].stderr)
offs_stderr = (fr_0['offset'].stderr**2+fr_1['offset'].stderr**2)**.5
self.proc_data_dict['offs_diff'] = (
fr_1['offset'].value - fr_0['offset'].value, offs_stderr)
# self.proc_data_dict['osc_amp'] = (osc_amp, osc_amp_stderr)
self.proc_data_dict['missing_fraction'] = (
np.mean(self.proc_data_dict['yvals_spec_on'][:-2]) -
np.mean(self.proc_data_dict['yvals_spec_off'][:-2]))
def prepare_plots(self):
self._prepare_main_oscillation_figure()
self._prepare_spectator_qubit_figure()
def _prepare_main_oscillation_figure(self):
self.plot_dicts['main'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_osc_off'],
'ylabel': self.proc_data_dict['ylabel_osc'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['on'] = {
'plotfn': self.plot_line,
'ax_id': 'main',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_osc_on'],
'ylabel': self.proc_data_dict['ylabel_osc'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
self.plot_dicts['cos_fit_off'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_off']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ off',
'do_legend': True}
self.plot_dicts['cos_fit_on'] = {
'ax_id': 'main',
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_on']['fit_res'],
'plot_init': self.options_dict['plot_init'],
'setlabel': 'Fit CZ on',
'do_legend': True}
# offset as a guide for the eye
y = self.fit_res['cos_fit_off'].params['offset'].value
self.plot_dicts['cos_off_offset'] ={
'plotfn': self.plot_matplot_ax_method,
'ax_id':'main',
'func': 'axhline',
'plot_kws': {
'y': y, 'color': 'C0', 'linestyle': 'dotted'}
}
phase_message = (
            'Phase diff.: {:.1f} $\\pm$ {:.1f} deg\n'
            'Phase off: {:.1f} $\\pm$ {:.1f} deg\n'
            'Phase on: {:.1f} $\\pm$ {:.1f} deg\n'
            'Osc. amp. off: {:.4f} $\\pm$ {:.4f}\n'
            'Osc. amp. on: {:.4f} $\\pm$ {:.4f}\n'
            'Offs. diff.: {:.4f} $\\pm$ {:.4f}\n'
            'Osc. offs. off: {:.4f} $\\pm$ {:.4f}\n'
            'Osc. offs. on: {:.4f} $\\pm$ {:.4f}'.format(
self.proc_data_dict['phi_cond'][0],
self.proc_data_dict['phi_cond'][1],
self.proc_data_dict['phi_0'][0],
self.proc_data_dict['phi_0'][1],
self.proc_data_dict['phi_1'][0],
self.proc_data_dict['phi_1'][1],
self.proc_data_dict['osc_amp_0'][0],
self.proc_data_dict['osc_amp_0'][1],
self.proc_data_dict['osc_amp_1'][0],
self.proc_data_dict['osc_amp_1'][1],
self.proc_data_dict['offs_diff'][0],
self.proc_data_dict['offs_diff'][1],
self.proc_data_dict['osc_offs_0'][0],
self.proc_data_dict['osc_offs_0'][1],
self.proc_data_dict['osc_offs_1'][0],
self.proc_data_dict['osc_offs_1'][1]))
self.plot_dicts['phase_message'] = {
'ax_id': 'main',
'ypos': 0.9,
'xpos': 1.45,
'plotfn': self.plot_text,
'box_props': 'fancy',
'line_kws': {'alpha': 0},
'text_string': phase_message}
def _prepare_spectator_qubit_figure(self):
self.plot_dicts['spectator_qubit'] = {
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['xvals_off'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_spec_off'],
'ylabel': self.proc_data_dict['ylabel_spec'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ off',
'title': (self.raw_data_dict['timestamps'][0] + ' \n' +
self.raw_data_dict['measurementstring'][0]),
'do_legend': True,
# 'yrange': (0,1),
'legend_pos': 'upper right'}
self.plot_dicts['spec_on'] = {
'plotfn': self.plot_line,
'ax_id': 'spectator_qubit',
'xvals': self.proc_data_dict['xvals_on'],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': self.proc_data_dict['yvals_spec_on'],
'ylabel': self.proc_data_dict['ylabel_spec'],
'yunit': self.proc_data_dict['yunit'],
'setlabel': 'CZ on',
'do_legend': True,
'legend_pos': 'upper right'}
if self.do_fitting:
leak_msg = (
'Missing fraction: {:.2f} % '.format(
self.proc_data_dict['missing_fraction']*100))
self.plot_dicts['leak_msg'] = {
'ax_id': 'spectator_qubit',
'ypos': 0.7,
'plotfn': self.plot_text,
'box_props': 'fancy',
'line_kws': {'alpha': 0},
'text_string': leak_msg}
# offset as a guide for the eye
y = self.fit_res['cos_fit_on'].params['offset'].value
self.plot_dicts['cos_on_offset'] ={
'plotfn': self.plot_matplot_ax_method,
'ax_id':'main',
'func': 'axhline',
'plot_kws': {
'y': y, 'color': 'C1', 'linestyle': 'dotted'}
}
class StateTomographyAnalysis(ba.BaseDataAnalysis):
"""
Analyses the results of the state tomography experiment and calculates
the corresponding quantum state.
Possible options that can be passed in the options_dict parameter:
cal_points: A data structure specifying the indices of the calibration
points. See the AveragedTimedomainAnalysis for format.
The calibration points need to be in the same order as the
used basis for the result.
data_type: 'averaged' or 'singleshot'. For singleshot data each
measurement outcome is saved and arbitrary order correlations
between the states can be calculated.
meas_operators: (optional) A list of qutip operators or numpy 2d arrays.
This overrides the measurement operators otherwise
found from the calibration points.
covar_matrix: (optional) The covariance matrix of the measurement
operators as a 2d numpy array. Overrides the one found
from the calibration points.
use_covariance_matrix (bool): Flag to define whether to use the
covariance matrix
basis_rots_str: A list of standard PycQED pulse names that were
applied to qubits before measurement
        basis_rots: As an alternative to basis_rots_str, the basis
rotations applied to the system as qutip operators or numpy
matrices can be given.
mle: True/False, whether to do maximum likelihood fit. If False, only
least squares fit will be done, which could give negative
eigenvalues for the density matrix.
imle: True/False, whether to do iterative maximum likelihood fit. If
True, it takes preference over maximum likelihood method. Otherwise
least squares fit will be done, then 'mle' option will be checked.
pauli_raw: True/False, extracts Pauli expected values from a measurement
without assignment correction based on calibration data. If True,
            takes preference over other methods except pauli_values.
pauli_values: True/False, extracts Pauli expected values from a
measurement with assignment correction based on calibration data.
If True, takes preference over other methods.
iterations (optional): maximum number of iterations allowed in imle.
Tomographies with more qubits require more iterations to converge.
tolerance (optional): minimum change across iterations allowed in imle.
The iteration will stop if it goes under this value. Tomographies
with more qubits require smaller tolerance to converge.
rho_target (optional): A qutip density matrix that the result will be
compared to when calculating fidelity.
"""
def __init__(self, *args, **kwargs):
auto = kwargs.pop('auto', True)
super().__init__(*args, **kwargs)
kwargs['auto'] = auto
self.single_timestamp = True
self.params_dict = {'exp_metadata': 'exp_metadata'}
self.numeric_params = []
self.data_type = self.options_dict['data_type']
if self.data_type == 'averaged':
self.base_analysis = AveragedTimedomainAnalysis(*args, **kwargs)
elif self.data_type == 'singleshot':
self.base_analysis = roa.MultiQubit_SingleShot_Analysis(
*args, **kwargs)
else:
raise KeyError("Invalid tomography data mode: '" + self.data_type +
"'. Valid modes are 'averaged' and 'singleshot'.")
if kwargs.get('auto', True):
self.run_analysis()
def process_data(self):
tomography_qubits = self.options_dict.get('tomography_qubits', None)
data, Fs, Omega = self.base_analysis.measurement_operators_and_results(
tomography_qubits)
if 'data_filter' in self.options_dict:
data = self.options_dict['data_filter'](data.T).T
data = data.T
for i, v in enumerate(data):
data[i] = v / v.sum()
data = data.T
Fs = self.options_dict.get('meas_operators', Fs)
Fs = [qtp.Qobj(F) for F in Fs]
d = Fs[0].shape[0]
self.proc_data_dict['d'] = d
Omega = self.options_dict.get('covar_matrix', Omega)
if Omega is None:
Omega = np.diag(np.ones(len(Fs)))
elif len(Omega.shape) == 1:
Omega = np.diag(Omega)
metadata = self.raw_data_dict.get('exp_metadata',
self.options_dict.get(
'exp_metadata', {}))
if metadata is None:
metadata = {}
self.raw_data_dict['exp_metadata'] = metadata
basis_rots_str = metadata.get('basis_rots_str', None)
basis_rots_str = self.options_dict.get('basis_rots_str', basis_rots_str)
if basis_rots_str is not None:
nr_qubits = int(np.round(np.log2(d)))
pulse_list = list(itertools.product(basis_rots_str,
repeat=nr_qubits))
rotations = tomo.standard_qubit_pulses_to_rotations(pulse_list)
else:
rotations = metadata.get('basis_rots', None)
rotations = self.options_dict.get('basis_rots', rotations)
if rotations is None:
raise KeyError("Either 'basis_rots_str' or 'basis_rots' "
"parameter must be passed in the options "
"dictionary or in the experimental metadata.")
rotations = [qtp.Qobj(U) for U in rotations]
all_Fs = tomo.rotated_measurement_operators(rotations, Fs)
        all_Fs = list(itertools.chain(*np.array(all_Fs, dtype=object).T))
all_mus = np.array(list(itertools.chain(*data.T)))
all_Omegas = sp.linalg.block_diag(*[Omega] * len(data[0]))
self.proc_data_dict['meas_operators'] = all_Fs
self.proc_data_dict['covar_matrix'] = all_Omegas
self.proc_data_dict['meas_results'] = all_mus
if self.options_dict.get('pauli_values', False):
rho_pauli = tomo.pauli_values_tomography(all_mus,Fs,basis_rots_str)
self.proc_data_dict['rho_raw'] = rho_pauli
self.proc_data_dict['rho'] = rho_pauli
elif self.options_dict.get('pauli_raw', False):
pauli_raw = self.generate_raw_pauli_set()
rho_raw = tomo.pauli_set_to_density_matrix(pauli_raw)
self.proc_data_dict['rho_raw'] = rho_raw
self.proc_data_dict['rho'] = rho_raw
elif self.options_dict.get('imle', False):
it = metadata.get('iterations', None)
it = self.options_dict.get('iterations', it)
tol = metadata.get('tolerance', None)
tol = self.options_dict.get('tolerance', tol)
rho_imle = tomo.imle_tomography(
all_mus, all_Fs, it, tol)
self.proc_data_dict['rho_imle'] = rho_imle
self.proc_data_dict['rho'] = rho_imle
else:
rho_ls = tomo.least_squares_tomography(
all_mus, all_Fs,
all_Omegas if self.get_param_value('use_covariance_matrix', False)
else None )
self.proc_data_dict['rho_ls'] = rho_ls
self.proc_data_dict['rho'] = rho_ls
if self.options_dict.get('mle', False):
rho_mle = tomo.mle_tomography(
all_mus, all_Fs,
all_Omegas if self.get_param_value('use_covariance_matrix', False) else None,
rho_guess=rho_ls)
self.proc_data_dict['rho_mle'] = rho_mle
self.proc_data_dict['rho'] = rho_mle
rho = self.proc_data_dict['rho']
self.proc_data_dict['purity'] = (rho * rho).tr().real
rho_target = metadata.get('rho_target', None)
rho_target = self.options_dict.get('rho_target', rho_target)
if rho_target is not None:
self.proc_data_dict['fidelity'] = tomo.fidelity(rho, rho_target)
if d == 4:
self.proc_data_dict['concurrence'] = tomo.concurrence(rho)
else:
self.proc_data_dict['concurrence'] = 0
def prepare_plots(self):
self.prepare_density_matrix_plot()
d = self.proc_data_dict['d']
if 2 ** (d.bit_length() - 1) == d:
# dimension is power of two, plot expectation values of pauli
# operators
self.prepare_pauli_basis_plot()
def prepare_density_matrix_plot(self):
self.tight_fig = self.options_dict.get('tight_fig', False)
rho_target = self.raw_data_dict['exp_metadata'].get('rho_target', None)
rho_target = self.options_dict.get('rho_target', rho_target)
d = self.proc_data_dict['d']
xtick_labels = self.options_dict.get('rho_ticklabels', None)
ytick_labels = self.options_dict.get('rho_ticklabels', None)
if 2 ** (d.bit_length() - 1) == d:
nr_qubits = d.bit_length() - 1
fmt_string = '{{:0{}b}}'.format(nr_qubits)
labels = [fmt_string.format(i) for i in range(2 ** nr_qubits)]
if xtick_labels is None:
xtick_labels = ['$|' + lbl + r'\rangle$' for lbl in labels]
if ytick_labels is None:
ytick_labels = [r'$\langle' + lbl + '|$' for lbl in labels]
color = (0.5 * np.angle(self.proc_data_dict['rho'].full()) / np.pi) % 1.
cmap = self.options_dict.get('rho_colormap', self.default_phase_cmap())
        if self.options_dict.get('pauli_values', False):
            title = 'Density matrix reconstructed from the Pauli set\n'
        elif self.options_dict.get('pauli_raw', False):
            title = 'Density matrix reconstructed from the Pauli (raw) set\n'
        elif self.options_dict.get('imle', False):
            title = 'Iterative maximum likelihood fit of the density matrix\n'
        elif self.options_dict.get('mle', False):
            title = 'Maximum likelihood fit of the density matrix\n'
else:
title = 'Least squares fit of the density matrix\n'
empty_artist = mpl.patches.Rectangle((0, 0), 0, 0, visible=False)
legend_entries = [(empty_artist,
r'Purity, $Tr(\rho^2) = {:.1f}\%$'.format(
100 * self.proc_data_dict['purity']))]
if rho_target is not None:
legend_entries += [
(empty_artist, r'Fidelity, $F = {:.1f}\%$'.format(
100 * self.proc_data_dict['fidelity']))]
if d == 4:
legend_entries += [
(empty_artist, r'Concurrence, $C = {:.2f}$'.format(
self.proc_data_dict['concurrence']))]
meas_string = self.base_analysis.\
raw_data_dict['measurementstring']
if isinstance(meas_string, list):
if len(meas_string) > 1:
meas_string = meas_string[0] + ' to ' + meas_string[-1]
else:
meas_string = meas_string[0]
self.plot_dicts['density_matrix'] = {
'plotfn': self.plot_bar3D,
'3d': True,
'3d_azim': -35,
'3d_elev': 35,
'xvals': np.arange(d),
'yvals': np.arange(d),
'zvals': np.abs(self.proc_data_dict['rho'].full()),
'zrange': (0, 1),
'color': color,
'colormap': cmap,
'bar_widthx': 0.5,
'bar_widthy': 0.5,
'xtick_loc': np.arange(d),
'xtick_labels': xtick_labels,
'ytick_loc': np.arange(d),
'ytick_labels': ytick_labels,
'ctick_loc': np.linspace(0, 1, 5),
'ctick_labels': ['$0$', r'$\frac{1}{2}\pi$', r'$\pi$',
r'$\frac{3}{2}\pi$', r'$2\pi$'],
'clabel': 'Phase (rad)',
'title': (title + self.raw_data_dict['timestamp'] + ' ' +
meas_string),
'do_legend': True,
'legend_entries': legend_entries,
'legend_kws': dict(loc='upper left', bbox_to_anchor=(0, 0.94))
}
if rho_target is not None:
rho_target = qtp.Qobj(rho_target)
if rho_target.type == 'ket':
rho_target = rho_target * rho_target.dag()
elif rho_target.type == 'bra':
rho_target = rho_target.dag() * rho_target
self.plot_dicts['density_matrix_target'] = {
'plotfn': self.plot_bar3D,
'3d': True,
'3d_azim': -35,
'3d_elev': 35,
'xvals': np.arange(d),
'yvals': np.arange(d),
'zvals': np.abs(rho_target.full()),
'zrange': (0, 1),
'color': (0.5 * np.angle(rho_target.full()) / np.pi) % 1.,
'colormap': cmap,
'bar_widthx': 0.5,
'bar_widthy': 0.5,
'xtick_loc': np.arange(d),
'xtick_labels': xtick_labels,
'ytick_loc': np.arange(d),
'ytick_labels': ytick_labels,
'ctick_loc': np.linspace(0, 1, 5),
'ctick_labels': ['$0$', r'$\frac{1}{2}\pi$', r'$\pi$',
r'$\frac{3}{2}\pi$', r'$2\pi$'],
'clabel': 'Phase (rad)',
'title': ('Target density matrix\n' +
self.raw_data_dict['timestamp'] + ' ' +
meas_string),
'bar_kws': dict(zorder=1),
}
def generate_raw_pauli_set(self):
nr_qubits = self.proc_data_dict['d'].bit_length() - 1
pauli_raw_values = []
for op in tomo.generate_pauli_set(nr_qubits)[1]:
nr_terms = 0
sum_terms = 0.
for meas_op, meas_res in zip(self.proc_data_dict['meas_operators'],
self.proc_data_dict['meas_results']):
trace = (meas_op*op).tr().real
clss = int(trace*2)
if clss < 0:
sum_terms -= meas_res
nr_terms += 1
elif clss > 0:
sum_terms += meas_res
nr_terms += 1
pauli_raw_values.append(2**nr_qubits*sum_terms/nr_terms)
return pauli_raw_values
def generate_corr_pauli_set(self,Fs,rotations):
nr_qubits = self.proc_data_dict['d'].bit_length() - 1
Fs_corr = []
assign_corr = []
for i,F in enumerate(Fs):
new_op = np.zeros(2**nr_qubits)
new_op[i] = 1
Fs_corr.append(qtp.Qobj(np.diag(new_op)))
assign_corr.append(np.diag(F.full()))
pauli_Fs = tomo.rotated_measurement_operators(rotations, Fs_corr)
        pauli_Fs = list(itertools.chain(*np.array(pauli_Fs, dtype=object).T))
mus = self.proc_data_dict['meas_results']
pauli_mus = np.reshape(mus,[-1,2**nr_qubits])
for i,raw_mus in enumerate(pauli_mus):
pauli_mus[i] = np.matmul(np.linalg.inv(assign_corr),np.array(raw_mus))
pauli_mus = pauli_mus.flatten()
pauli_values = []
for op in tomo.generate_pauli_set(nr_qubits)[1]:
nr_terms = 0
sum_terms = 0.
for meas_op, meas_res in zip(pauli_Fs,pauli_mus):
trace = (meas_op*op).tr().real
clss = int(trace*2)
if clss < 0:
sum_terms -= meas_res
nr_terms += 1
elif clss > 0:
sum_terms += meas_res
nr_terms += 1
pauli_values.append(2**nr_qubits*sum_terms/nr_terms)
return pauli_values
def prepare_pauli_basis_plot(self):
yexp = tomo.density_matrix_to_pauli_basis(self.proc_data_dict['rho'])
nr_qubits = self.proc_data_dict['d'].bit_length() - 1
labels = list(itertools.product(*[['I', 'X', 'Y', 'Z']]*nr_qubits))
labels = [''.join(label_list) for label_list in labels]
if nr_qubits == 1:
order = [1, 2, 3]
elif nr_qubits == 2:
order = [1, 2, 3, 4, 8, 12, 5, 6, 7, 9, 10, 11, 13, 14, 15]
elif nr_qubits == 3:
order = [1, 2, 3, 4, 8, 12, 16, 32, 48] + \
[5, 6, 7, 9, 10, 11, 13, 14, 15] + \
[17, 18, 19, 33, 34, 35, 49, 50, 51] + \
[20, 24, 28, 36, 40, 44, 52, 56, 60] + \
[21, 22, 23, 25, 26, 27, 29, 30, 31] + \
[37, 38, 39, 41, 42, 43, 45, 46, 47] + \
[53, 54, 55, 57, 58, 59, 61, 62, 63]
else:
order = np.arange(4**nr_qubits)[1:]
        if self.options_dict.get('pauli_values', False):
            fit_type = 'corrected counts'
        elif self.options_dict.get('pauli_raw', False):
            fit_type = 'raw counts'
        elif self.options_dict.get('imle', False):
            fit_type = 'iterative maximum likelihood estimation'
        elif self.options_dict.get('mle', False):
            fit_type = 'maximum likelihood estimation'
        else:
            fit_type = 'least squares fit'
meas_string = self.base_analysis. \
raw_data_dict['measurementstring']
if np.ndim(meas_string) > 0:
if len(meas_string) > 1:
meas_string = meas_string[0] + ' to ' + meas_string[-1]
else:
meas_string = meas_string[0]
self.plot_dicts['pauli_basis'] = {
'plotfn': self.plot_bar,
'xcenters': np.arange(len(order)),
'xwidth': 0.4,
'xrange': (-1, len(order)),
'yvals': np.array(yexp)[order],
'xlabel': r'Pauli operator, $\hat{O}$',
'ylabel': r'Expectation value, $\mathrm{Tr}(\hat{O} \hat{\rho})$',
'title': 'Pauli operators, ' + fit_type + '\n' +
self.raw_data_dict['timestamp'] + ' ' + meas_string,
'yrange': (-1.1, 1.1),
'xtick_loc': np.arange(4**nr_qubits - 1),
'xtick_rotation': 90,
'xtick_labels': np.array(labels)[order],
'bar_kws': dict(zorder=10),
'setlabel': 'Fit to experiment',
'do_legend': True
}
if nr_qubits > 2:
self.plot_dicts['pauli_basis']['plotsize'] = (10, 5)
rho_target = self.raw_data_dict['exp_metadata'].get('rho_target', None)
rho_target = self.options_dict.get('rho_target', rho_target)
if rho_target is not None:
rho_target = qtp.Qobj(rho_target)
ytar = tomo.density_matrix_to_pauli_basis(rho_target)
self.plot_dicts['pauli_basis_target'] = {
'plotfn': self.plot_bar,
'ax_id': 'pauli_basis',
'xcenters': np.arange(len(order)),
'xwidth': 0.8,
'yvals': np.array(ytar)[order],
'xtick_loc': np.arange(len(order)),
'xtick_labels': np.array(labels)[order],
'bar_kws': dict(color='0.8', zorder=0),
'setlabel': 'Target values',
'do_legend': True
}
purity_str = r'Purity, $Tr(\rho^2) = {:.1f}\%$'.format(
100 * self.proc_data_dict['purity'])
if rho_target is not None:
fidelity_str = '\n' + r'Fidelity, $F = {:.1f}\%$'.format(
100 * self.proc_data_dict['fidelity'])
else:
fidelity_str = ''
if self.proc_data_dict['d'] == 4:
concurrence_str = '\n' + r'Concurrence, $C = {:.1f}\%$'.format(
100 * self.proc_data_dict['concurrence'])
else:
concurrence_str = ''
self.plot_dicts['pauli_info_labels'] = {
'ax_id': 'pauli_basis',
'plotfn': self.plot_line,
'xvals': [0],
'yvals': [0],
'line_kws': {'alpha': 0},
'setlabel': purity_str + fidelity_str,
'do_legend': True
}
def default_phase_cmap(self):
cols = np.array(((41, 39, 231), (61, 130, 163), (208, 170, 39),
(209, 126, 4), (181, 28, 20), (238, 76, 152),
(251, 130, 242), (162, 112, 251))) / 255
n = len(cols)
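        # The i % n wrap-around makes the first and last color stops
        # identical, so the resulting map is cyclic and suitable for
        # encoding a phase.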
cdict = {
'red': [[i/n, cols[i%n][0], cols[i%n][0]] for i in range(n+1)],
'green': [[i/n, cols[i%n][1], cols[i%n][1]] for i in range(n+1)],
'blue': [[i/n, cols[i%n][2], cols[i%n][2]] for i in range(n+1)],
}
return mpl.colors.LinearSegmentedColormap('DMDefault', cdict)
class ReadoutROPhotonsAnalysis(Single_Qubit_TimeDomainAnalysis):
"""
Analyses the photon number in the RO based on the
readout_photons_in_resonator function
    function specific options for options dict:
        kappa_effective (required)
        chi (required)
        T2echo (required)
        artif_detuning (optional, default 0)
        print_fit_results (optional)
"""
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
close_figs: bool=False, options_dict: dict=None,
extract_only: bool=False, do_fitting: bool=False,
auto: bool=True):
super().__init__(t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
close_figs=close_figs, label=label,
extract_only=extract_only, do_fitting=do_fitting)
if self.options_dict.get('TwoD', None) is None:
self.options_dict['TwoD'] = True
self.label = label
self.params_dict = {
'measurementstring': 'measurementstring',
'sweep_points': 'sweep_points',
'sweep_points_2D': 'sweep_points_2D',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'}
self.numeric_params = self.options_dict.get('numeric_params',
OrderedDict())
self.kappa = self.options_dict.get('kappa_effective', None)
self.chi = self.options_dict.get('chi', None)
self.T2 = self.options_dict.get('T2echo', None)
self.artif_detuning = self.options_dict.get('artif_detuning', 0)
if (self.kappa is None) or (self.chi is None) or (self.T2 is None):
raise ValueError('kappa_effective, chi and T2echo must be passed to '
'the options_dict.')
if auto:
self.run_analysis()
def process_data(self):
self.proc_data_dict = OrderedDict()
self.proc_data_dict['qubit_state'] = [[],[]]
self.proc_data_dict['delay_to_relax'] = self.raw_data_dict[
'sweep_points_2D'][0]
self.proc_data_dict['ramsey_times'] = []
for i,x in enumerate(np.transpose(self.raw_data_dict[
'measured_data']['raw w0 _measure'][0])):
self.proc_data_dict['qubit_state'][0].append([])
self.proc_data_dict['qubit_state'][1].append([])
for j,y in enumerate(np.transpose(self.raw_data_dict[
'measured_data']['raw w0 _measure'][0])[i]):
if j%2 == 0:
self.proc_data_dict['qubit_state'][0][i].append(y)
else:
self.proc_data_dict['qubit_state'][1][i].append(y)
for i,x in enumerate( self.raw_data_dict['sweep_points'][0]):
if i % 2 == 0:
self.proc_data_dict['ramsey_times'].append(x)
#I STILL NEED to pass Chi
def prepare_fitting(self):
self.proc_data_dict['photon_number'] = [[],[]]
self.proc_data_dict['fit_results'] = []
self.proc_data_dict['ramsey_fit_results'] = [[],[]]
for i,tau in enumerate(self.proc_data_dict['delay_to_relax']):
self.proc_data_dict['ramsey_fit_results'][0].append(self.fit_Ramsey(
self.proc_data_dict['ramsey_times'][:-4],
self.proc_data_dict['qubit_state'][0][i][:-4]/
max(self.proc_data_dict['qubit_state'][0][i][:-4]),
state=0,
kw=self.options_dict))
self.proc_data_dict['ramsey_fit_results'][1].append(self.fit_Ramsey(
self.proc_data_dict['ramsey_times'][:-4],
self.proc_data_dict['qubit_state'][1][i][:-4]/
max(self.proc_data_dict['qubit_state'][1][i][:-4]),
state=1,
kw=self.options_dict))
n01 = self.proc_data_dict['ramsey_fit_results'
][0][i][0].params['n0'].value
n02 = self.proc_data_dict['ramsey_fit_results'
][1][i][0].params['n0'].value
self.proc_data_dict['photon_number'][0].append(n01)
self.proc_data_dict['photon_number'][1].append(n02)
def run_fitting(self):
        print_fit_results = self.options_dict.get('print_fit_results', False)
exp_dec_mod = lmfit.Model(fit_mods.ExpDecayFunc)
exp_dec_mod.set_param_hint('n',
value=1,
vary=False)
exp_dec_mod.set_param_hint('offset',
value=0,
min=0,
vary=True)
exp_dec_mod.set_param_hint('tau',
value=self.proc_data_dict[
'delay_to_relax'][-1],
min=1e-11,
vary=True)
exp_dec_mod.set_param_hint('amplitude',
value=1,
min=0,
vary=True)
params = exp_dec_mod.make_params()
self.fit_res = OrderedDict()
self.fit_res['ground_state'] = exp_dec_mod.fit(
data=self.proc_data_dict['photon_number'][0],
params=params,
t=self.proc_data_dict['delay_to_relax'])
self.fit_res['excited_state'] = exp_dec_mod.fit(
data=self.proc_data_dict['photon_number'][1],
params=params,
t=self.proc_data_dict['delay_to_relax'])
if print_fit_results:
print(self.fit_res['ground_state'].fit_report())
print(self.fit_res['excited_state'].fit_report())
def fit_Ramsey(self, x, y, state, **kw):
x = np.array(x)
y = np.array(y)
exp_dec_p_mod = lmfit.Model(fit_mods.ExpDecayPmod)
comb_exp_dec_mod = lmfit.Model(fit_mods.CombinedOszExpDecayFunc)
average = np.mean(y)
ft_of_data = np.fft.fft(y)
index_of_fourier_maximum = np.argmax(np.abs(
ft_of_data[1:len(ft_of_data) // 2])) + 1
max_ramsey_delay = x[-1] - x[0]
fft_axis_scaling = 1 / max_ramsey_delay
freq_est = fft_axis_scaling * index_of_fourier_maximum
n_est = (freq_est-self.artif_detuning)/(2 * self.chi)
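        # Photon-number guess: each residual photon shifts the Ramsey
        # frequency by 2*chi with respect to the artificial detuning, so
        # n0 ~ (freq_est - artif_detuning) / (2*chi).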
exp_dec_p_mod.set_param_hint('T2echo',
value=self.T2,
vary=False)
exp_dec_p_mod.set_param_hint('offset',
value=average,
min=0,
vary=True)
exp_dec_p_mod.set_param_hint('delta',
value=self.artif_detuning,
vary=False)
exp_dec_p_mod.set_param_hint('amplitude',
value=1,
min=0,
vary=True)
exp_dec_p_mod.set_param_hint('kappa',
value=self.kappa[state],
vary=False)
exp_dec_p_mod.set_param_hint('chi',
value=self.chi,
vary=False)
exp_dec_p_mod.set_param_hint('n0',
value=n_est,
min=0,
vary=True)
exp_dec_p_mod.set_param_hint('phase',
value=0,
vary=True)
comb_exp_dec_mod.set_param_hint('tau',
value=self.T2,
vary=True)
comb_exp_dec_mod.set_param_hint('offset',
value=average,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('oscillation_offset',
value=average,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('amplitude',
value=1,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('tau_gauss',
value=self.kappa[state],
vary=True)
comb_exp_dec_mod.set_param_hint('n0',
value=n_est,
min=0,
vary=True)
comb_exp_dec_mod.set_param_hint('phase',
value=0,
vary=True)
comb_exp_dec_mod.set_param_hint('delta',
value=self.artif_detuning,
vary=False)
comb_exp_dec_mod.set_param_hint('chi',
value=self.chi,
vary=False)
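        # Rough phase guess: if the first few points lie above the following
        # ones, the oscillation starts near a maximum (phase ~ 0), otherwise
        # near a minimum (phase ~ pi).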
if (np.average(y[:4]) >
np.average(y[4:8])):
phase_estimate = 0
else:
phase_estimate = np.pi
exp_dec_p_mod.set_param_hint('phase',
value=phase_estimate, vary=True)
comb_exp_dec_mod.set_param_hint('phase',
value=phase_estimate, vary=True)
amplitude_guess = 0.5
if np.all(np.logical_and(y >= 0, y <= 1)):
exp_dec_p_mod.set_param_hint('amplitude',
value=amplitude_guess,
min=0.00,
max=4.0,
vary=True)
comb_exp_dec_mod.set_param_hint('amplitude',
value=amplitude_guess,
min=0.00,
max=4.0,
vary=True)
else:
print('data is not normalized, varying amplitude')
exp_dec_p_mod.set_param_hint('amplitude',
value=max(y),
min=0.00,
max=4.0,
vary=True)
comb_exp_dec_mod.set_param_hint('amplitude',
value=max(y),
min=0.00,
max=4.0,
vary=True)
fit_res_1 = exp_dec_p_mod.fit(data=y,
t=x,
params= exp_dec_p_mod.make_params())
fit_res_2 = comb_exp_dec_mod.fit(data=y,
t=x,
params= comb_exp_dec_mod.make_params())
if fit_res_1.chisqr > .35:
log.warning('Fit did not converge, varying phase')
fit_res_lst = []
for phase_estimate in np.linspace(0, 2*np.pi, 10):
for i, del_amp in enumerate(np.linspace(
-max(y)/10, max(y)/10, 10)):
exp_dec_p_mod.set_param_hint('phase',
value=phase_estimate,
vary=False)
exp_dec_p_mod.set_param_hint('amplitude',
value=max(y)+ del_amp)
fit_res_lst += [exp_dec_p_mod.fit(
data=y,
t=x,
params= exp_dec_p_mod.make_params())]
chisqr_lst = [fit_res_1.chisqr for fit_res_1 in fit_res_lst]
fit_res_1 = fit_res_lst[np.argmin(chisqr_lst)]
if fit_res_2.chisqr > .35:
log.warning('Fit did not converge, varying phase')
fit_res_lst = []
for phase_estimate in np.linspace(0, 2*np.pi, 10):
for i, del_amp in enumerate(np.linspace(
-max(y)/10, max(y)/10, 10)):
comb_exp_dec_mod.set_param_hint('phase',
value=phase_estimate,
vary=False)
comb_exp_dec_mod.set_param_hint('amplitude',
value=max(y)+ del_amp)
fit_res_lst += [comb_exp_dec_mod.fit(
data=y,
t=x,
params= comb_exp_dec_mod.make_params())]
chisqr_lst = [fit_res_2.chisqr for fit_res_2 in fit_res_lst]
fit_res_2 = fit_res_lst[np.argmin(chisqr_lst)]
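        # Return [best fit, exp_dec_p_mod fit, comb_exp_dec_mod fit]; the
        # first entry is used to extract n0 in prepare_fitting and the other
        # two are overlaid in the Ramsey plots.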
if fit_res_1.chisqr < fit_res_2.chisqr:
self.proc_data_dict['params'] = exp_dec_p_mod.make_params()
return [fit_res_1,fit_res_1,fit_res_2]
else:
self.proc_data_dict['params'] = comb_exp_dec_mod.make_params()
return [fit_res_2,fit_res_1,fit_res_2]
def prepare_plots(self):
self.prepare_2D_sweep_plot()
self.prepare_photon_number_plot()
self.prepare_ramsey_plots()
def prepare_2D_sweep_plot(self):
self.plot_dicts['off_full_data_'+self.label] = {
'title': 'Raw data |g>',
'plotfn': self.plot_colorxy,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
'yvals': self.proc_data_dict['delay_to_relax'],
'ylabel': 'Delay after first RO-pulse',
'yunit': 's',
'zvals': np.array(self.proc_data_dict['qubit_state'][0]) }
self.plot_dicts['on_full_data_'+self.label] = {
'title': 'Raw data |e>',
'plotfn': self.plot_colorxy,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
'yvals': self.proc_data_dict['delay_to_relax'],
'ylabel': 'Delay after first RO-pulse',
'yunit': 's',
'zvals': np.array(self.proc_data_dict['qubit_state'][1]) }
def prepare_ramsey_plots(self):
x_fit = np.linspace(self.proc_data_dict['ramsey_times'][0],
max(self.proc_data_dict['ramsey_times']),101)
for i in range(len(self.proc_data_dict['ramsey_fit_results'][0])):
self.plot_dicts['off_'+str(i)] = {
'title': 'Ramsey w t_delay = '+\
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |g> state',
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
'yvals': np.array(self.proc_data_dict['qubit_state'][0][i]/
max(self.proc_data_dict['qubit_state'][0][i][:-4])),
'ylabel': 'Measured qubit state',
'yunit': '',
'marker': 'o',
'setlabel': '|g> data_'+str(i),
'do_legend': True }
self.plot_dicts['off_fit_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |g> state',
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][0][i][1].eval(
self.proc_data_dict['ramsey_fit_results'][0][i][1].params,
t=x_fit),
'linestyle': '-',
'marker': '',
'setlabel': '|g> fit_model'+str(i),
'do_legend': True }
self.plot_dicts['off_fit_2_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |g> state',
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][0][i][2].eval(
self.proc_data_dict['ramsey_fit_results'][0][i][2].params,
t=x_fit),
'linestyle': '-',
'marker': '',
                'setlabel': '|g> fit_simple_model'+str(i),
'do_legend': True }
self.plot_dicts['hidden_g_'+str(i)] = {
'ax_id':'ramsey_off_'+str(i),
'plotfn': self.plot_line,
'xvals': [0],
'yvals': [0],
'color': 'w',
'setlabel': 'Residual photon count = '
''+str(self.proc_data_dict['photon_number'][0][i]),
'do_legend': True }
self.plot_dicts['on_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |e> state',
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': self.proc_data_dict['ramsey_times'],
'xlabel': 'Ramsey delays',
'xunit': 's',
'yvals': np.array(self.proc_data_dict['qubit_state'][1][i]/
max(self.proc_data_dict['qubit_state'][1][i][:-4])),
'ylabel': 'Measured qubit state',
'yunit': '',
'marker': 'o',
'setlabel': '|e> data_'+str(i),
'do_legend': True }
self.plot_dicts['on_fit_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |e> state',
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][1][i][1].eval(
self.proc_data_dict['ramsey_fit_results'][1][i][1].params,
t=x_fit),
'linestyle': '-',
'marker': '',
'setlabel': '|e> fit_model'+str(i),
'do_legend': True }
self.plot_dicts['on_fit_2_'+str(i)] = {
'title': 'Ramsey w t_delay = '+ \
str(self.proc_data_dict['delay_to_relax'][i])+ \
' s, in |e> state',
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': x_fit,
'yvals': self.proc_data_dict['ramsey_fit_results'][1][i][2].eval(
self.proc_data_dict['ramsey_fit_results'][1][i][2].params,
t=x_fit),
'linestyle': '-',
'marker': '',
                'setlabel': '|e> fit_simple_model'+str(i),
'do_legend': True }
self.plot_dicts['hidden_e_'+str(i)] = {
'ax_id':'ramsey_on_'+str(i),
'plotfn': self.plot_line,
'xvals': [0],
'yvals': [0],
'color': 'w',
'setlabel': 'Residual photon count = '
''+str(self.proc_data_dict['photon_number'][1][i]),
'do_legend': True }
def prepare_photon_number_plot(self):
ylabel = 'Average photon number'
yunit = ''
x_fit = np.linspace(min(self.proc_data_dict['delay_to_relax']),
max(self.proc_data_dict['delay_to_relax']),101)
minmax_data = [min(min(self.proc_data_dict['photon_number'][0]),
min(self.proc_data_dict['photon_number'][1])),
max(max(self.proc_data_dict['photon_number'][0]),
max(self.proc_data_dict['photon_number'][1]))]
minmax_data[0] -= minmax_data[0]/5
minmax_data[1] += minmax_data[1]/5
self.plot_dicts['Photon number count'] = {
'plotfn': self.plot_line,
'xlabel': 'Delay after first RO-pulse',
'ax_id': 'Photon number count ',
'xunit': 's',
'xvals': self.proc_data_dict['delay_to_relax'],
'yvals': self.proc_data_dict['photon_number'][0],
'ylabel': ylabel,
'yunit': yunit,
'yrange': minmax_data,
'title': 'Residual photon number',
'color': 'b',
'linestyle': '',
'marker': 'o',
'setlabel': '|g> data',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['main2'] = {
'plotfn': self.plot_line,
'xunit': 's',
'xvals': x_fit,
'yvals': self.fit_res['ground_state'].eval(
self.fit_res['ground_state'].params,
t=x_fit),
'yrange': minmax_data,
'ax_id': 'Photon number count ',
'color': 'b',
'linestyle': '-',
'marker': '',
'setlabel': '|g> fit',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['main3'] = {
'plotfn': self.plot_line,
'xunit': 's',
'xvals': self.proc_data_dict['delay_to_relax'],
'yvals': self.proc_data_dict['photon_number'][1],
'yrange': minmax_data,
'ax_id': 'Photon number count ',
'color': 'r',
'linestyle': '',
'marker': 'o',
'setlabel': '|e> data',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['main4'] = {
'plotfn': self.plot_line,
'xunit': 's',
'ax_id': 'Photon number count ',
'xvals': x_fit,
'yvals': self.fit_res['excited_state'].eval(
self.fit_res['excited_state'].params,
t=x_fit),
'yrange': minmax_data,
'ylabel': ylabel,
'color': 'r',
'linestyle': '-',
'marker': '',
'setlabel': '|e> fit',
'func': 'semilogy',
'do_legend': True}
self.plot_dicts['hidden_1'] = {
'ax_id': 'Photon number count ',
'plotfn': self.plot_line,
'yrange': minmax_data,
'xvals': [0],
'yvals': [0],
'color': 'w',
'setlabel': 'tau_g = '
''+str("%.3f" %
(self.fit_res['ground_state'].params['tau'].value*1e9))+''
' ns',
'do_legend': True }
self.plot_dicts['hidden_2'] = {
'ax_id': 'Photon number count ',
'plotfn': self.plot_line,
'yrange': minmax_data,
'xvals': [0],
'yvals': [0],
'color': 'w',
'setlabel': 'tau_e = '
''+str("%.3f" %
(self.fit_res['excited_state'].params['tau'].value*1e9))+''
' ns',
'do_legend': True}
class RODynamicPhaseAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, qb_names: list=None, t_start: str=None, t_stop: str=None,
data_file_path: str=None, single_timestamp: bool=False,
options_dict: dict=None, extract_only: bool=False,
do_fitting: bool=True, auto=True):
super().__init__(qb_names=qb_names, t_start=t_start, t_stop=t_stop,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only,
do_fitting=do_fitting,
auto=False)
if auto:
self.run_analysis()
def process_data(self):
super().process_data()
if 'qbp_name' in self.metadata:
self.pulsed_qbname = self.metadata['qbp_name']
else:
self.pulsed_qbname = self.options_dict.get('pulsed_qbname')
self.measured_qubits = [qbn for qbn in self.channel_map if
qbn != self.pulsed_qbname]
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.measured_qubits:
ro_dict = self.proc_data_dict['projected_data_dict'][qbn]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
for ro_suff, data in ro_dict.items():
cos_mod = lmfit.Model(fit_mods.CosFunc)
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
guess_pars = fit_mods.Cos_guess(
model=cos_mod,
t=sweep_points,
data=data)
guess_pars['amplitude'].vary = True
guess_pars['offset'].vary = True
guess_pars['frequency'].vary = True
guess_pars['phase'].vary = True
key = 'cos_fit_{}{}'.format(qbn, ro_suff)
self.fit_dicts[key] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
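        # The dynamic phase is the difference (in degrees) between the fitted
        # oscillation phase with and without the intermediate measurement
        # pulse.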
self.dynamic_phases = OrderedDict()
for meas_qbn in self.measured_qubits:
self.dynamic_phases[meas_qbn] = \
(self.fit_dicts['cos_fit_{}_measure'.format(meas_qbn)][
'fit_res'].best_values['phase'] -
self.fit_dicts['cos_fit_{}_ref_measure'.format(meas_qbn)][
'fit_res'].best_values['phase'])*180/np.pi
def prepare_plots(self):
super().prepare_plots()
if self.do_fitting:
for meas_qbn in self.measured_qubits:
sweep_points_dict = self.proc_data_dict['sweep_points_dict'][
meas_qbn]
if self.num_cal_points != 0:
yvals = [self.proc_data_dict['projected_data_dict'][meas_qbn][
'_ref_measure'][:-self.num_cal_points],
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_measure'][:-self.num_cal_points]]
sweep_points = sweep_points_dict['msmt_sweep_points']
# plot cal points
for i, cal_pts_idxs in enumerate(
self.cal_states_dict.values()):
key = list(self.cal_states_dict)[i] + meas_qbn
self.plot_dicts[key] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'plotfn': self.plot_line,
'xvals': np.mean([
sweep_points_dict['cal_points_sweep_points'][
cal_pts_idxs],
sweep_points_dict['cal_points_sweep_points'][
cal_pts_idxs]],
axis=0),
'yvals': np.mean([
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_ref_measure'][cal_pts_idxs],
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_measure'][cal_pts_idxs]],
axis=0),
'setlabel': list(self.cal_states_dict)[i],
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left',
'linestyle': 'none',
'line_kws': {'color': self.get_cal_state_color(
list(self.cal_states_dict)[i])}}
else:
yvals = [self.proc_data_dict['projected_data_dict'][meas_qbn][
'_ref_measure'],
self.proc_data_dict['projected_data_dict'][meas_qbn][
'_measure']]
sweep_points = sweep_points_dict['sweep_points']
self.plot_dicts['dyn_phase_plot_' + meas_qbn] = {
'plotfn': self.plot_line,
'xvals': [sweep_points, sweep_points],
'xlabel': self.raw_data_dict['xlabel'][0],
'xunit': self.raw_data_dict['xunit'][0][0],
'yvals': yvals,
'ylabel': 'Excited state population',
'yunit': '',
'setlabel': ['with measurement', 'no measurement'],
'title': (self.raw_data_dict['timestamps'][0] + ' ' +
self.raw_data_dict['measurementstring'][0]),
'linestyle': 'none',
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
self.plot_dicts['cos_fit_' + meas_qbn + '_ref_measure'] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_{}_ref_measure'.format(
meas_qbn)]['fit_res'],
'setlabel': 'cos fit',
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
self.plot_dicts['cos_fit_' + meas_qbn + '_measure'] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'plotfn': self.plot_fit,
'fit_res': self.fit_dicts['cos_fit_{}_measure'.format(
meas_qbn)]['fit_res'],
'setlabel': 'cos fit',
'do_legend': True,
'legend_bbox_to_anchor': (1, 0.5),
'legend_pos': 'center left'}
textstr = 'Dynamic phase = {:.2f}'.format(
self.dynamic_phases[meas_qbn]) + r'$^{\circ}$'
self.plot_dicts['text_msg_' + meas_qbn] = {
'fig_id': 'dyn_phase_plot_' + meas_qbn,
'ypos': -0.175,
'xpos': 0.5,
'horizontalalignment': 'center',
'verticalalignment': 'top',
'plotfn': self.plot_text,
'text_string': textstr}
class FluxAmplitudeSweepAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, qb_names, *args, **kwargs):
self.mask_freq = kwargs.pop('mask_freq', None)
self.mask_amp = kwargs.pop('mask_amp', None)
super().__init__(qb_names, *args, **kwargs)
def extract_data(self):
super().extract_data()
        # Set some default values specific to FluxAmplitudeSweepAnalysis if the
# respective options have not been set by the user or in the metadata.
# (We do not do this in the init since we have to wait until
# metadata has been extracted.)
if self.get_param_value('rotation_type', default_value=None) is None:
self.options_dict['rotation_type'] = 'global_PCA'
if self.get_param_value('TwoD', default_value=None) is None:
self.options_dict['TwoD'] = True
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
nr_sp = {qb: len(pdd['sweep_points_dict'][qb]['sweep_points'])
for qb in self.qb_names}
nr_sp2d = {qb: len(list(pdd['sweep_points_2D_dict'][qb].values())[0])
for qb in self.qb_names}
nr_cp = self.num_cal_points
# make matrix out of vector
data_reshaped = {qb: np.reshape(deepcopy(
pdd['data_to_fit'][qb]).T.flatten(), (nr_sp[qb], nr_sp2d[qb]))
for qb in self.qb_names}
pdd['data_reshaped'] = data_reshaped
# remove calibration points from data to fit
data_no_cp = {qb: np.array([pdd['data_reshaped'][qb][i, :]
for i in range(nr_sp[qb]-nr_cp)])
for qb in self.qb_names}
# apply mask
for qb in self.qb_names:
if self.mask_freq is None:
self.mask_freq = [True]*nr_sp2d[qb] # by default, no point is masked
if self.mask_amp is None:
self.mask_amp = [True]*(nr_sp[qb]-nr_cp)
pdd['freqs_masked'] = {}
pdd['amps_masked'] = {}
pdd['data_masked'] = {}
for qb in self.qb_names:
sp_param = [k for k in self.mospm[qb] if 'freq' in k][0]
pdd['freqs_masked'][qb] = \
pdd['sweep_points_2D_dict'][qb][sp_param][self.mask_freq]
pdd['amps_masked'][qb] = \
pdd['sweep_points_dict'][qb]['sweep_points'][
:-self.num_cal_points][self.mask_amp]
data_masked = data_no_cp[qb][self.mask_amp,:]
pdd['data_masked'][qb] = data_masked[:, self.mask_freq]
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
# Gaussian fit of amplitude slices
gauss_mod = fit_mods.GaussianModel_v2()
for qb in self.qb_names:
for i in range(len(pdd['amps_masked'][qb])):
data = pdd['data_masked'][qb][i,:]
self.fit_dicts[f'gauss_fit_{qb}_{i}'] = {
'model': gauss_mod,
'fit_xvals': {'x': pdd['freqs_masked'][qb]},
'fit_yvals': {'data': data}
}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['gauss_center'] = {}
pdd['gauss_center_err'] = {}
pdd['filtered_center'] = {}
pdd['filtered_amps'] = {}
for qb in self.qb_names:
pdd['gauss_center'][qb] = np.array([
self.fit_res[f'gauss_fit_{qb}_{i}'].best_values['center']
for i in range(len(pdd['amps_masked'][qb]))])
pdd['gauss_center_err'][qb] = np.array([
self.fit_res[f'gauss_fit_{qb}_{i}'].params['center'].stderr
for i in range(len(pdd['amps_masked'][qb]))])
# filter out points with stderr > 1e6 Hz
pdd['filtered_center'][qb] = np.array([])
pdd['filtered_amps'][qb] = np.array([])
for i, stderr in enumerate(pdd['gauss_center_err'][qb]):
try:
if stderr < 1e6:
pdd['filtered_center'][qb] = \
np.append(pdd['filtered_center'][qb],
pdd['gauss_center'][qb][i])
pdd['filtered_amps'][qb] = \
np.append(pdd['filtered_amps'][qb],
pdd['sweep_points_dict'][qb]\
['sweep_points'][:-self.num_cal_points][i])
                except TypeError:
continue
# if gaussian fitting does not work (i.e. all points were filtered
# out above) use max value of data to get an estimate of freq
            if len(pdd['filtered_amps'][qb]) == 0:
                freqs = np.array([])
                for i in range(pdd['data_masked'][qb].shape[0]):
                    freqs = np.append(
                        freqs, pdd['freqs_masked'][qb][
                            np.argmax(pdd['data_masked'][qb][i, :])])
                pdd['filtered_center'][qb] = freqs
                pdd['filtered_amps'][qb] = pdd['amps_masked'][qb]
# fit the freqs to the qubit model
self.fit_func = self.get_param_value('fit_func', fit_mods.Qubit_dac_to_freq)
if self.fit_func == fit_mods.Qubit_dac_to_freq_precise:
fit_guess_func = fit_mods.Qubit_dac_arch_guess_precise
else:
fit_guess_func = fit_mods.Qubit_dac_arch_guess
freq_mod = lmfit.Model(self.fit_func)
fixed_params = \
self.get_param_value("fixed_params_for_fit", {}).get(qb, None)
if fixed_params is None:
fixed_params = dict(E_c=0)
freq_mod.guess = fit_guess_func.__get__(
freq_mod, freq_mod.__class__)
self.fit_dicts[f'freq_fit_{qb}'] = {
'model': freq_mod,
'fit_xvals': {'dac_voltage': pdd['filtered_amps'][qb]},
'fit_yvals': {'data': pdd['filtered_center'][qb]},
"guessfn_pars": {"fixed_params": fixed_params}}
self.run_fitting()
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
sp_param = [k for k in self.mospm[qb] if 'freq' in k][0]
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'data_2d_{qb}',
'plotfn': self.plot_colorxy,
'xvals': pdd['sweep_points_dict'][qb]['sweep_points'],
'yvals': pdd['sweep_points_2D_dict'][qb][sp_param],
'zvals': np.transpose(pdd['data_reshaped'][qb]),
'xlabel': r'Flux pulse amplitude',
'xunit': 'V',
'ylabel': r'Qubit drive frequency',
'yunit': 'Hz',
'zlabel': 'Excited state population',
}
if self.do_fitting:
if self.options_dict.get('scatter', True):
label = f'freq_scatter_{qb}_scatter'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'data_2d_{qb}',
'plotfn': self.plot_line,
'linestyle': '',
'marker': 'o',
'xvals': pdd['filtered_amps'][qb],
'yvals': pdd['filtered_center'][qb],
'xlabel': r'Flux pulse amplitude',
'xunit': 'V',
'ylabel': r'Qubit drive frequency',
'yunit': 'Hz',
'color': 'white',
}
amps = pdd['sweep_points_dict'][qb]['sweep_points'][
:-self.num_cal_points]
label = f'freq_scatter_{qb}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'data_2d_{qb}',
'plotfn': self.plot_line,
'linestyle': '-',
'marker': '',
'xvals': amps,
'yvals': self.fit_func(amps,
**self.fit_res[f'freq_fit_{qb}'].best_values),
'color': 'red',
}
class T1FrequencySweepAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
nr_cp = self.num_cal_points
self.lengths = OrderedDict()
self.amps = OrderedDict()
self.freqs = OrderedDict()
for qbn in self.qb_names:
len_key = [pn for pn in self.mospm[qbn] if 'length' in pn]
if len(len_key) == 0:
                raise KeyError("Couldn't find sweep points corresponding to "
                               "flux pulse length.")
self.lengths[qbn] = self.sp.get_sweep_params_property(
'values', 0, len_key[0])
amp_key = [pn for pn in self.mospm[qbn] if 'amp' in pn]
            if len(amp_key) == 0:
                raise KeyError("Couldn't find sweep points corresponding to "
                               "flux pulse amplitude.")
self.amps[qbn] = self.sp.get_sweep_params_property(
'values', 1, amp_key[0])
freq_key = [pn for pn in self.mospm[qbn] if 'freq' in pn]
if len(freq_key) == 0:
self.freqs[qbn] = None
else:
                self.freqs[qbn] = self.sp.get_sweep_params_property(
'values', 1, freq_key[0])
nr_amps = len(self.amps[self.qb_names[0]])
nr_lengths = len(self.lengths[self.qb_names[0]])
# make matrix out of vector
data_reshaped_no_cp = {qb: np.reshape(deepcopy(
pdd['data_to_fit'][qb][
:, :pdd['data_to_fit'][qb].shape[1]-nr_cp]).flatten(),
(nr_amps, nr_lengths)) for qb in self.qb_names}
pdd['data_reshaped_no_cp'] = data_reshaped_no_cp
        pdd['mask'] = {qb: np.ones(nr_amps, dtype=bool)
                       for qb in self.qb_names}
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
exp_mod = fit_mods.ExponentialModel()
for qb in self.qb_names:
for i, data in enumerate(pdd['data_reshaped_no_cp'][qb]):
self.fit_dicts[f'exp_fit_{qb}_amp_{i}'] = {
'model': exp_mod,
'fit_xvals': {'x': self.lengths[qb]},
'fit_yvals': {'data': data}}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['T1'] = {}
pdd['T1_err'] = {}
for qb in self.qb_names:
pdd['T1'][qb] = np.array([
abs(self.fit_res[f'exp_fit_{qb}_amp_{i}'].best_values['decay'])
for i in range(len(self.amps[qb]))])
pdd['T1_err'][qb] = np.array([
self.fit_res[f'exp_fit_{qb}_amp_{i}'].params['decay'].stderr
for i in range(len(self.amps[qb]))])
for i in range(len(self.amps[qb])):
try:
if pdd['T1_err'][qb][i] >= 10 * pdd['T1'][qb][i]:
pdd['mask'][qb][i] = False
except TypeError:
pdd['mask'][qb][i] = False
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
for p, param_values in enumerate([self.amps, self.freqs]):
                # self.freqs[qb] is None if no frequency sweep points exist
                if param_values[qb] is None:
continue
suffix = '_amp' if p == 0 else '_freq'
mask = pdd['mask'][qb]
xlabel = r'Flux pulse amplitude' if p == 0 else \
r'Derived qubit frequency'
if self.do_fitting:
# Plot T1 vs flux pulse amplitude
label = f'T1_fit_{qb}{suffix}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'plotfn': self.plot_line,
'linestyle': '-',
'xvals': param_values[qb][mask],
'yvals': pdd['T1'][qb][mask],
'yerr': pdd['T1_err'][qb][mask],
'xlabel': xlabel,
'xunit': 'V' if p == 0 else 'Hz',
'ylabel': r'T1',
'yunit': 's',
'color': 'blue',
}
# Plot rotated integrated average in dependece of flux pulse
# amplitude and length
label = f'T1_color_plot_{qb}{suffix}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'plotfn': self.plot_colorxy,
'linestyle': '-',
'xvals': param_values[qb][mask],
'yvals': self.lengths[qb],
'zvals': np.transpose(pdd['data_reshaped_no_cp'][qb][mask]),
'xlabel': xlabel,
'xunit': 'V' if p == 0 else 'Hz',
'ylabel': r'Flux pulse length',
'yunit': 's',
'zlabel': r'Excited state population'
}
# Plot population loss for the first flux pulse length as a
# function of flux pulse amplitude
label = f'Pop_loss_{qb}{suffix}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'plotfn': self.plot_line,
'linestyle': '-',
'xvals': param_values[qb][mask],
'yvals': 1 - pdd['data_reshaped_no_cp'][qb][:, 0][mask],
'xlabel': xlabel,
'xunit': 'V' if p == 0 else 'Hz',
'ylabel': r'Pop. loss @ {:.0f} ns'.format(
self.lengths[qb][0]/1e-9
),
'yunit': '',
}
# Plot all fits in single figure
if self.options_dict.get('all_fits', False) and self.do_fitting:
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i in range(len(self.amps[qb])):
color = colormap(i/(len(self.amps[qb])-1))
label = f'exp_fit_{qb}_amp_{i}'
fitid = param_values[qb][i]
self.plot_dicts[label] = {
'title': rdd['measurementstring'] + '\n' + rdd['timestamp'],
'fig_id': f'T1_fits_{qb}',
'xlabel': r'Flux pulse length',
'xunit': 's',
'ylabel': r'Excited state population',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': f'freq={fitid:.4f}' if p == 1
else f'amp={fitid:.4f}',
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
label = f'freq_scatter_{qb}_{i}'
self.plot_dicts[label] = {
'fig_id': f'T1_fits_{qb}',
'plotfn': self.plot_line,
'xvals': self.lengths[qb],
'linestyle': '',
'yvals': pdd['data_reshaped_no_cp'][qb][i, :],
'color': color,
'setlabel': f'freq={fitid:.4f}' if p == 1
else f'amp={fitid:.4f}',
}
class T2FrequencySweepAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
pdd = self.proc_data_dict
nr_cp = self.num_cal_points
nr_amps = len(self.metadata['amplitudes'])
nr_lengths = len(self.metadata['flux_lengths'])
nr_phases = len(self.metadata['phases'])
# make matrix out of vector
data_reshaped_no_cp = {qb: np.reshape(
deepcopy(pdd['data_to_fit'][qb][
:, :pdd['data_to_fit'][qb].shape[1]-nr_cp]).flatten(),
(nr_amps, nr_lengths, nr_phases)) for qb in self.qb_names}
pdd['data_reshaped_no_cp'] = data_reshaped_no_cp
if self.metadata['use_cal_points']:
pdd['cal_point_data'] = {qb: deepcopy(
pdd['data_to_fit'][qb][
len(pdd['data_to_fit'][qb])-nr_cp:]) for qb in self.qb_names}
        pdd['mask'] = {qb: np.ones(nr_amps, dtype=bool)
                       for qb in self.qb_names}
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
nr_amps = len(self.metadata['amplitudes'])
for qb in self.qb_names:
for i in range(nr_amps):
for j, data in enumerate(pdd['data_reshaped_no_cp'][qb][i]):
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=self.metadata['phases'],
data=data,
freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts[f'cos_fit_{qb}_{i}_{j}'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': self.metadata['phases']},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['T2'] = {}
pdd['T2_err'] = {}
pdd['phase_contrast'] = {}
nr_lengths = len(self.metadata['flux_lengths'])
nr_amps = len(self.metadata['amplitudes'])
for qb in self.qb_names:
pdd['phase_contrast'][qb] = {}
exp_mod = fit_mods.ExponentialModel()
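            # For each flux-pulse amplitude, the phase contrast (fitted cosine
            # amplitude) versus flux-pulse length is fitted to an exponential;
            # the decay constant of that fit is reported as T2 below.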
for i in range(nr_amps):
pdd['phase_contrast'][qb][f'amp_{i}'] = np.array([self.fit_res[
f'cos_fit_{qb}_{i}_{j}'
].best_values['amplitude']
for j in
range(nr_lengths)])
self.fit_dicts[f'exp_fit_{qb}_{i}'] = {
'model': exp_mod,
'fit_xvals': {'x': self.metadata['flux_lengths']},
'fit_yvals': {'data': np.array([self.fit_res[
f'cos_fit_{qb}_{i}_{j}'
].best_values['amplitude']
for j in
range(nr_lengths)])}}
self.run_fitting()
pdd['T2'][qb] = np.array([
abs(self.fit_res[f'exp_fit_{qb}_{i}'].best_values['decay'])
for i in range(len(self.metadata['amplitudes']))])
            pdd['mask'][qb] = np.ones(len(self.metadata['amplitudes']),
                                      dtype=bool)
for i in range(len(self.metadata['amplitudes'])):
try:
if self.fit_res[f'exp_fit_{qb}_{i}']\
.params['decay'].stderr >= 1e-5:
pdd['mask'][qb][i] = False
except TypeError:
pdd['mask'][qb][i] = False
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
mask = pdd['mask'][qb]
label = f'T2_fit_{qb}'
xvals = self.metadata['amplitudes'][mask] if \
self.metadata['frequencies'] is None else \
self.metadata['frequencies'][mask]
xlabel = r'Flux pulse amplitude' if \
self.metadata['frequencies'] is None else \
r'Derived qubit frequency'
self.plot_dicts[label] = {
'plotfn': self.plot_line,
'linestyle': '-',
'xvals': xvals,
'yvals': pdd['T2'][qb][mask],
'xlabel': xlabel,
'xunit': 'V' if self.metadata['frequencies'] is None else 'Hz',
'ylabel': r'T2',
'yunit': 's',
'color': 'blue',
}
# Plot all fits in single figure
if not self.options_dict.get('all_fits', False):
continue
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i in range(len(self.metadata['amplitudes'])):
                color = colormap(i/(len(self.metadata['amplitudes'])-1))
label = f'exp_fit_{qb}_amp_{i}'
freqs = self.metadata['frequencies'] is not None
fitid = self.metadata.get('frequencies',
self.metadata['amplitudes'])[i]
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'T2_fits_{qb}',
'xlabel': r'Flux pulse length',
'xunit': 's',
'ylabel': r'Excited state population',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': f'freq={fitid:.4f}' if freqs
else f'amp={fitid:.4f}',
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
label = f'freq_scatter_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'T2_fits_{qb}',
'plotfn': self.plot_line,
'xvals': self.metadata['phases'],
'linestyle': '',
'yvals': pdd['data_reshaped_no_cp'][qb][i,:],
'color': color,
'setlabel': f'freq={fitid:.4f}' if freqs
else f'amp={fitid:.4f}',
}
class MeasurementInducedDephasingAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
rdd = self.raw_data_dict
pdd = self.proc_data_dict
pdd['data_reshaped'] = {qb: [] for qb in pdd['data_to_fit']}
pdd['amps_reshaped'] = np.unique(self.metadata['hard_sweep_params']['ro_amp_scale']['values'])
pdd['phases_reshaped'] = []
for amp in pdd['amps_reshaped']:
mask = self.metadata['hard_sweep_params']['ro_amp_scale']['values'] == amp
pdd['phases_reshaped'].append(self.metadata['hard_sweep_params']['phase']['values'][mask])
for qb in self.qb_names:
pdd['data_reshaped'][qb].append(pdd['data_to_fit'][qb][:len(mask)][mask])
def prepare_fitting(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
self.fit_dicts = OrderedDict()
for qb in self.qb_names:
for i, data in enumerate(pdd['data_reshaped'][qb]):
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=pdd['phases_reshaped'][i],
data=data, freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts[f'cos_fit_{qb}_{i}'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': pdd['phases_reshaped'][i]},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['phase_contrast'] = {}
pdd['phase_offset'] = {}
pdd['sigma'] = {}
pdd['sigma_err'] = {}
pdd['a'] = {}
pdd['a_err'] = {}
pdd['c'] = {}
pdd['c_err'] = {}
for qb in self.qb_names:
pdd['phase_contrast'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['amplitude']
for i, _ in enumerate(pdd['data_reshaped'][qb])])
pdd['phase_offset'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['phase']
for i, _ in enumerate(pdd['data_reshaped'][qb])])
pdd['phase_offset'][qb] += np.pi * (pdd['phase_contrast'][qb] < 0)
pdd['phase_offset'][qb] = (pdd['phase_offset'][qb] + np.pi) % (2 * np.pi) - np.pi
pdd['phase_offset'][qb] = 180*np.unwrap(pdd['phase_offset'][qb])/np.pi
pdd['phase_contrast'][qb] = np.abs(pdd['phase_contrast'][qb])
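            # The readout-induced dephasing is modelled below as a Gaussian
            # decay of the phase contrast with readout amplitude (width sigma)
            # and a quadratic dependence of the phase offset (parameters a, c).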
gauss_mod = lmfit.models.GaussianModel()
self.fit_dicts[f'phase_contrast_fit_{qb}'] = {
'model': gauss_mod,
'guess_dict': {'center': {'value': 0, 'vary': False}},
'fit_xvals': {'x': pdd['amps_reshaped']},
'fit_yvals': {'data': pdd['phase_contrast'][qb]}}
quadratic_mod = lmfit.models.QuadraticModel()
self.fit_dicts[f'phase_offset_fit_{qb}'] = {
'model': quadratic_mod,
'guess_dict': {'b': {'value': 0, 'vary': False}},
'fit_xvals': {'x': pdd['amps_reshaped']},
'fit_yvals': {'data': pdd['phase_offset'][qb]}}
self.run_fitting()
self.save_fit_results()
pdd['sigma'][qb] = self.fit_res[f'phase_contrast_fit_{qb}'].best_values['sigma']
pdd['sigma_err'][qb] = self.fit_res[f'phase_contrast_fit_{qb}'].params['sigma']. \
stderr
pdd['a'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].best_values['a']
pdd['a_err'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].params['a'].stderr
pdd['c'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].best_values['c']
pdd['c_err'][qb] = self.fit_res[f'phase_offset_fit_{qb}'].params['c'].stderr
pdd['sigma_err'][qb] = float('nan') if pdd['sigma_err'][qb] is None \
else pdd['sigma_err'][qb]
pdd['a_err'][qb] = float('nan') if pdd['a_err'][qb] is None else pdd['a_err'][qb]
pdd['c_err'][qb] = float('nan') if pdd['c_err'][qb] is None else pdd['c_err'][qb]
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
phases_equal = True
for phases in pdd['phases_reshaped'][1:]:
if not np.all(phases == pdd['phases_reshaped'][0]):
phases_equal = False
break
for qb in self.qb_names:
if phases_equal:
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'plotfn': self.plot_colorxy,
'xvals': pdd['phases_reshaped'][0],
'yvals': pdd['amps_reshaped'],
'zvals': pdd['data_reshaped'][qb],
'xlabel': r'Pulse phase, $\phi$',
'xunit': 'deg',
'ylabel': r'Readout pulse amplitude scale, $V_{RO}/V_{ref}$',
'yunit': '',
'zlabel': 'Excited state population',
}
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i, amp in enumerate(pdd['amps_reshaped']):
color = colormap(i/(len(pdd['amps_reshaped'])-1))
label = f'cos_data_{qb}_{i}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'amplitude_crossections_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['phases_reshaped'][i],
'yvals': pdd['data_reshaped'][qb][i],
'xlabel': r'Pulse phase, $\phi$',
'xunit': 'deg',
'ylabel': 'Excited state population',
'linestyle': '',
'color': color,
'setlabel': f'amp={amp:.4f}',
'do_legend': True,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
if self.do_fitting:
for i, amp in enumerate(pdd['amps_reshaped']):
color = colormap(i/(len(pdd['amps_reshaped'])-1))
label = f'cos_fit_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'amplitude_crossections_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': f'fit, amp={amp:.4f}',
}
# Phase contrast
self.plot_dicts[f'phase_contrast_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': 200*pdd['phase_contrast'][qb],
'xlabel': r'Readout pulse amplitude scale, $V_{RO}/V_{ref}$',
'xunit': '',
'ylabel': 'Phase contrast',
'yunit': '%',
'linestyle': '',
'color': 'k',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_contrast_fit_{qb}'] = {
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': 200*self.fit_res[f'phase_contrast_fit_{qb}'].best_fit,
'color': 'r',
'marker': '',
'setlabel': 'fit',
'do_legend': True,
}
self.plot_dicts[f'phase_contrast_labels_{qb}'] = {
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': 200*pdd['phase_contrast'][qb],
'marker': '',
'linestyle': '',
'setlabel': r'$\sigma = ({:.5f} \pm {:.5f})$ V'.
format(pdd['sigma'][qb], pdd['sigma_err'][qb]),
'do_legend': True,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
# Phase offset
self.plot_dicts[f'phase_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'],
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': pdd['phase_offset'][qb],
'xlabel': r'Readout pulse amplitude scale, $V_{RO}/V_{ref}$',
'xunit': '',
'ylabel': 'Phase offset',
'yunit': 'deg',
'linestyle': '',
'color': 'k',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_offset_fit_{qb}'] = {
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': self.fit_res[f'phase_offset_fit_{qb}'].best_fit,
'color': 'r',
'marker': '',
'setlabel': 'fit',
'do_legend': True,
}
self.plot_dicts[f'phase_offset_labels_{qb}'] = {
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['amps_reshaped'],
'yvals': pdd['phase_offset'][qb],
'marker': '',
'linestyle': '',
'setlabel': r'$a = {:.0f} \pm {:.0f}$ deg/V${{}}^2$'.
format(pdd['a'][qb], pdd['a_err'][qb]) + '\n' +
r'$c = {:.1f} \pm {:.1f}$ deg'.
format(pdd['c'][qb], pdd['c_err'][qb]),
'do_legend': True,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
class DriveCrosstalkCancellationAnalysis(MultiQubit_TimeDomain_Analysis):
def process_data(self):
super().process_data()
if self.sp is None:
raise ValueError('This analysis needs a SweepPoints '
'class instance.')
pdd = self.proc_data_dict
# get the ramsey phases as the values of the first sweep parameter
# in the 2nd sweep dimension.
# !!! This assumes all qubits have the same ramsey phases !!!
pdd['ramsey_phases'] = self.sp.get_sweep_params_property('values', 1)
pdd['qb_sweep_points'] = {}
pdd['qb_sweep_param'] = {}
for k, v in self.sp.get_sweep_dimension(0).items():
if k == 'phase':
continue
qb, param = k.split('.')
pdd['qb_sweep_points'][qb] = v[0]
pdd['qb_sweep_param'][qb] = (param, v[1], v[2])
pdd['qb_msmt_vals'] = {}
pdd['qb_cal_vals'] = {}
for qb, data in pdd['data_to_fit'].items():
pdd['qb_msmt_vals'][qb] = data[:, :-self.num_cal_points].reshape(
len(pdd['qb_sweep_points'][qb]), len(pdd['ramsey_phases']))
pdd['qb_cal_vals'][qb] = data[0, -self.num_cal_points:]
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
for qb in self.qb_names:
for i, data in enumerate(pdd['qb_msmt_vals'][qb]):
cos_mod = fit_mods.CosModel
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=pdd['ramsey_phases'],
data=data, freq_guess=1/360)
guess_pars['frequency'].value = 1/360
guess_pars['frequency'].vary = False
self.fit_dicts[f'cos_fit_{qb}_{i}'] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': pdd['ramsey_phases']},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['phase_contrast'] = {}
pdd['phase_offset'] = {}
for qb in self.qb_names:
pdd['phase_contrast'][qb] = np.array([
2*self.fit_res[f'cos_fit_{qb}_{i}'].best_values['amplitude']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['phase']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] *= 180/np.pi
pdd['phase_offset'][qb] += 180 * (pdd['phase_contrast'][qb] < 0)
pdd['phase_offset'][qb] = (pdd['phase_offset'][qb] + 180) % 360 - 180
pdd['phase_contrast'][qb] = np.abs(pdd['phase_contrast'][qb])
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'plotfn': self.plot_colorxy,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['qb_sweep_points'][qb],
'zvals': pdd['qb_msmt_vals'][qb],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': pdd['qb_sweep_param'][qb][2],
'yunit': pdd['qb_sweep_param'][qb][1],
'zlabel': 'Excited state population',
}
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i, pval in enumerate(pdd['qb_sweep_points'][qb]):
if i == len(pdd['qb_sweep_points'][qb]) - 1:
legendlabel='data, ref.'
else:
legendlabel = f'data, {pdd["qb_sweep_param"][qb][0]}='\
f'{pval:.4f}{pdd["qb_sweep_param"][qb][1]}'
color = colormap(i/(len(pdd['qb_sweep_points'][qb])-1))
label = f'cos_data_{qb}_{i}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['qb_msmt_vals'][qb][i],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': 'Excited state population',
'linestyle': '',
'color': color,
'setlabel': legendlabel,
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
if self.do_fitting:
for i, pval in enumerate(pdd['qb_sweep_points'][qb]):
if i == len(pdd['qb_sweep_points'][qb]) - 1:
legendlabel = 'fit, ref.'
else:
legendlabel = f'fit, {pdd["qb_sweep_param"][qb][0]}='\
f'{pval:.4f}{pdd["qb_sweep_param"][qb][1]}'
color = colormap(i/(len(pdd['qb_sweep_points'][qb])-1))
label = f'cos_fit_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'do_legend': False,
# 'setlabel': legendlabel
}
# Phase contrast
self.plot_dicts[f'phase_contrast_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['qb_sweep_points'][qb][:-1],
'yvals': pdd['phase_contrast'][qb][:-1] * 100,
'xlabel': pdd['qb_sweep_param'][qb][2],
'xunit': pdd['qb_sweep_param'][qb][1],
'ylabel': 'Phase contrast',
'yunit': '%',
'linestyle': '-',
'marker': 'o',
'color': 'C0',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_contrast_ref_{qb}'] = {
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_hlines,
'xmin': pdd['qb_sweep_points'][qb][:-1].min(),
'xmax': pdd['qb_sweep_points'][qb][:-1].max(),
'y': pdd['phase_contrast'][qb][-1] * 100,
'linestyle': '--',
'colors': '0.6',
'setlabel': 'ref',
'do_legend': True,
}
# Phase offset
self.plot_dicts[f'phase_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['qb_sweep_points'][qb][:-1],
'yvals': pdd['phase_offset'][qb][:-1],
'xlabel': pdd['qb_sweep_param'][qb][2],
'xunit': pdd['qb_sweep_param'][qb][1],
'ylabel': 'Phase offset',
'yunit': 'deg',
'linestyle': '-',
'marker': 'o',
'color': 'C0',
'setlabel': 'data',
'do_legend': True,
}
self.plot_dicts[f'phase_offset_ref_{qb}'] = {
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_hlines,
'xmin': pdd['qb_sweep_points'][qb][:-1].min(),
'xmax': pdd['qb_sweep_points'][qb][:-1].max(),
'y': pdd['phase_offset'][qb][-1],
'linestyle': '--',
'colors': '0.6',
'setlabel': 'ref',
'do_legend': True,
}
class FluxlineCrosstalkAnalysis(MultiQubit_TimeDomain_Analysis):
"""Analysis for the measure_fluxline_crosstalk measurement.
The measurement involves Ramsey measurements on a set of crosstalk qubits,
which have been brought to a flux-sensitive position with a flux pulse.
    The first sweep dimension is the Ramsey phase of these qubits.
In the second sweep dimension, the amplitude of a flux pulse on another
(target) qubit is swept.
The analysis extracts the change in Ramsey phase offset, which gets
converted to a frequency offset due to the flux pulse on the target qubit.
The frequency offset is then converted to a flux offset, which is a measure
of the crosstalk between the target fluxline and the crosstalk qubit.
    The measurement is hard-compressed, meaning the raw data is inherently 1d,
    with one set of calibration points as the final segments. The experiment
    part of the measured values is reshaped to the correct 2d shape for
    the analysis. The sweep points passed into the analysis should still
    reflect the 2d nature of the measurement, meaning the Ramsey phase values
    should be passed in the first dimension and the target flux-pulse
    amplitudes in the second sweep dimension.
"""
def __init__(self, qb_names, *args, **kwargs):
params_dict = {f'{qbn}.amp_to_freq_model':
f'Instrument settings.{qbn}.fit_ge_freq_from_flux_pulse_amp'
for qbn in qb_names}
kwargs['params_dict'] = kwargs.get('params_dict', {})
kwargs['params_dict'].update(params_dict)
super().__init__(qb_names, *args, **kwargs)
def process_data(self):
super().process_data()
if self.sp is None:
raise ValueError('This analysis needs a SweepPoints '
'class instance.')
pdd = self.proc_data_dict
pdd['ramsey_phases'] = self.sp.get_sweep_params_property('values', 0)
pdd['target_amps'] = self.sp.get_sweep_params_property('values', 1)
pdd['target_fluxpulse_length'] = \
self.get_param_value('target_fluxpulse_length')
pdd['crosstalk_qubits_amplitudes'] = \
self.get_param_value('crosstalk_qubits_amplitudes')
pdd['qb_msmt_vals'] = {qb:
pdd['data_to_fit'][qb][:, :-self.num_cal_points].reshape(
len(pdd['target_amps']), len(pdd['ramsey_phases']))
for qb in self.qb_names}
pdd['qb_cal_vals'] = {
qb: pdd['data_to_fit'][qb][0, -self.num_cal_points:]
for qb in self.qb_names}
def prepare_fitting(self):
pdd = self.proc_data_dict
self.fit_dicts = OrderedDict()
cos_mod = lmfit.Model(fit_mods.CosFunc)
cos_mod.guess = fit_mods.Cos_guess.__get__(cos_mod, cos_mod.__class__)
for qb in self.qb_names:
for i, data in enumerate(pdd['qb_msmt_vals'][qb]):
self.fit_dicts[f'cos_fit_{qb}_{i}'] = {
'model': cos_mod,
'guess_dict': {'frequency': {'value': 1 / 360,
'vary': False}},
'fit_xvals': {'t': pdd['ramsey_phases']},
'fit_yvals': {'data': data}}
def analyze_fit_results(self):
pdd = self.proc_data_dict
pdd['phase_contrast'] = {}
pdd['phase_offset'] = {}
pdd['freq_offset'] = {}
pdd['freq'] = {}
self.skip_qb_freq_fits = self.get_param_value('skip_qb_freq_fits', False)
if not self.skip_qb_freq_fits:
pdd['flux'] = {}
for qb in self.qb_names:
pdd['phase_contrast'][qb] = np.array([
2 * self.fit_res[f'cos_fit_{qb}_{i}'].best_values['amplitude']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] = np.array([
self.fit_res[f'cos_fit_{qb}_{i}'].best_values['phase']
for i, _ in enumerate(pdd['qb_msmt_vals'][qb])])
pdd['phase_offset'][qb] *= 180 / np.pi
pdd['phase_offset'][qb] += 180 * (pdd['phase_contrast'][qb] < 0)
pdd['phase_offset'][qb] = (pdd['phase_offset'][qb] + 180) % 360 - 180
pdd['phase_offset'][qb] = \
np.unwrap(pdd['phase_offset'][qb] / 180 * np.pi) * 180 / np.pi
pdd['phase_contrast'][qb] = np.abs(pdd['phase_contrast'][qb])
pdd['freq_offset'][qb] = pdd['phase_offset'][qb] / 360 / pdd[
'target_fluxpulse_length']
fr = lmfit.Model(lambda a, f_a=1, f0=0: a * f_a + f0).fit(
data=pdd['freq_offset'][qb], a=pdd['target_amps'])
pdd['freq_offset'][qb] -= fr.best_values['f0']
if not self.skip_qb_freq_fits:
mpars = eval(self.raw_data_dict[f'{qb}.amp_to_freq_model'])
freq_idle = fit_mods.Qubit_dac_to_freq(
pdd['crosstalk_qubits_amplitudes'].get(qb, 0), **mpars)
pdd['freq'][qb] = pdd['freq_offset'][qb] + freq_idle
mpars.update({'V_per_phi0': 1, 'dac_sweet_spot': 0})
pdd['flux'][qb] = fit_mods.Qubit_freq_to_dac(
pdd['freq'][qb], **mpars)
# fit fitted results to linear models
lin_mod = lmfit.Model(lambda x, a=1, b=0: a*x + b)
def guess(model, data, x, **kwargs):
a_guess = (data[-1] - data[0])/(x[-1] - x[0])
b_guess = data[0] - x[0]*a_guess
return model.make_params(a=a_guess, b=b_guess)
lin_mod.guess = guess.__get__(lin_mod, lin_mod.__class__)
keys_to_fit = []
for qb in self.qb_names:
for param in ['phase_offset', 'freq_offset', 'flux']:
if param == 'flux' and self.skip_qb_freq_fits:
continue
key = f'{param}_fit_{qb}'
self.fit_dicts[key] = {
'model': lin_mod,
'fit_xvals': {'x': pdd['target_amps']},
'fit_yvals': {'data': pdd[param][qb]}}
keys_to_fit.append(key)
self.run_fitting(keys_to_fit=keys_to_fit)
def prepare_plots(self):
pdd = self.proc_data_dict
rdd = self.raw_data_dict
for qb in self.qb_names:
self.plot_dicts[f'data_2d_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'plotfn': self.plot_colorxy,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['target_amps'],
'zvals': pdd['qb_msmt_vals'][qb],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': self.sp.get_sweep_params_property('label', 1,
'target_amp'),
'yunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'zlabel': 'Excited state population',
}
colormap = self.options_dict.get('colormap', mpl.cm.plasma)
for i, pval in enumerate(pdd['target_amps']):
legendlabel = f'data, amp. = {pval:.4f} V'
color = colormap(i / (len(pdd['target_amps']) - 1))
label = f'cos_data_{qb}_{i}'
self.plot_dicts[label] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['ramsey_phases'],
'yvals': pdd['qb_msmt_vals'][qb][i],
'xlabel': r'Ramsey phase, $\phi$',
'xunit': 'deg',
'ylabel': 'Excited state population',
'linestyle': '',
'color': color,
'setlabel': legendlabel,
'do_legend': False,
'legend_bbox_to_anchor': (1, 1),
'legend_pos': 'upper left',
}
if self.do_fitting:
for i, pval in enumerate(pdd['target_amps']):
legendlabel = f'fit, amp. = {pval:.4f} V'
color = colormap(i / (len(pdd['target_amps']) - 1))
label = f'cos_fit_{qb}_{i}'
self.plot_dicts[label] = {
'ax_id': f'param_crossections_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[label],
'plot_init': self.options_dict.get('plot_init', False),
'color': color,
'setlabel': legendlabel,
'do_legend': False,
}
# Phase contrast
self.plot_dicts[f'phase_contrast_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_contrast_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['phase_contrast'][qb] * 100,
'xlabel':self.sp.get_sweep_params_property('label', 1,
'target_amp'),
'xunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'ylabel': 'Phase contrast',
'yunit': '%',
'linestyle': '-',
'marker': 'o',
'color': 'C0',
}
# Phase offset
self.plot_dicts[f'phase_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'phase_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['phase_offset'][qb],
'xlabel':self.sp.get_sweep_params_property('label', 1,
'target_amp'),
'xunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'ylabel': 'Phase offset',
'yunit': 'deg',
'linestyle': 'none',
'marker': 'o',
'color': 'C0',
}
# Frequency offset
self.plot_dicts[f'freq_offset_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'freq_offset_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['freq_offset'][qb],
'xlabel':self.sp.get_sweep_params_property('label', 1,
'target_amp'),
'xunit': self.sp.get_sweep_params_property('unit', 1,
'target_amp'),
'ylabel': 'Freq. offset, $\\Delta f$',
'yunit': 'Hz',
'linestyle': 'none',
'marker': 'o',
'color': 'C0',
}
if not self.skip_qb_freq_fits:
# Flux
self.plot_dicts[f'flux_data_{qb}'] = {
'title': rdd['measurementstring'] +
'\n' + rdd['timestamp'] + '\n' + qb,
'ax_id': f'flux_{qb}',
'plotfn': self.plot_line,
'xvals': pdd['target_amps'],
'yvals': pdd['flux'][qb],
'xlabel': self.sp[1]['target_amp'][2],
'xunit': self.sp[1]['target_amp'][1],
'ylabel': 'Flux, $\\Phi$',
'yunit': '$\\Phi_0$',
'linestyle': 'none',
'marker': 'o',
'color': 'C0',
}
for param in ['phase_offset', 'freq_offset', 'flux']:
if param == 'flux' and self.skip_qb_freq_fits:
continue
self.plot_dicts[f'{param}_fit_{qb}'] = {
'ax_id': f'{param}_{qb}',
'plotfn': self.plot_fit,
'fit_res': self.fit_res[f'{param}_fit_{qb}'],
'plot_init': self.options_dict.get('plot_init', False),
'linestyle': '-',
'marker': '',
'color': 'C1',
}
class RabiAnalysis(MultiQubit_TimeDomain_Analysis):
def __init__(self, qb_names, *args, **kwargs):
params_dict = {}
for qbn in qb_names:
s = 'Instrument settings.'+qbn
for trans_name in ['ge', 'ef']:
params_dict[f'{trans_name}_amp180_'+qbn] = \
s+f'.{trans_name}_amp180'
params_dict[f'{trans_name}_amp90scale_'+qbn] = \
s+f'.{trans_name}_amp90_scale'
kwargs['params_dict'] = params_dict
kwargs['numeric_params'] = list(params_dict)
super().__init__(qb_names, *args, **kwargs)
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
for qbn in self.qb_names:
data = self.proc_data_dict['data_to_fit'][qbn]
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
if self.num_cal_points != 0:
data = data[:-self.num_cal_points]
cos_mod = lmfit.Model(fit_mods.CosFunc)
guess_pars = fit_mods.Cos_guess(
model=cos_mod, t=sweep_points, data=data)
guess_pars['amplitude'].vary = True
guess_pars['amplitude'].min = -10
guess_pars['offset'].vary = True
guess_pars['frequency'].vary = True
guess_pars['phase'].vary = True
self.set_user_guess_pars(guess_pars)
key = 'cos_fit_' + qbn
self.fit_dicts[key] = {
'fit_fn': fit_mods.CosFunc,
'fit_xvals': {'t': sweep_points},
'fit_yvals': {'data': data},
'guess_pars': guess_pars}
def analyze_fit_results(self):
self.proc_data_dict['analysis_params_dict'] = OrderedDict()
for qbn in self.qb_names:
fit_res = self.fit_dicts['cos_fit_' + qbn]['fit_res']
sweep_points = self.proc_data_dict['sweep_points_dict'][qbn][
'msmt_sweep_points']
self.proc_data_dict['analysis_params_dict'][qbn] = \
self.get_amplitudes(fit_res=fit_res, sweep_points=sweep_points)
self.save_processed_data(key='analysis_params_dict')
def get_amplitudes(self, fit_res, sweep_points):
# Extract the best fitted frequency and phase.
freq_fit = fit_res.best_values['frequency']
phase_fit = fit_res.best_values['phase']
freq_std = fit_res.params['frequency'].stderr
phase_std = fit_res.params['phase'].stderr
        # If the fitted phase is very close to zero, set it to exactly zero;
        # this avoids spurious negative pulse amplitudes in the candidate
        # solutions computed below.
if np.abs(phase_fit) < 0.1:
phase_fit = 0
# If phase_fit<1, the piHalf amplitude<0.
if phase_fit < 1:
log.info('The data could not be fitted correctly. '
'The fitted phase "%s" <1, which gives '
'negative piHalf '
'amplitude.' % phase_fit)
stepsize = sweep_points[1] - sweep_points[0]
if freq_fit > 2 * stepsize:
log.info('The data could not be fitted correctly. The '
'frequency "%s" is too high.' % freq_fit)
n = np.arange(-2, 10)
piPulse_vals = (n*np.pi - phase_fit)/(2*np.pi*freq_fit)
piHalfPulse_vals = (n*np.pi + np.pi/2 - phase_fit)/(2*np.pi*freq_fit)
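        # The fit model is cos(2*pi*freq*t + phase): candidate pi-pulse
        # amplitudes solve argument = n*pi (oscillation extrema), candidate
        # pi/2-pulse amplitudes solve argument = n*pi + pi/2 (zero crossings);
        # the smallest admissible candidates are selected below.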
# find piHalfPulse
try:
piHalfPulse = \
np.min(piHalfPulse_vals[piHalfPulse_vals >= sweep_points[1]])
n_piHalf_pulse = n[piHalfPulse_vals==piHalfPulse]
except ValueError:
piHalfPulse = np.asarray([])
if piHalfPulse.size == 0 or piHalfPulse > max(sweep_points):
i = 0
            while (i < piHalfPulse_vals.size and
                   piHalfPulse_vals[i] < min(sweep_points)):
                i += 1
piHalfPulse = piHalfPulse_vals[i]
n_piHalf_pulse = n[i]
# find piPulse
try:
if piHalfPulse.size != 0:
piPulse = \
                    np.min(piPulse_vals[piPulse_vals >= piHalfPulse])
import numpy as np
import rllab.misc.logger as logger
from rllab.sampler import parallel_sampler
from rllab.sampler.base import Sampler
from rllab.misc import ext
from rllab.misc import special
from rllab.misc import tensor_utils
from rllab.algos import util
def local_truncate_paths(paths, max_samples):
"""
Truncate the list of paths so that the total number of samples is almost equal to max_samples. This is done by
removing extra paths at the end of the list. But here, we do NOT make the last path shorter.
:param paths: a list of paths
:param max_samples: the absolute maximum number of samples
:return: a list of paths, truncated so that the number of samples adds up to max-samples
"""
# chop samples collected by extra paths
# make a copy
paths = list(paths)
total_n_samples = sum(len(path["rewards"]) for path in paths)
while len(paths) > 0 and total_n_samples - len(paths[-1]["rewards"]) >= max_samples:
total_n_samples -= len(paths.pop(-1)["rewards"])
return paths
class BatchSamplerPlus(Sampler):
def __init__(self, algo, **kwargs):
"""
:type algo: BatchPolopt
"""
self.algo = algo
self.experience_replay = []
self.env_interacts_memory = []
self.env_interacts = 0
self.total_env_interacts = 0
self.mean_path_len = 0
def start_worker(self):
parallel_sampler.populate_task(self.algo.env, self.algo.policy, scope=self.algo.scope)
def shutdown_worker(self):
parallel_sampler.terminate_task(scope=self.algo.scope)
def obtain_samples(self, itr):
cur_params = self.algo.policy.get_param_values()
paths = parallel_sampler.sample_paths(
policy_params=cur_params,
max_samples=self.algo.batch_size,
max_path_length=self.algo.max_path_length,
scope=self.algo.scope,
)
"""log_likelihoods for importance sampling"""
for path in paths:
logli = self.algo.policy.distribution.log_likelihood(path["actions"],path["agent_infos"])
path["log_likelihood"] = logli
"""keep data use per iteration approximately fixed"""
if not(self.algo.all_paths):
paths = local_truncate_paths(paths, self.algo.batch_size)
"""keep track of path length"""
self.env_interacts = sum([len(path["rewards"]) for path in paths])
self.total_env_interacts += self.env_interacts
self.mean_path_len = float(self.env_interacts)/len(paths)
"""manage experience replay for old batch reuse"""
self.experience_replay.append(paths)
self.env_interacts_memory.append(self.env_interacts)
if len(self.experience_replay) > self.algo.batch_aggregate_n:
self.experience_replay.pop(0)
self.env_interacts_memory.pop(0)
return paths
def process_samples(self, itr, paths):
"""
we will ignore paths argument and only use experience replay.
note: if algo.batch_aggregate_n = 1, then the experience replay will
only contain the most recent batch, and so len(all_paths) == 1.
"""
if self.algo.exploration_bonus:
self.compute_exploration_bonuses_and_statistics()
self.compute_epoch_weights()
all_paths = []
all_baselines = []
all_returns = []
self.IS_coeffs = [[] for paths in self.experience_replay]
for paths, weight, age in zip(self.experience_replay,self.weights,self.age):
b_paths, b_baselines, b_returns = self.process_single_batch(paths, weight, age)
all_paths += b_paths
all_baselines += [b_baselines]
all_returns += [b_returns]
samples_data = self.create_samples_dict(all_paths)
"""log all useful info"""
self.record_statistics(itr, all_paths, all_baselines, all_returns)
"""update vf and exploration bonus model"""
self.update_parametrized_models()
return samples_data
def compute_exploration_bonuses_and_statistics(self):
for paths in self.experience_replay:
for path in paths:
path["bonuses"] = self.algo.exploration_bonus.get_bonus(path)
self.bonus_total = sum([
sum([
sum(path["bonuses"])
for path in paths])
for paths in self.experience_replay])
self.bonus_mean = self.bonus_total / sum(self.env_interacts_memory)
self.new_bonus_total = sum([sum(path["bonuses"]) for path in self.experience_replay[-1]])
self.new_bonus_mean = self.new_bonus_total / self.env_interacts_memory[-1]
self.bonus_baseline = self.algo.exploration_lambda * \
min(0,self.bonus_mean / max(1,np.abs(self.bonus_mean)))
def compute_epoch_weights(self):
"""create weights, with highest weight on most recent batch"""
self.raw_weights = np.array(
[self.algo.batch_aggregate_coeff**j for j in range(len(self.experience_replay))],
dtype='float'
)
self.raw_weights /= sum(self.raw_weights)
self.raw_weights = self.raw_weights[::-1]
self.weights = self.raw_weights.copy()
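        # Worked example (assuming batch_aggregate_coeff=0.5 and 3 stored
        # batches): raw weights [1, 0.5, 0.25] -> normalized
        # [0.571, 0.286, 0.143] -> reversed [0.143, 0.286, 0.571], so the most
        # recent batch (last in the replay buffer) receives the largest weight.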
"""reweight the weights by how many paths are in that batch """
if self.algo.relative_weights:
total_paths = sum([len(paths) for paths in self.experience_replay])
for j in range(len(self.weights)):
self.weights[j] *= total_paths / len(self.experience_replay[j])
self.age = np.arange(len(self.experience_replay))[::-1]
def process_single_batch(self, paths, weight, age):
baselines = []
returns = []
if hasattr(self.algo.baseline, "predict_n"):
all_path_baselines = self.algo.baseline.predict_n(paths)
else:
all_path_baselines = [self.algo.baseline.predict(path) for path in paths]
for idx, path in enumerate(paths):
path_baselines = np.append(all_path_baselines[idx], 0)
deltas = path["rewards"] + \
self.algo.discount * path_baselines[1:] - \
path_baselines[:-1]
"""exploration bonuses"""
if self.algo.exploration_bonus:
path["bonuses"] *= self.algo.exploration_lambda
if self.algo.normalize_bonus:
path["bonuses"] /= max(1,np.abs(self.bonus_mean))
if self.algo.nonnegative_bonus_mean:
path["bonuses"] -= self.bonus_baseline
deltas += path["bonuses"]
"""recompute agent infos for old data"""
"""(necessary for correct reuse of old data)"""
if age > 0:
self.update_agent_infos(path)
"""importance sampling and batch aggregation"""
path["weights"] = weight * np.ones_like(path["rewards"])
if age > 0 and self.algo.importance_sampling:
self.compute_and_apply_importance_weights(path,age)
path["advantages"] = special.discount_cumsum(
deltas, self.algo.discount * self.algo.gae_lambda)
path["returns"] = special.discount_cumsum(path["rewards"], self.algo.discount)
baselines.append(path_baselines[:-1])
returns.append(path["returns"])
return paths, baselines, returns
def update_agent_infos(self,path):
"""
        this updates the agent dist infos (i.e., mean & variance of a Gaussian
        policy dist) so that we can compute the probability of taking these
        actions under the most recent policy.
        meanwhile, the log likelihood of taking the actions under the original
        behavior policy can still be found in path["log_likelihood"].
"""
state_info_list = [path["agent_infos"][k] for k in self.algo.policy.state_info_keys]
input_list = tuple([path["observations"]] + state_info_list)
cur_dist_info = self.algo.dist_info_vars_func(*input_list)
for k in self.algo.policy.distribution.dist_info_keys:
path["agent_infos"][k] = cur_dist_info[k]
def compute_and_apply_importance_weights(self,path,age):
new_logli = self.algo.policy.distribution.log_likelihood(path["actions"],path["agent_infos"])
logli_diff = new_logli - path["log_likelihood"]
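        # 'pd' (per-decision) mode: the IS weight at step t is the product of
        # per-step likelihood ratios up to t, obtained here via a reversed
        # cumulative sum of the per-step log ratios; 'pt' (per-trajectory)
        # mode uses a single ratio for the whole path.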
if self.algo.decision_weight_mode=='pd':
logli_diff = logli_diff[::-1]
log_decision_weighted_IS_coeffs = special.discount_cumsum(logli_diff,1)
IS_coeff = np.exp(log_decision_weighted_IS_coeffs[::-1])
elif self.algo.decision_weight_mode=='pt':
IS_coeff = np.exp(np.sum(logli_diff))
if self.algo.clip_IS_coeff_above:
            IS_coeff = np.minimum(IS_coeff, self.algo.IS_coeff_upper_bound)
import tempfile, os, glob
from scipy.stats import norm as ndist
from traitlets import (HasTraits,
Integer,
Unicode,
Float,
Integer,
Instance,
Dict,
Bool,
default)
import numpy as np
import regreg.api as rr
from selection.algorithms.lasso import lasso, lasso_full, lasso_full_modelQ
from selection.algorithms.sqrt_lasso import choose_lambda
from selection.truncated.gaussian import truncated_gaussian_old as TG
from selection.randomized.lasso import lasso as random_lasso_method, form_targets
from selection.randomized.modelQ import modelQ as randomized_modelQ
from utils import BHfilter
from selection.randomized.base import restricted_estimator
# Rpy
import rpy2.robjects as rpy
from rpy2.robjects import numpy2ri
methods = {}
class generic_method(HasTraits):
need_CV = False
selectiveR_method = False
wide_ok = True # ok for p>= n?
# Traits
q = Float(0.2)
method_name = Unicode('Generic method')
model_target = Unicode()
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
(self.X,
self.Y,
self.l_theory,
self.l_min,
self.l_1se,
self.sigma_reid) = (X,
Y,
l_theory,
l_min,
l_1se,
sigma_reid)
def select(self):
raise NotImplementedError('abstract method')
@classmethod
def register(cls):
methods[cls.__name__] = cls
def selected_target(self, active, beta):
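        # Selected-model target: Sigma[E, E]^{-1} Sigma[E, :] beta, i.e. the
        # coefficients of the best linear predictor of X beta that uses only
        # the active feature set E.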
C = self.feature_cov[active]
Q = C[:,active]
return np.linalg.inv(Q).dot(C.dot(beta))
def full_target(self, active, beta):
return beta[active]
def get_target(self, active, beta):
if self.model_target not in ['selected', 'full']:
raise ValueError('Gaussian methods only have selected or full targets')
if self.model_target == 'full':
return self.full_target(active, beta)
else:
return self.selected_target(active, beta)
# Knockoff selection
class knockoffs_mf(generic_method):
method_name = Unicode('Knockoffs')
knockoff_method = Unicode('Second order')
model_target = Unicode("full")
def select(self):
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, fdr=q)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
            return np.asarray(V, int), np.asarray(V, int)
except:
return [], []
knockoffs_mf.register()
class knockoffs_sigma(generic_method):
factor_method = 'asdp'
method_name = Unicode('Knockoffs')
knockoff_method = Unicode("ModelX (asdp)")
model_target = Unicode("full")
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
numpy2ri.activate()
# see if we've factored this before
have_factorization = False
if not os.path.exists('.knockoff_factorizations'):
os.mkdir('.knockoff_factorizations')
factors = glob.glob('.knockoff_factorizations/*npz')
for factor_file in factors:
factor = np.load(factor_file)
feature_cov_f = factor['feature_cov']
if ((feature_cov_f.shape == feature_cov.shape) and
(factor['method'] == cls.factor_method) and
np.allclose(feature_cov_f, feature_cov)):
have_factorization = True
print('found factorization: %s' % factor_file)
cls.knockoff_chol = factor['knockoff_chol']
if not have_factorization:
print('doing factorization')
cls.knockoff_chol = factor_knockoffs(feature_cov, cls.factor_method)
numpy2ri.deactivate()
def select(self):
numpy2ri.activate()
rpy.r.assign('chol_k', self.knockoff_chol)
rpy.r('''
knockoffs = function(X) {
mu = rep(0, ncol(X))
mu_k = X # sweep(X, 2, mu, "-") %*% SigmaInv_s
X_k = mu_k + matrix(rnorm(ncol(X) * nrow(X)), nrow(X)) %*%
chol_k
return(X_k)
}
''')
numpy2ri.deactivate()
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, fdr=q, knockoffs=knockoffs)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
            return np.asarray(V, int), np.asarray(V, int)
except:
return [], []
knockoffs_sigma.register()
def factor_knockoffs(feature_cov, method='asdp'):
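    # Computes (via R) the Cholesky factor of the model-X knockoff covariance
    # 2 * diag(s) - diag(s) Sigma^{-1} diag(s) and caches it on disk so that
    # later runs with the same feature covariance can reuse the factorization.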
numpy2ri.activate()
rpy.r.assign('Sigma', feature_cov)
rpy.r.assign('method', method)
rpy.r('''
# Compute the Cholesky -- from create.gaussian
diag_s = diag(switch(method, equi = create.solve_equi(Sigma),
sdp = create.solve_sdp(Sigma), asdp = create.solve_asdp(Sigma)))
if (is.null(dim(diag_s))) {
diag_s = diag(diag_s, length(diag_s))
}
SigmaInv_s = solve(Sigma, diag_s)
Sigma_k = 2 * diag_s - diag_s %*% SigmaInv_s
chol_k = chol(Sigma_k)
''')
knockoff_chol = np.asarray(rpy.r('chol_k'))
SigmaInv_s = np.asarray(rpy.r('SigmaInv_s'))
diag_s = np.asarray(rpy.r('diag_s'))
np.savez('.knockoff_factorizations/%s.npz' % (os.path.split(tempfile.mkstemp()[1])[1],),
method=method,
feature_cov=feature_cov,
knockoff_chol=knockoff_chol)
return knockoff_chol
class knockoffs_sigma_equi(knockoffs_sigma):
knockoff_method = Unicode('ModelX (equi)')
factor_method = 'equi'
knockoffs_sigma_equi.register()
class knockoffs_orig(generic_method):
    wide_ok = False # requires at least n>p
method_name = Unicode("Knockoffs")
knockoff_method = Unicode('Candes & Barber')
model_target = Unicode('full')
def select(self):
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, statistic=stat.glmnet_lambdadiff, fdr=q, knockoffs=create.fixed)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
            V = np.asarray(V, int)
return V, V
except:
return [], []
knockoffs_orig.register()
class knockoffs_fixed(generic_method):
    wide_ok = False # requires at least n>p
method_name = Unicode("Knockoffs")
knockoff_method = Unicode('Fixed')
model_target = Unicode('full')
def select(self):
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, fdr=q, knockoffs=create.fixed)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
            return np.asarray(V, int), np.asarray(V, int)
except:
return [], []
knockoffs_fixed.register()
# Liu, Markovic, Tibs selection
class parametric_method(generic_method):
confidence = Float(0.95)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
generic_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self._fit = False
def select(self):
if not self._fit:
self.method_instance.fit()
self._fit = True
active_set, pvalues = self.generate_pvalues()
if len(pvalues) > 0:
selected = [active_set[i] for i in BHfilter(pvalues, q=self.q)]
return selected, active_set
else:
return [], active_set
class liu_theory(parametric_method):
sigma_estimator = Unicode('relaxed')
method_name = Unicode("Liu")
lambda_choice = Unicode("theory")
model_target = Unicode("full")
dispersion = Float(0.)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
parametric_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
n, p = X.shape
if n < p:
self.method_name = 'ROSI'
self.lagrange = l_theory * np.ones(X.shape[1])
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = lasso_full.gaussian(self.X, self.Y, self.lagrange * np.sqrt(n))
return self._method_instance
def generate_summary(self, compute_intervals=False):
if not self._fit:
self.method_instance.fit()
self._fit = True
X, Y, lagrange, L = self.X, self.Y, self.lagrange, self.method_instance
n, p = X.shape
if len(L.active) > 0:
if self.sigma_estimator == 'reid' and n < p:
dispersion = self.sigma_reid**2
elif self.dispersion != 0:
dispersion = self.dispersion
else:
dispersion = None
S = L.summary(compute_intervals=compute_intervals, dispersion=dispersion)
return S
def generate_pvalues(self):
S = self.generate_summary(compute_intervals=False)
if S is not None:
active_set = np.array(S['variable'])
pvalues = np.asarray(S['pval'])
return active_set, pvalues
else:
return [], []
def generate_intervals(self):
S = self.generate_summary(compute_intervals=True)
if S is not None:
active_set = np.array(S['variable'])
lower, upper = np.asarray(S['lower_confidence']), np.asarray(S['upper_confidence'])
return active_set, lower, upper
else:
return [], [], []
liu_theory.register()
class liu_aggressive(liu_theory):
lambda_choice = Unicode("aggressive")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
liu_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 0.8
liu_aggressive.register()
class liu_modelQ_pop_aggressive(liu_aggressive):
method_name = Unicode("Liu (ModelQ population)")
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = lasso_full_modelQ(self.feature_cov * n, self.X, self.Y, self.lagrange * np.sqrt(n))
return self._method_instance
liu_modelQ_pop_aggressive.register()
class liu_modelQ_semi_aggressive(liu_aggressive):
method_name = Unicode("Liu (ModelQ semi-supervised)")
B = 10000 # how many samples to use to estimate E[XX^T]
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
cls._chol = np.linalg.cholesky(feature_cov)
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
# draw sample of X for semi-supervised method
_chol = self._chol
p = _chol.shape[0]
Q = 0
batch_size = int(self.B/10)
for _ in range(10):
X_semi = np.random.standard_normal((batch_size, p)).dot(_chol.T)
Q += X_semi.T.dot(X_semi)
Q += self.X.T.dot(self.X)
Q /= (10 * batch_size + self.X.shape[0])
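# Note (added for clarity): Q pools the 10 * batch_size synthetic rows drawn from
# N(0, feature_cov) with the n observed rows of X, giving a semi-supervised
# estimate of E[X X^T]:
#   Q ~= (sum_batches X_semi^T X_semi + X^T X) / (10 * batch_size + n)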
n, p = self.X.shape
self._method_instance = lasso_full_modelQ(Q * self.X.shape[0], self.X, self.Y, self.lagrange * np.sqrt(n))
return self._method_instance
liu_modelQ_semi_aggressive.register()
class liu_sparseinv_aggressive(liu_aggressive):
method_name = Unicode("ROSI")
"""
Force the use of the debiasing matrix.
"""
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = lasso_full.gaussian(self.X, self.Y, self.lagrange * np.sqrt(n))
self._method_instance.sparse_inverse = True
return self._method_instance
liu_sparseinv_aggressive.register()
class liu_aggressive_reid(liu_aggressive):
sigma_estimator = Unicode('Reid')
pass
liu_aggressive_reid.register()
class liu_CV(liu_theory):
need_CV = True
lambda_choice = Unicode("CV")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
liu_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_min * np.ones(X.shape[1])
liu_CV.register()
class liu_1se(liu_theory):
need_CV = True
lambda_choice = Unicode("1se")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
liu_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_1se * np.ones(X.shape[1])
liu_1se.register()
class liu_sparseinv_1se(liu_1se):
method_name = Unicode("ROSI")
"""
Force the use of the debiasing matrix.
"""
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = lasso_full.gaussian(self.X, self.Y, self.lagrange * np.sqrt(n))
self._method_instance.sparse_inverse = True
return self._method_instance
liu_sparseinv_1se.register()
class liu_sparseinv_1se_known(liu_1se):
method_name = Unicode("ROSI - known")
dispersion = Float(1.)
"""
Force the use of the debiasing matrix.
"""
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = lasso_full.gaussian(self.X, self.Y, self.lagrange * np.sqrt(n))
self._method_instance.sparse_inverse = True
return self._method_instance
liu_sparseinv_1se_known.register()
class liu_R_theory(liu_theory):
selectiveR_method = True
method_name = Unicode("Liu (R code)")
def generate_pvalues(self):
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('y', self.Y)
rpy.r.assign('sigma_reid', self.sigma_reid)
rpy.r('y = as.numeric(y)')
rpy.r.assign('lam', self.lagrange[0])
rpy.r('''
p = ncol(X);
n = nrow(X);
sigma_est = 1.
if (p >= n) {
sigma_est = sigma_reid
} else {
sigma_est = sigma(lm(y ~ X - 1))
}
penalty_factor = rep(1, p);
lam = lam / sqrt(n); # lambdas are passed sqrt(n)-free from the python code
soln = selectiveInference:::solve_problem_glmnet(X, y, lam, penalty_factor=penalty_factor, loss="ls")
PVS = selectiveInference:::inference_group_lasso(X, y,
soln, groups=1:ncol(X),
lambda=lam, penalty_factor=penalty_factor,
sigma_est, loss="ls", algo="Q",
construct_ci=FALSE)
active_vars=PVS$active_vars - 1 # for 0-based
pvalues = PVS$pvalues
''')
pvalues = np.asarray(rpy.r('pvalues'))
active_set = np.asarray(rpy.r('active_vars'))
numpy2ri.deactivate()
if len(active_set) > 0:
return active_set, pvalues
else:
return [], []
except:
return [np.nan], [np.nan] # some R failure occurred
liu_R_theory.register()
class liu_R_aggressive(liu_R_theory):
lambda_choice = Unicode('aggressive')
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
liu_R_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 0.8
liu_R_aggressive.register()
class lee_full_R_theory(liu_theory):
wide_OK = False # requires at least n>p
method_name = Unicode("Lee (R code)")
selectiveR_method = True
def generate_pvalues(self):
numpy2ri.activate()
rpy.r.assign('x', self.X)
rpy.r.assign('y', self.Y)
rpy.r('y = as.numeric(y)')
rpy.r.assign('sigma_reid', self.sigma_reid)
rpy.r.assign('lam', self.lagrange[0])
rpy.r('''
sigma_est=sigma_reid
n = nrow(x);
gfit = glmnet(x, y, standardize=FALSE, intercept=FALSE)
lam = lam / sqrt(n); # lambdas are passed sqrt(n)-free from the python code
if (lam < max(abs(t(x) %*% y) / n)) {
beta = coef(gfit, x=x, y=y, s=lam, exact=TRUE)[-1]
out = fixedLassoInf(x, y, beta, lam*n, sigma=sigma_est, type='full', intercept=FALSE)
active_vars=out$vars - 1 # for 0-based
pvalues = out$pv
} else {
pvalues = NULL
active_vars = numeric(0)
}
''')
pvalues = np.asarray(rpy.r('pvalues'))
active_set = np.asarray(rpy.r('active_vars'))
numpy2ri.deactivate()
if len(active_set) > 0:
return active_set, pvalues
else:
return [], []
lee_full_R_theory.register()
class lee_full_R_aggressive(lee_full_R_theory):
lambda_choice = Unicode("aggressive")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
lee_full_R_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 0.8
lee_full_R_aggressive.register()
# Unrandomized selected
class lee_theory(parametric_method):
model_target = Unicode("selected")
method_name = Unicode("Lee")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
parametric_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1])
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = lasso.gaussian(self.X, self.Y, self.lagrange * np.sqrt(n))
return self._method_instance
def generate_summary(self, compute_intervals=False):
if not self._fit:
self.method_instance.fit()
self._fit = True
X, Y, lagrange, L = self.X, self.Y, self.lagrange, self.method_instance
if len(L.active) > 0:
S = L.summary(compute_intervals=compute_intervals, alternative='onesided')
return S
def generate_pvalues(self):
S = self.generate_summary(compute_intervals=False)
if S is not None:
active_set = np.array(S['variable'])
pvalues = np.asarray(S['pval'])
return active_set, pvalues
else:
return [], []
def generate_intervals(self):
S = self.generate_summary(compute_intervals=True)
if S is not None:
active_set = np.array(S['variable'])
lower, upper = np.asarray(S['lower_confidence']), np.asarray(S['upper_confidence'])
return active_set, lower, upper
else:
return [], [], []
def point_estimator(self):
X, Y, lagrange, L = self.X, self.Y, self.lagrange, self.method_instance
n, p = X.shape
beta_full = np.zeros(p)
if self.estimator == "LASSO":
beta_full[L.active] = L.soln
else:
beta_full[L.active] = L.onestep_estimator
return L.active, beta_full
lee_theory.register()
class lee_CV(lee_theory):
need_CV = True
lambda_choice = Unicode("CV")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
lee_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_min * np.ones(X.shape[1])
lee_CV.register()
class lee_1se(lee_theory):
need_CV = True
lambda_choice = Unicode("1se")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
lee_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_1se * np.ones(X.shape[1])
lee_1se.register()
class lee_aggressive(lee_theory):
lambda_choice = Unicode("aggressive")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
lee_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = 0.8 * l_theory * np.ones(X.shape[1])
lee_aggressive.register()
class lee_weak(lee_theory):
lambda_choice = Unicode("weak")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
lee_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = 2 * l_theory * np.ones(X.shape[1])
lee_weak.register()
class sqrt_lasso(parametric_method):
method_name = Unicode('SqrtLASSO')
kappa = Float(0.7)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
parametric_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = self.kappa * choose_lambda(X)
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
self._method_instance = lasso.sqrt_lasso(self.X, self.Y, self.lagrange)
return self._method_instance
def generate_summary(self, compute_intervals=False):
X, Y, lagrange, L = self.X, self.Y, self.lagrange, self.method_instance
n, p = X.shape
X = X / np.sqrt(n)
if len(L.active) > 0:
S = L.summary(compute_intervals=compute_intervals, alternative='onesided')
return S
def generate_pvalues(self):
S = self.generate_summary(compute_intervals=False)
if S is not None:
active_set = np.array(S['variable'])
pvalues = np.asarray(S['pval'])
return active_set, pvalues
else:
return [], []
def generate_intervals(self):
S = self.generate_summary(compute_intervals=True)
if S is not None:
active_set = np.array(S['variable'])
lower, upper = np.asarray(S['lower_confidence']), np.asarray(S['upper_confidence'])
return active_set, lower, upper
else:
return [], [], []
sqrt_lasso.register()
# Randomized selected
class randomized_lasso(parametric_method):
method_name = Unicode("Randomized LASSO")
model_target = Unicode("selected")
lambda_choice = Unicode("theory")
randomizer_scale = Float(1)
ndraw = 10000
burnin = 1000
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
parametric_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1])
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
mean_diag = np.mean((self.X ** 2).sum(0))
self._method_instance = random_lasso_method.gaussian(self.X,
self.Y,
feature_weights = self.lagrange * np.sqrt(n),
ridge_term=np.std(self.Y) * np.sqrt(mean_diag) / np.sqrt(n),
randomizer_scale=self.randomizer_scale * np.std(self.Y) * np.sqrt(n))
return self._method_instance
def generate_summary(self, compute_intervals=False):
X, Y, lagrange, rand_lasso = self.X, self.Y, self.lagrange, self.method_instance
n, p = X.shape
if not self._fit:
signs = self.method_instance.fit()
self._fit = True
signs = rand_lasso.fit()
active_set = np.nonzero(signs)[0]
active = signs != 0
# estimates sigma
# JM: for transparency it's better not to have this buried deep in the code
X_active = X[:,active_set]
rpy.r.assign('X_active', X_active)
rpy.r.assign('Y', Y)
rpy.r('X_active=as.matrix(X_active)')
rpy.r('Y=as.numeric(Y)')
rpy.r('sigma_est = sigma(lm(Y~ X_active - 1))')
dispersion = rpy.r('sigma_est')
print("dispersion (sigma est for Python)", dispersion)
(observed_target,
cov_target,
cov_target_score,
alternatives) = form_targets(self.model_target,
rand_lasso.loglike,
rand_lasso._W,
active,
**{'dispersion': dispersion})
if active.sum() > 0:
_, pvalues, intervals = rand_lasso.summary(observed_target,
cov_target,
cov_target_score,
alternatives,
level=0.9,
ndraw=self.ndraw,
burnin=self.burnin,
compute_intervals=compute_intervals)
return active_set, pvalues, intervals
else:
return [], [], []
def generate_pvalues(self, compute_intervals=False):
active_set, pvalues, _ = self.generate_summary(compute_intervals=compute_intervals)
if len(active_set) > 0:
return active_set, pvalues
else:
return [], []
def generate_intervals(self):
active_set, _, intervals = self.generate_summary(compute_intervals=True)
if len(active_set) > 0:
return active_set, intervals[:,0], intervals[:,1]
else:
return [], [], []
class randomized_lasso_CV(randomized_lasso):
need_CV = True
lambda_choice = Unicode("CV")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_min * np.ones(X.shape[1])
class randomized_lasso_1se(randomized_lasso):
need_CV = True
lambda_choice = Unicode("1se")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_1se * np.ones(X.shape[1])
randomized_lasso.register(), randomized_lasso_CV.register(), randomized_lasso_1se.register()
# More aggressive lambda choice
class randomized_lasso_aggressive(randomized_lasso):
lambda_choice = Unicode("aggressive")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 0.8
class randomized_lasso_aggressive_half(randomized_lasso):
lambda_choice = Unicode('aggressive')
randomizer_scale = Float(0.5)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 0.8
class randomized_lasso_weak_half(randomized_lasso):
lambda_choice = Unicode('weak')
randomizer_scale = Float(0.5)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 2.
randomized_lasso_weak_half.register()
class randomized_lasso_aggressive_quarter(randomized_lasso):
randomizer_scale = Float(0.25)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 0.8
randomized_lasso_aggressive.register(), randomized_lasso_aggressive_half.register(), randomized_lasso_aggressive_quarter.register()
# Randomized selected smaller randomization
class randomized_lasso_half(randomized_lasso):
randomizer_scale = Float(0.5)
pass
class randomized_lasso_half_CV(randomized_lasso_CV):
need_CV = True
randomizer_scale = Float(0.5)
pass
class randomized_lasso_half_1se(randomized_lasso_1se):
need_CV = True
randomizer_scale = Float(0.5)
pass
randomized_lasso_half.register(), randomized_lasso_half_CV.register(), randomized_lasso_half_1se.register()
# selective mle
class randomized_lasso_mle(randomized_lasso_aggressive_half):
method_name = Unicode("Randomized MLE")
randomizer_scale = Float(0.5)
model_target = Unicode("selected")
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = randomized_modelQ(self.feature_cov * n,
self.X,
self.Y,
self.lagrange * np.sqrt(n),
randomizer_scale=self.randomizer_scale * np.std(self.Y) * np.sqrt(n))
"""
Approximation of the medial axis in a voxel model by propagating normals.
The general idea is described in the paper.
It estimates the normals on the outer crust and then propagates normals into voxels that are not yet occupied.
The normal field then grows inwards the model.
"""
from typing import Optional, Tuple, Dict
import numba
import numpy as np
from scipy import ndimage
import plotly.graph_objects as go
from reconstruction.data.chunks import ChunkGrid
from reconstruction.filters.dilate import dilate
from reconstruction.mathlib import Vec3f, normalize_vec
from reconstruction.render.cloud_render import CloudRender
from reconstruction.render.voxel_render import VoxelRender
from reconstruction.utils import timed
_CONST_NORMAL_DIRECTIONS = np.array([
normalize_vec(np.array(p, dtype=np.float32) - 1) if p != (1, 1, 1) else (0, 0, 0) for p in np.ndindex(3, 3, 3)
], dtype=np.float32)
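# Illustrative check (added; an assumption, not part of the original module): the
# lookup table above maps each of the 27 neighbour offsets of a voxel to the unit
# vector pointing from the centre voxel towards that neighbour; the centre offset
# (1, 1, 1) maps to the zero vector.
def _normal_direction_example():
    offsets = list(np.ndindex(3, 3, 3))
    centre = offsets.index((1, 1, 1))
    corner = offsets.index((0, 0, 0))
    assert np.allclose(_CONST_NORMAL_DIRECTIONS[centre], 0.0)
    assert np.allclose(_CONST_NORMAL_DIRECTIONS[corner],
                       normalize_vec(np.array([-1.0, -1.0, -1.0], dtype=np.float32)))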
@numba.njit(parallel=True, fastmath=True)
def normal_cone_angles(normals: np.ndarray, mask: np.ndarray, threshold=0.5 * np.pi, min_norm: float = 1e-15):
assert normals.ndim == 4
size = normals.shape[0]
assert normals.shape == (size, size, size, 3)
assert mask.shape == (size, size, size)
result = np.zeros((size - 2, size - 2, size - 2), dtype=np.bool8)
for i in numba.pndindex((size - 2, size - 2, size - 2)):
# Collect normals for position i
current = np.empty((26, 3), dtype=np.float32) # 26 possible neighbors
ci: numba.uint32 = 0
for n_o, o in enumerate(np.ndindex((3, 3, 3))):
if o != (1, 1, 1):
x, y, z = i[0] + o[0], i[1] + o[1], i[2] + o[2]
if mask[x, y, z]:
value = normals[x, y, z]
norm = np.linalg.norm(value)
if norm > min_norm: # Only add if norm is valid
current[ci] = value / norm
ci += 1
if ci > 3:
valid = current[:ci]
# Check angle between all valid normals
result[i[0], i[1], i[2]] = np.any(np.arccos(valid @ valid.T) > threshold)  # assumption: flag voxels whose neighbour normals span an angle larger than threshold
"""Script that calculates the parameters for a log normal distribution given the input
To use: python calculate_parameters file1.csv file2.csv ... fileN.csv [optional output_dir=output]
The details of the calculations in this script are in the appendix of the docs.
"""
import sys, csv
from scipy.optimize import minimize, Bounds, NonlinearConstraint
from scipy.stats import norm, lognorm
import numpy as np
def main(files, output_dir):
to_ignore = 0
for file in files:
company_sizes = read_file(file)
parameters = {}
options = []
for key, size_dist in company_sizes.items():
option_1 = max_likelihood(size_dist)
option_2 = match_expectation(size_dist)
options.append((option_1, option_2))
if option_1 is not None:
var = lognorm.var(option_1[1],scale=np.exp(option_1[0]))
elif option_2 is not None:
option_1 = option_2
var = lognorm.var(option_2[1],scale=np.exp(option_2[0]))
'''
This code is based on https://github.com/ekwebb/fNRI which in turn is based on https://github.com/ethanfetaya/NRI
(MIT licence)
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.colors import ListedColormap
import matplotlib.collections as mcoll
import torch as torch
from matplotlib.patches import Ellipse
def gaussian(x, y, xmean, ymean, sigma):
# gaussian to used as fit.
return np.exp(-((x-xmean) ** 2 + (y-ymean) ** 2) / (2 * sigma ** 2))
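# Small usage sketch (added for illustration): evaluate the Gaussian bump on a
# coordinate grid, e.g. to overlay a fitted blob on a trajectory plot.
def _gaussian_example():
    xs, ys = np.meshgrid(np.linspace(-1, 1, 51), np.linspace(-1, 1, 51))
    z = gaussian(xs, ys, 0.0, 0.0, 0.25)
    return z  # shape (51, 51); equals 1.0 at the centre and decays radially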
def draw_lines(output,output_i,linestyle='-',alpha=1,darker=False,linewidth=2):
"""
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
"""
loc = np.array(output[output_i,:,:,0:2])
loc = np.transpose( loc, [1,2,0] )
x = loc[:,0,:]
y = loc[:,1,:]
x_min = np.min(x)
x_max = np.max(x)
y_min = np.min(y)
y_max = np.max(y)
max_range = max( y_max-y_min, x_max-x_min )
xmin = (x_min+x_max)/2-max_range/2-0.1
xmax = (x_min+x_max)/2+max_range/2+0.1
ymin = (y_min+y_max)/2-max_range/2-0.1
ymax = (y_min+y_max)/2+max_range/2+0.1
cmaps = [ 'Purples', 'Greens', 'Blues', 'Oranges', 'Reds', 'Purples', 'Greens', 'Blues', 'Oranges', 'Reds' ]
cmaps = [ matplotlib.cm.get_cmap(cmap, 512) for cmap in cmaps ]
cmaps = [ ListedColormap(cmap(np.linspace(0., 0.8, 256))) for cmap in cmaps ]
if darker:
cmaps = [ ListedColormap(cmap(np.linspace(0.2, 0.8, 256))) for cmap in cmaps ]
for i in range(loc.shape[-1]):
lc = colorline(loc[:,0,i], loc[:,1,i], cmap=cmaps[i],linestyle=linestyle,alpha=alpha,linewidth=linewidth)
return xmin, ymin, xmax, ymax
def draw_lines_animation(output,linestyle='-',alpha=1,darker=False,linewidth=2, animationtype = 'default'):
"""
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
"""
# animation for output used to show how physical and computational errors propagate through system
global xmin, xmax, ymin, ymax
# output here is of form [perturbation, particles, timestep,(x,y)]
import matplotlib.pyplot as plt
from matplotlib import animation
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
# scaling of variables.
loc_new = | np.array(output) | numpy.array |
# -*- coding: utf-8 -*-
"""
Module for mathematical analysis of voltage traces from electrophysiology.
AUTHOR: <NAME>
"""
import scipy.stats
import numpy as np
import math
import logging
import sys
from scipy import interpolate
import operator
import pprint
pp = pprint.PrettyPrinter(indent=4)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def print_comment_v(text, warning=False):
print_comment(text, True, warning)
def print_comment(text, print_it=False, warning=False):
prefix = "pyelectro >>> "
if warning:
prefix += "WARNING "
if not isinstance(text, str):
text = text.decode("ascii")
if print_it:
print("%s%s" % (prefix, text.replace("\n", "\n" + prefix)))
def voltage_plot(t, v, title=None):
"""
Plot electrophysiology recording.
"""
from matplotlib import pyplot as plt
plt.xlabel("Time (ms)")
plt.ylabel("Voltage (mV)")
plt.title(title)
plt.grid()
plt.plot(t, v)
plt.show()
def smooth(x, window_len=11, window="hanning"):
"""Smooth the data using a window with requested size.
This function is useful for smoothing out experimental data.
This method utilises the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) at both ends so that transient parts are minimized
at the beginning and end of the output signal.
:param x: the input signal
:param window_len: the dimension of the smoothing window; should be an odd integer
:param window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman', flat window will produce a moving average smoothing.
:return: smoothed signal
example:
.. code-block:: python
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
.. seealso::
numpy.hanning
numpy.hamming
numpy.bartlett
numpy.blackman
numpy.convolve
scipy.signal.lfilter
"""
if x.ndim != 1:
raise (ValueError, "smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise (ValueError, "Input vector needs to be bigger than window size.")
if window_len < 3:
return x
if window not in ["flat", "hanning", "hamming", "bartlett", "blackman"]:
raise ValueError(
    "Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
)
s = np.r_[x[(window_len - 1):0:-1], x, x[-1:-window_len:-1]]
if window == "flat": # moving average
w = np.ones(window_len, "d")
else:
w = eval("np." + window + "(window_len)")
y = np.convolve(w / w.sum(), s, mode="valid")
edge = int(window_len / 2)
return y[edge:-edge]
def linear_fit(t, y):
"""Fits data to a line
:param t: time vector
:param y: variable which varies with time (such as voltage)
:returns: Gradient M for a formula of the type y=C+M*x
"""
vals = np.array(y)
m, C = np.polyfit(t, vals, 1)
return m
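# Usage sketch (added for illustration): recover the slope of a noisy ramp.
def _linear_fit_example():
    t = np.linspace(0, 1, 100)
    v = -70.0 + 5.0 * t + np.random.normal(scale=0.01, size=t.shape)
    return linear_fit(t, v)  # approximately 5.0 for this synthetic trace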
def three_spike_adaptation(t, y):
"""Linear fit of amplitude vs time of first three AP spikes
Initial action potential amplitudes may vary substantially
and then settle down.
:param t: time vector (AP times)
:param y: corresponding AP amplitude
:returns: Gradient M for a formula of the type y=C+M*x for first three action potentials
"""
t = | np.array(t) | numpy.array |
import numpy as np
import scipy.sparse
class vert_grid:
def __init__(self,AP=None,BP=None,p_sfc=1013.25):
if (AP is None) or (BP is None) or (AP.size != BP.size):
# Throw error?
print('Inconsistent vertical grid specification')
self.AP = np.array(AP)
self.BP = | np.array(BP) | numpy.array |
import subprocess
import numpy as np
import lmfit as lm
import scipy.special as sp
import scipy.constants as cs
from astropy.stats import jackknife_resampling
import nmrglue as ng
import ast
import pandas as pd
from more_itertools import pairwise
import statistics as st
def tedor_ideal(t_mix, a, dist, t2, j_cc, obs='C13', pulsed='N15', vr=14000, return_t=False):
"""
Makes a SpinEvolution input file from template file "tedor_ideal_template", calls SpinEvolution, parses the output,
and applies phenomenological scaling and exponential relaxation.
The tedor_ideal is a calculation for interpreting and ultimately fitting ZF-TEDOR build-up curves
Parameters
----------
a: float, scaling factor
dist: float, distance between 13C-15N
t2: float, $T_2$ relaxations time
vr: float, MAS speed in HZ
j_cc: float, carbon carbon J coupling in Hz
return_t: bool, should the function return t=np.arange(0, n)*tr
t_mix: array of mixing experimental mixing times in ms
obs: string, the observed nucleus for the TEDOR experiment
pulsed: string, the nucleus with the REDOR pulses on it
Returns
-------
signal: array, len(t_mix)
or
time; signal: array, len(n); array, len(t_mix)
"""
# Build the simulation program from the template
sim_params = {'dist': dist, 'vr': vr / 1000, 'tr': 1 / vr, 'obs': obs, 'pulsed': pulsed}
with open('templates/tedor_ideal_template', 'r') as fid:
template = fid.read()
with open('templates/tedor_ideal_step', 'w') as fid:
fid.write(template.format(**sim_params))
cmd = ['/opt/spinev/spinev', 'templates/tedor_ideal_step']
# Run the simulation
subprocess.call(cmd)
# Parse the results
output_file = 'templates/tedor_ideal_step_re.dat'
results = np.loadtxt(output_file)
time = results[:, 0]
signal = results[:, 1]
# Apply phenomenological corrections
signal = a * signal * (np.cos(np.pi * (j_cc * 1000 / 2))**2) * np.exp(-time / t2)
time_points = []
signal_points = []
for i in t_mix:
ind = (np.where((np.trunc(time * 100) / 100) == i)[0][0])
time_points.append(time[ind])
signal_points.append(signal[ind])
if return_t:
return time_points, signal_points
else:
return signal_points
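# Example call (illustrative only; it needs the SpinEvolution binary at
# /opt/spinev/spinev and the template files referenced above, and the mixing
# times below are made up):
#
#     t_mix = np.array([1.0, 2.0, 4.0, 8.0, 12.0])
#     s = tedor_ideal(t_mix, a=0.6, dist=4.0, t2=10.0, j_cc=35.0)
#
# The returned list holds the simulated intensities at the requested mixing
# times, after the scaling, J-coupling and T2 corrections applied above.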
def tedor_ideal_2n(t_mix, a, dist, t2, x, y, z, j_cc, obs='C13', pulsed='N15', vr=14000, return_t=False):
"""
Makes a SpinEvolution input file from template file "tedor_ideal_template_2N" using the CNN.cor coordinates file,
calls SpinEvolution, parses the output, and applies phenomenological scaling and exponential relaxation.
Parameters
----------
a: float, scaling factor
dist: float, distance between 13C-15N
t2: float, $T_2$ relaxations time
vr: float, MAS speed in HZ
j_cc: float, carbon carbon J coupling in Hz
return_t: bool, should the function return t=np.arange(0, n)*tr
t_mix: array of mixing experimental mixing times in ms
x: float, distance of second N from C
y: float, distance of second N from C
z: float, distance of second N from C
obs: string, the observed nucleus for the TEDOR experiment
pulsed: string, the nucleus with the REDOR pulses on it
Returns
-------
signal: array, len(t_mix)
or
time; signal: array, len(n); array, len(t_mix)
"""
# Build the simulation program from the template
sim_params = {'dist': dist, 'x': x, 'y': y, 'z': z, 'vr': vr / 1000, 'tr': 1 / vr, 'j_cc': j_cc, 'obs': obs,
'pulsed': pulsed}
with open('templates/CNN.cor', 'r') as fid:
template = fid.read()
with open('templates/CNN_step.cor', 'w') as fid:
fid.write(template.format(**sim_params))
with open('templates/tedor_ideal_template_2N', 'r') as fid:
template = fid.read()
with open('templates/tedor_ideal_step_2N', 'w') as fid:
fid.write(template.format(**sim_params))
cmd = ['/opt/spinev/spinev', 'templates/tedor_ideal_step_2N']
# Run the simulation
subprocess.call(cmd)
# Parse the results
output_file = 'templates/tedor_ideal_step_2N_re.dat'
results = np.loadtxt(output_file)
time = results[:, 0]
signal = results[:, 1]
# Apply phenomenological corrections
signal = a * signal * (np.cos(np.pi * (j_cc * 1000 / 2))**2) * np.exp(-time / t2)
time_points = []
signal_points = []
for i in t_mix:
ind = np.where((np.trunc(time * 100)/100) == i)[0][0]
time_points.append(time[ind])
signal_points.append(signal[ind])
if return_t:
return time_points, signal_points
else:
return signal_points
def tedor_fitting_spinev(data, err, t_mix, p0, p1, obs, pulsed, vr=14000, spins=2, method='nelder'):
"""
:param data: array, transfer efficiency values for fitting
:param err: array, error for each data point
:param t_mix: array, mixing times in ms
:param p0: array, initial guesses for [dist, j_cc, t2, and a]
:param p1: bool array len(3) -- allows you to turn on/off varying j_cc, t2, and a
:param obs: string, observed nucleus
:param pulsed: string, other nucleus
:param vr: MAS frequency
:param spins: float, total number of spins in system, either 2 or 3
:param method: fitting method -- for lmfit
:return: result - fitting result structure
"""
if spins == 2:
spin_model = tedor_ideal
else:
spin_model = tedor_ideal_2n
kws = {"obs": obs, "pulsed": pulsed}
# Build a model to fit the data - SPINEV function
tedor_model = lm.Model(spin_model, **kws)
params = tedor_model.make_params()
params['dist'].set(value=p0[0], min=2, max=8)
params['j_cc'].set(value=p0[1], min=0, max=75, vary=p1[0])
params['t2'].set(value=p0[2], min=2, max=30, vary=p1[1])
params['a'].set(value=p0[3], min=0, max=1, vary=p1[2])
params['vr'].set(value=vr, min=10000, max=20000, vary=False)
if spins == 3:
params['x'].set(value=2.0, min=1.5, max=7)
params['y'].set(value=2.0, min=1.5, max=7)
params['z'].set(value=2.0, min=1.5, max=7)
# Fit the data
result = tedor_model.fit(data, t_mix=t_mix, **params, weights=err, method=method)
return result
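# Example call (illustrative only; data, err and t_mix are assumed to be arrays
# prepared by the user):
#
#     p0 = [4.0, 35.0, 10.0, 0.5]   # initial [dist, j_cc, t2, a]
#     p1 = [False, True, True]      # hold j_cc fixed, vary t2 and a
#     result = tedor_fitting_spinev(data, err, t_mix, p0, p1,
#                                   obs='C13', pulsed='N15', vr=14000)
#     print(result.fit_report())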
def tedor_analytical(t_mix, a, d_active, t2, j_cc, d_p1):
"""
Analytical equations for TEDOR fitting from Helmus et al 2008 and Jaroniec et al 2002
Uses Bessel function of first kind order 0 to simulate TEDOR behavior
Parameters
----------
a: float, scaling factor
d_active: float, dipolar coupling between 13C and 15N in Hz
d_p1: float, passive dipolar coupling between 13C and additional 15N in Hz
t2: float, $T_2$ relaxations time in ms
j_cc: float, carbon carbon J coupling in Hz
t_mix: array of mixing experimental mixing times in ms
Returns
-------
signal: array, len(t_mix)
KMM 11 May 2021
"""
t2_s = t2 / 1000 # puts t2 in terms of s, must be entered in ms
time = t_mix / 1000
signal = a * 0.5 * (1 - (sp.j0(np.sqrt(2) * d_active * time)) ** 2) * (np.cos(np.pi * (j_cc / 2)) ** 2) * \
(1 + (sp.j0( | np.sqrt(2) | numpy.sqrt |
import kfp.components as comp
def test( # noqa: C901
dataset_path: comp.InputPath(str),
feature_frames,
feature_hop_length,
feature_n_fft,
feature_n_mels,
feature_power,
fit_batch_size,
fit_compile_loss,
fit_compile_optimizer,
fit_epochs,
fit_shuffle,
fit_validation_split,
fit_verbose,
max_fpr,
models_dir: comp.InputPath(),
anomaly_dir: comp.OutputPath(str),
results_dir: comp.OutputPath(str),
mlpipelinemetrics_path: comp.OutputPath(),
labels_dir: comp.OutputPath(),
):
import csv
import glob
import itertools
import json
import os
import re
import sys
import librosa
import librosa.core
import librosa.feature
import numpy
import tensorflow as tf
from sklearn import metrics
# Parse pipeline parameters
feature_frames = int(feature_frames)
feature_hop_length = int(feature_hop_length)
feature_n_fft = int(feature_n_fft)
feature_n_mels = int(feature_n_mels)
feature_power = float(feature_power)
fit_batch_size = int(fit_batch_size)
fit_epochs = int(fit_epochs)
fit_validation_split = float(fit_validation_split)
fit_verbose = int(fit_verbose)
max_fpr = float(max_fpr)
def select_dirs(dataset_path):
"""
return :
dirs : list [ str ]
load base directory list of data
"""
print("load_directory <- data")
dir_path = os.path.abspath(dataset_path + "{base}/*".format(base="/data"))
dirs = sorted(glob.glob(dir_path))
return dirs
def file_to_vector_array(
file_name, n_mels=64, frames=5, n_fft=1024, hop_length=512, power=2.0
):
"""
convert file_name to a vector array.
file_name : str
target .wav file
return : numpy.array( numpy.array( float ) )
vector array
* dataset.shape = (dataset_size, feature_vector_length)
"""
dims = n_mels * frames
y, sr = file_load(file_name)
mel_spectrogram = librosa.feature.melspectrogram(
y=y, sr=sr, n_fft=n_fft, hop_length=hop_length, n_mels=n_mels, power=power
)
log_mel_spectrogram = (
20.0 / power * numpy.log10(mel_spectrogram + sys.float_info.epsilon)
)
vector_array_size = len(log_mel_spectrogram[0, :]) - frames + 1
if vector_array_size < 1:
return numpy.empty((0, dims))
vector_array = numpy.zeros((vector_array_size, dims))
for t in range(frames):
vector_array[:, n_mels * t : n_mels * (t + 1)] = log_mel_spectrogram[
:, t : t + vector_array_size
].T
return vector_array
def file_load(wav_name, mono=False):
"""
load .wav file.
wav_name : str
target .wav file
mono : boolean
    When loading a multi-channel file with this param True, the returned data is merged to mono
return : tuple ( numpy.array( float ), int )
    audio data and its sampling rate
"""
try:
return librosa.load(wav_name, sr=None, mono=mono)
except Exception:
print("Error: file_broken or not exists!! : {}".format(wav_name))
def load_model(file_path):
"""
return:
model loaded from file_path
"""
return tf.keras.models.load_model(file_path)
def get_machine_id_list_for_test(target_dir, dir_name="test", ext="wav"):
"""
target_dir : str
base directory path of "dev_data" or "eval_data"
test_dir_name : str (default="test")
directory containing test data
ext : str (default="wav)
file extension of audio files
return :
machine_id_list : list [ str ]
list of machine IDs extracted from the names of test files
"""
# create test files
dir_path = os.path.abspath(
"{dir}/{dir_name}/*.{ext}".format(
dir=target_dir, dir_name=dir_name, ext=ext
)
)
file_paths = sorted(glob.glob(dir_path))
machine_id_list = sorted(
list(
set(
itertools.chain.from_iterable(
[re.findall("id_[0-9][0-9]", ext_id) for ext_id in file_paths]
)
)
)
)
return machine_id_list
def test_file_list_generator(
target_dir,
id_name,
dir_name="test",
prefix_normal="normal",
prefix_anomaly="anomaly",
ext="wav",
):
"""
target_dir : str
base directory path of the dev_data or eval_data
id_name : str
id of wav file in <<test_dir_name>> directory
dir_name : str (default="test")
directory containing test data
prefix_normal : str (default="normal")
normal directory name
prefix_anomaly : str (default="anomaly")
anomaly directory name
ext : str (default="wav")
file extension of audio files
return :
if the mode is "development":
test_files : list [ str ]
file list for test
test_labels : list [ boolean ]
label info. list for test
* normal/anomaly = 0/1
if the mode is "evaluation":
test_files : list [ str ]
file list for test
"""
print("target_dir : {}".format(target_dir + "_" + id_name))
normal_files = sorted(
glob.glob(
"{dir}/{dir_name}/{prefix_normal}_{id_name}*.{ext}".format(
dir=target_dir,
dir_name=dir_name,
prefix_normal=prefix_normal,
id_name=id_name,
ext=ext,
)
)
)
normal_labels = numpy.zeros(len(normal_files))
anomaly_files = sorted(
glob.glob(
"{dir}/{dir_name}/{prefix_anomaly}_{id_name}*.{ext}".format(
dir=target_dir,
dir_name=dir_name,
prefix_anomaly=prefix_anomaly,
id_name=id_name,
ext=ext,
)
)
)
anomaly_labels = numpy.ones(len(anomaly_files))
files = numpy.concatenate((normal_files, anomaly_files), axis=0)
labels = numpy.concatenate((normal_labels, anomaly_labels), axis=0)
print("test_file num : {num}".format(num=len(files)))
if len(files) == 0:
print("Exception: no_wav_file!!")
print("\n========================================")
return files, labels
def save_csv(save_file_path, save_data):
"""
Write csv data to specified path
"""
with open(save_file_path, "w", newline="") as f:
writer = csv.writer(f, lineterminator="\n")
writer.writerows(save_data)
dirs = select_dirs(dataset_path)
csv_lines = []
metrics_list = []
for idx, target_dir in enumerate(dirs):
print("\n===========================")
print(
"[{idx}/{total}] {dirname}".format(
dirname=target_dir, idx=idx + 1, total=len(dirs)
)
)
machine_type = os.path.split(target_dir)[1]
model_file_path = "{model}/model_{machine_type}.hdf5".format(
model=models_dir + "/model", machine_type=machine_type
)
# load model file
print("============== MODEL LOAD ==============")
if not os.path.exists(model_file_path):
print("{} model not found ".format(machine_type))
sys.exit(-1)
model = load_model(model_file_path)
model.summary()
# results by type
csv_lines.append([machine_type])
csv_lines.append(["id", "AUC", "pAUC"])
performance = []
machine_id_list = get_machine_id_list_for_test(target_dir)
print("Machine_id_list: " + str(machine_id_list))
for id_str in machine_id_list:
# load test file
test_files, y_true = test_file_list_generator(target_dir, id_str)
anomaly_score_list = []
print("\n============== BEGIN TEST FOR A MACHINE ID ==============")
y_scores = [0.0 for k in test_files]
for file_idx, file_path in enumerate(test_files):
try:
data = file_to_vector_array(
file_path,
n_mels=feature_n_mels,
frames=feature_frames,
n_fft=feature_n_fft,
hop_length=feature_hop_length,
power=feature_power,
)
errors = numpy.mean(
numpy.square(data - model.predict(data)), axis=1
)
y_scores[file_idx] = | numpy.mean(errors) | numpy.mean |
import cv2
import numpy as np
from styx_msgs.msg import TrafficLight
class TLClassifier(object):
def __init__(self):
pass
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# Transform to HSV and simply count the number of color within the range
hsv_img = cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
# red has hue 0 - 10 & 160 - 180 add another filter
# TODO use Guassian mask
lower_red1 = np.array([0, 100, 100])
upper_red1 = np.array([10, 255, 255])
lower_red2 = np.array([160, 100, 100])
upper_red2 = np.array([179, 255, 255])
mask_red_lower = cv2.inRange(hsv_img, lower_red1, upper_red1)
mask_red_upper = cv2.inRange(hsv_img, lower_red2, upper_red2)
if cv2.countNonZero(mask_red_lower) + cv2.countNonZero(mask_red_upper) > 70:
return TrafficLight.RED
lower_yellow = np.array([40.0/360*255, 100, 100])
upper_yellow = np.array([66.0/360*255, 255, 255])
mask_yellow = cv2.inRange(hsv_img, lower_yellow, upper_yellow)
if cv2.countNonZero(mask_yellow) > 70:
return TrafficLight.YELLOW
lower_green = | np.array([90.0/360*255, 100, 100]) | numpy.array |
import h5py
import numpy as np
import os
from PIL import Image
import nibabel as nib
from sklearn.preprocessing import MinMaxScaler
from numpy import save
sample_counter = 0
for j in range(100, 1000):
img_path = '/scratch-second/arismu/Master_Thesis_Codes/My_TransUNet/synapse_train/DET0000%s_avg.nii' % (j)
if os.path.isfile(img_path):
print('image size: %d bytes'%os.path.getsize(img_path))
img = nib.load(img_path)
img_np= np.array(img.dataobj)
print(img_np)
img_np_clipped = np.ndarray.clip(img_np, -125, 275)
img_np_norm = (img_np_clipped - np.min(img_np_clipped))/(np.max(img_np_clipped)- np.min(img_np_clipped))
shape = np.shape(img_np_norm)
list_2D_pil = []
list_2D_np = []
sample_counter = sample_counter + 1
for i in range(shape[2]):
img_2D = Image.fromarray(img_np_norm[:, :, i]) #obtain 2D PIL Images
list_2D_pil.append(img_2D)
list_2D_np = np.array(list_2D_pil[i]) #convert pil image to numpy array
np.savez('/scratch-second/arismu/Master_Thesis_Codes/My_TransUNet/data/Synapse/train_npz/case%s_slice%s' % (sample_counter, i), list_2D_np)
else:
pass
for j in range(1000, 10000):
img_path = '/scratch-second/arismu/Master_Thesis_Codes/My_TransUNet/synapse_train/DET000%s_avg.nii' % (j)
if os.path.isfile(img_path):
print('image size: %d bytes'%os.path.getsize(img_path))
img = nib.load(img_path)
img_np= | np.array(img.dataobj) | numpy.array |
"""Test module for MFE class errors and warnings."""
import pytest
import numpy as np
from pymfe.mfe import MFE
from pymfe import _internal
from tests.utils import load_xy
GNAME = "errors-warnings"
class TestErrorsWarnings:
"""TestClass dedicated to test General metafeatures."""
def test_error_empty_data_1(self):
with pytest.raises(TypeError):
MFE().fit(X=None, y=None)
def test_error_sample_size(self):
with pytest.raises(ValueError):
MFE(lm_sample_frac=-1)
def test_error_empty_data_2(self):
with pytest.raises(TypeError):
X, y = load_xy(0)
model = MFE().fit(X=X.values, y=y.values)
model.X = None
model.extract()
def test_error_empty_data_3(self):
with pytest.raises(ValueError):
MFE().fit(X=[], y=[])
def test_error_empty_data_4(self):
with pytest.raises(TypeError):
X, y = load_xy(0)
model = MFE().fit(X=X.values, y=y.values)
model.y = None
model.extract()
def test_error_data_wrong_shape(self):
with pytest.raises(ValueError):
X, y = load_xy(0)
MFE().fit(X=X.values, y=y.values[:-1])
@pytest.mark.parametrize(
"group_name",
[
"land-marking",
"infotheo",
"generalgeneral",
"generalstatistical",
("general", "statistical", "invalid"),
("invalid", ),
0,
None,
[],
set(),
tuple(),
])
def test_error_invalid_groups_1(self, group_name):
with pytest.raises(ValueError):
MFE(groups=group_name)
@pytest.mark.parametrize(
"group_name",
[
1,
lambda x: x,
range(1, 5),
])
def test_error_invalid_groups_2(self, group_name):
with pytest.raises(TypeError):
MFE(groups=group_name)
def test_error_random_state(self):
with pytest.raises(ValueError):
MFE(random_state=1.5)
def test_error_folds(self):
with pytest.raises(ValueError):
MFE(num_cv_folds=1.5)
def test_error_cat_cols_1(self):
with pytest.raises(ValueError):
X, y = load_xy(0)
MFE().fit(X=X.values, y=y.values, cat_cols=1)
def test_error_cat_cols_2(self):
with pytest.raises(ValueError):
X, y = load_xy(0)
MFE().fit(X=X.values, y=y.values, cat_cols="all")
def test_error_invalid_timeopt(self):
with pytest.raises(ValueError):
X, y = load_xy(0)
MFE(measure_time="invalid").fit(X=X.values, y=y.values)
@pytest.mark.parametrize(
"value, group_name, allow_none, allow_empty",
[
(None, "groups", False, True),
(None, "groups", False, False),
("", "group", False, False),
("", "group", True, False),
("invalid", "groups", False, False),
("all", "invalid", False, False),
("invalid", "groups", False, True),
("invalid", "groups", True, False),
("invalid", "groups", True, True),
("mean", "summary", True, True),
("all", "summary", True, True),
("num_inst", "features", True, True),
("all", "features", True, True),
])
def test_error_process_generic_option_1(self,
value,
group_name,
allow_none,
allow_empty):
with pytest.raises(ValueError):
_internal.process_generic_option(
value=value,
group_name=group_name,
allow_none=allow_none,
allow_empty=allow_empty)
def test_error_process_generic_option_2(self):
with pytest.raises(TypeError):
_internal.process_generic_option(
values=[1, 2, 3],
group_name=None)
def test_error_process_generic_option_3(self):
with pytest.raises(TypeError):
_internal.process_generic_option(
values=[1, 2, 3],
group_name="timeopt")
@pytest.mark.parametrize(
"values, group_name, allow_none, allow_empty",
[
(None, "groups", False, True),
(None, "groups", False, False),
("", "group", False, False),
([], "groups", True, False),
([], "groups", False, False),
("invalid", "groups", False, False),
("all", "invalid", False, False),
("invalid", "groups", False, True),
("invalid", "groups", True, False),
("invalid", "groups", True, True),
("mean", "summary", True, True),
("all", "summary", True, True),
("num_inst", "features", True, True),
("all", "features", True, True),
])
def test_error_process_generic_set_1(self,
values,
group_name,
allow_none,
allow_empty):
with pytest.raises(ValueError):
_internal.process_generic_set(
values=values,
group_name=group_name,
allow_none=allow_none,
allow_empty=allow_empty)
def test_error_process_generic_set_2(self):
with pytest.raises(TypeError):
_internal.process_generic_set(
values=[1, 2, 3],
group_name=None)
@pytest.mark.parametrize(
"summary",
[
"meanmean",
"invalid",
])
def test_error_unknown_summary(self, summary):
with pytest.raises(ValueError):
MFE(summary=summary)
@pytest.mark.parametrize(
"features",
[
None,
[],
"",
])
def test_error_invalid_features(self, features):
with pytest.raises(ValueError):
MFE(features=features)
@pytest.mark.parametrize(
"score",
[
None,
[],
"",
"invalid",
"accuracyaccuracy",
])
def test_error_invalid_score(self, score):
with pytest.raises(ValueError):
MFE(score=score)
@pytest.mark.parametrize(
"rescale",
[
"",
"invalid",
"minmax",
])
def test_error_invalid_rescale_1(self, rescale):
with pytest.raises(ValueError):
X, y = load_xy(0)
MFE().fit(X=X.values, y=y.values, rescale=rescale)
def test_error_invalid_rescale_2(self):
with pytest.raises(TypeError):
X, y = load_xy(0)
MFE().fit(X=X.values, y=y.values, rescale=[])
@pytest.mark.parametrize(
"features, groups",
[
("invalid", "all"),
("invalid", "general"),
("mean", "info-theory"),
("nr_instt", "general"),
])
def test_warning_invalid_features(self, features, groups):
with pytest.warns(UserWarning):
X, y = load_xy(0)
model = MFE(features=features,
groups=groups).fit(X=X.values, y=y.values)
model.extract()
@pytest.mark.parametrize(
"groups, precomp_groups",
[
("all", "invalid"),
("general", "statistical"),
("info-theory", "general"),
(["general", "statistical"], ["general", "info-theory"]),
])
def test_warning_invalid_precomp(self, groups, precomp_groups):
with pytest.warns(UserWarning):
X, y = load_xy(0)
MFE(groups=groups).fit(X=X.values,
y=y.values,
precomp_groups=precomp_groups)
def test_warning_invalid_argument(self):
with pytest.warns(UserWarning):
X, y = load_xy(0)
model = MFE(features="sd").fit(X=X.values, y=y.values)
model.extract(sd={"ddof": 1, "invalid": "value?"})
def test_error_rescale_data(self):
X, y = load_xy(0)
with pytest.raises(ValueError):
_internal.rescale_data(X, option="42")
def test_error_transform_num(self):
X, y = load_xy(0)
with pytest.raises(TypeError):
_internal.transform_num(X, num_bins='')
with pytest.raises(ValueError):
_internal.transform_num(X, num_bins=-1)
def test_isnumeric_check(self):
assert _internal.isnumeric([]) is False
def test_error_check_data(self):
X, y = load_xy(0)
with pytest.raises(TypeError):
_internal.check_data(X, y='')
def test_errors__fill_col_ind_by_type(self):
X, y = load_xy(0)
with pytest.raises(TypeError):
mfe = MFE()
mfe._fill_col_ind_by_type()
X = [[1, 2, 'a', 'b']]*10 + [[3, 4, 'c', 'd']]*10
y = [0]*10 + [1]*10
mfe = MFE()
mfe.X, mfe.y = np.array(X), np.array(y)
mfe._fill_col_ind_by_type(cat_cols=None)
assert mfe._attr_indexes_cat == ()
mfe = MFE()
mfe.X, mfe.y = np.array(X), | np.array(y) | numpy.array |
'''
The Caffe data layer for training label classifier.
This layer will parse pixel values and actionness labels to the network.
'''
import sys
sys.path.insert(0, '/home/rhou/caffe/python')
import caffe
from dataset.ucf_sports import UcfSports
import numpy as np
from utils.cython_bbox import bbox_overlaps
class RecDataLayer():
def __init__(self, net, model):
self._batch_size = 1
self._depth = 8
self._height = 300
self._width = 400
self.dataset = UcfSports('test', [self._height, self._width],
'/home/rhou/ucf_sports')
self.anchors = self.dataset.get_anchors()
caffe.set_mode_gpu()
self._net = caffe.Net(net, model, caffe.TEST)
def forward(self):
self._net.blobs['data'].reshape(self._batch_size, 3,
self._depth, self._height, self._width)
self._net.blobs['tois'].reshape(self._batch_size * 3714, 5)
[clip, labels, gt_bboxes, is_last] = self.dataset.next_val_video(random=False)
n = int(np.floor(clip.shape[0] / 8.0))
result = np.empty((n, 3714, 22))
for i in xrange(n):
batch_clip = clip[i * 8 : i * 8 + 8].transpose([3, 0, 1, 2])
batch_clip = np.expand_dims(batch_clip, axis=0)
batch_tois = np.hstack(( | np.zeros((3714, 1)) | numpy.zeros |
"""Module containing core functionality of ``astrowidgets``."""
# STDLIB
import functools
import warnings
# THIRD-PARTY
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.table import Table, vstack
# Jupyter widgets
import ipywidgets as ipyw
# Ginga
from ginga.AstroImage import AstroImage
from ginga.canvas.CanvasObject import drawCatalog
from ginga.web.jupyterw.ImageViewJpw import EnhancedCanvasView
from ginga.util.wcs import raDegToString, decDegToString
__all__ = ['ImageWidget']
# Allowed locations for cursor display
ALLOWED_CURSOR_LOCATIONS = ['top', 'bottom', None]
# List of marker names that are for internal use only
RESERVED_MARKER_SET_NAMES = ['all']
class ImageWidget(ipyw.VBox):
"""
Image widget for Jupyter notebook using Ginga viewer.
.. todo:: Any property passed to constructor has to be valid keyword.
Parameters
----------
logger : obj or ``None``
Ginga logger. For example::
from ginga.misc.log import get_logger
logger = get_logger('my_viewer', log_stderr=False,
log_file='ginga.log', level=40)
image_width, image_height : int
Dimension of Jupyter notebook's image widget.
use_opencv : bool
Let Ginga use ``opencv`` to speed up image transformation;
e.g., rotation and mosaic. If this is enabled and you
do not have ``opencv``, you will get a warning.
pixel_coords_offset : int, optional
An offset, typically either 0 or 1, to add/subtract to all
pixel values when going to/from the displayed image.
*In almost all situations the default value, ``0``, is the
correct value to use.*
"""
def __init__(self, logger=None, image_width=500, image_height=500,
use_opencv=True, pixel_coords_offset=0):
super().__init__()
# TODO: Is this the best place for this?
if use_opencv:
try:
from ginga import trcalc
trcalc.use('opencv')
except ImportError:
warnings.warn('install opencv or set use_opencv=False')
self._viewer = EnhancedCanvasView(logger=logger)
self._pixel_offset = pixel_coords_offset
self._jup_img = ipyw.Image(format='jpeg')
# Set the image margin to over the widgets default of 2px on
# all sides.
self._jup_img.layout.margin = '0'
# Set both of those to ensure consistent display in notebook
# and jupyterlab when the image is put into a container smaller
# than the image.
self._jup_img.max_width = '100%'
self._jup_img.height = 'auto'
# Set the width of the box containing the image to the desired width
self.layout.width = str(image_width)
# Note we are NOT setting the height. That is because the height
# is automatically set by the image aspect ratio.
# These need to also be set for now; ginga uses them to figure
# out what size image to make.
self._jup_img.width = image_width
self._jup_img.height = image_height
self._viewer.set_widget(self._jup_img)
# enable all possible keyboard and pointer operations
self._viewer.get_bindings().enable_all(True)
# enable draw
self.dc = drawCatalog
self.canvas = self.dc.DrawingCanvas()
self.canvas.enable_draw(True)
self.canvas.enable_edit(True)
# Make sure all of the internal state trackers have a value
# and start in a state which is definitely allowed: all are
# False.
self._is_marking = False
self._click_center = False
self._click_drag = False
self._scroll_pan = False
# Set a couple of things to match the ginga defaults
self.scroll_pan = True
self.click_drag = False
bind_map = self._viewer.get_bindmap()
# Set up right-click and drag adjusts the contrast
bind_map.map_event(None, (), 'ms_right', 'contrast')
# Shift-right-click restores the default contrast
bind_map.map_event(None, ('shift',), 'ms_right', 'contrast_restore')
# Marker
self.marker = {'type': 'circle', 'color': 'cyan', 'radius': 20}
# Maintain marker tags as a set because we do not want
# duplicate names.
self._marktags = set()
# Let's have a default name for the tag too:
self._default_mark_tag_name = 'default-marker-name'
self._interactive_marker_set_name_default = 'interactive-markers'
self._interactive_marker_set_name = self._interactive_marker_set_name_default
# coordinates display
self._jup_coord = ipyw.HTML('Coordinates show up here')
# This needs ipyevents 0.3.1 to work
self._viewer.add_callback('cursor-changed', self._mouse_move_cb)
self._viewer.add_callback('cursor-down', self._mouse_click_cb)
# Define a callback that shows the output of a print
self.print_out = ipyw.Output()
self._cursor = 'bottom'
self.children = [self._jup_img, self._jup_coord]
@property
def logger(self):
"""Logger for this widget."""
return self._viewer.logger
@property
def image_width(self):
return int(self._jup_img.width)
@image_width.setter
def image_width(self, value):
# widgets expect width/height as strings, but most users will not, so
# do the conversion.
self._jup_img.width = str(value)
self._viewer.set_window_size(self.image_width, self.image_height)
@property
def image_height(self):
return int(self._jup_img.height)
@image_height.setter
def image_height(self, value):
# widgets expect width/height as strings, but most users will not, so
# do the conversion.
self._jup_img.height = str(value)
self._viewer.set_window_size(self.image_width, self.image_height)
@property
def pixel_offset(self):
"""
An offset, typically either 0 or 1, to add/subtract to all
pixel values when going to/from the displayed image.
*In almost all situations the default value, ``0``, is the
correct value to use.*
This value cannot be modified after initialization.
"""
return self._pixel_offset
def _mouse_move_cb(self, viewer, button, data_x, data_y):
"""
Callback to display position in RA/DEC deg.
"""
if self.cursor is None: # no-op
return
image = viewer.get_image()
if image is not None:
ix = int(data_x + 0.5)
iy = int(data_y + 0.5)
try:
imval = viewer.get_data(ix, iy)
imval = '{:8.3f}'.format(imval)
except Exception:
imval = 'N/A'
val = 'X: {:.2f}, Y: {:.2f}'.format(data_x + self._pixel_offset,
data_y + self._pixel_offset)
if image.wcs.wcs is not None:
ra, dec = image.pixtoradec(data_x, data_y)
val += ' (RA: {}, DEC: {})'.format(
raDegToString(ra), decDegToString(dec))
val += ', value: {}'.format(imval)
self._jup_coord.value = val
def _mouse_click_cb(self, viewer, event, data_x, data_y):
"""
Callback to handle mouse clicks.
"""
if self.is_marking:
marker_name = self._interactive_marker_set_name
objs = []
try:
c_mark = viewer.canvas.get_object_by_tag(marker_name)
except Exception: # Nothing drawn yet
pass
else: # Add to existing marks
objs = c_mark.objects
viewer.canvas.delete_object_by_tag(marker_name)
# NOTE: By always using CompoundObject, marker handling logic
# is simplified.
obj = self._marker(x=data_x, y=data_y)
objs.append(obj)
viewer.canvas.add(self.dc.CompoundObject(*objs),
tag=marker_name)
self._marktags.add(marker_name)
with self.print_out:
print('Selected {} {}'.format(obj.x, obj.y))
elif self.click_center:
self.center_on((data_x, data_y))
with self.print_out:
print('Centered on X={} Y={}'.format(data_x + self._pixel_offset,
data_y + self._pixel_offset))
# def _repr_html_(self):
# """
# Show widget in Jupyter notebook.
# """
# from IPython.display import display
# return display(self._widget)
def load_fits(self, fitsorfn, numhdu=None, memmap=None):
"""
Load a FITS file into the viewer.
Parameters
----------
fitsorfn : str or HDU
Either a file name or an HDU (*not* an HDUList).
If file name is given, WCS in primary header is automatically
inherited. If a single HDU is given, WCS must be in the HDU
header.
numhdu : int or ``None``
Extension number of the desired HDU.
If ``None``, it is determined automatically.
memmap : bool or ``None``
Memory mapping.
If ``None``, it is determined automatically.
"""
if isinstance(fitsorfn, str):
image = AstroImage(logger=self.logger, inherit_primary_header=True)
image.load_file(fitsorfn, numhdu=numhdu, memmap=memmap)
self._viewer.set_image(image)
elif isinstance(fitsorfn, (fits.ImageHDU, fits.CompImageHDU,
fits.PrimaryHDU)):
self._viewer.load_hdu(fitsorfn)
def load_nddata(self, nddata):
"""
Load an ``NDData`` object into the viewer.
.. todo:: Add flag/masking support, etc.
Parameters
----------
nddata : `~astropy.nddata.NDData`
``NDData`` with image data and WCS.
"""
from ginga.util.wcsmod.wcs_astropy import AstropyWCS
image = AstroImage(logger=self.logger)
image.set_data(nddata.data)
_wcs = AstropyWCS(self.logger)
if nddata.wcs:
_wcs.load_header(nddata.wcs.to_header())
try:
image.set_wcs(_wcs)
except Exception as e:
print('Unable to set WCS from NDData: {}'.format(str(e)))
self._viewer.set_image(image)
def load_array(self, arr):
"""
Load a 2D array into the viewer.
.. note:: Use :meth:`load_nddata` for WCS support.
Parameters
----------
arr : array-like
2D array.
"""
self._viewer.load_data(arr)
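# Example (illustrative, `w` being an ImageWidget instance): the three loaders
# accept a FITS file name or HDU, an NDData object, or a bare 2D array:
#     w.load_fits('image.fits')              # WCS inherited from the header
#     w.load_nddata(my_nddata)               # astropy NDData with optional WCS
#     w.load_array(np.random.rand(50, 50))   # plain array, no WCS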
def center_on(self, point):
"""
Centers the view on a particular point.
Parameters
----------
point : tuple or `~astropy.coordinates.SkyCoord`
If tuple of ``(X, Y)`` is given, it is assumed
to be in data coordinates.
"""
if isinstance(point, SkyCoord):
self._viewer.set_pan(point.ra.deg, point.dec.deg, coord='wcs')
else:
self._viewer.set_pan(*(np.asarray(point) - self._pixel_offset))
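# Example (illustrative, `w` being an ImageWidget instance with a WCS-aware
# image loaded):
#     w.center_on((250, 320))                         # data/pixel coordinates
#     w.center_on(SkyCoord('05h35m17s -05d23m28s'))   # sky coordinates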
def offset_to(self, dx, dy, skycoord_offset=False):
"""
Move the center to a point that is given offset
away from the current center.
Parameters
----------
dx, dy : float
Offset value. Unit is assumed based on
``skycoord_offset``.
skycoord_offset : bool
If `True`, offset must be given in degrees.
Otherwise, they are in pixel values.
"""
if skycoord_offset:
coord = 'wcs'
else:
coord = 'data'
pan_x, pan_y = self._viewer.get_pan(coord=coord)
self._viewer.set_pan(pan_x + dx, pan_y + dy, coord=coord)
@property
def zoom_level(self):
"""
Zoom level:
* 1 means real-pixel-size.
* 2 means zoomed in by a factor of 2.
* 0.5 means zoomed out by a factor of 2.
"""
return self._viewer.get_scale()
@zoom_level.setter
def zoom_level(self, val):
if val == 'fit':
self._viewer.zoom_fit()
else:
self._viewer.scale_to(val, val)
def zoom(self, val):
"""
Zoom in or out by the given factor.
Parameters
----------
val : int
The zoom level to zoom the image.
See `zoom_level`.
"""
self.zoom_level = self.zoom_level * val
@property
def is_marking(self):
"""
`True` if in marking mode, `False` otherwise.
Marking mode means a mouse click adds a new marker.
This does not affect :meth:`add_markers`.
"""
return self._is_marking
def start_marking(self, marker_name=None,
marker=None):
"""
Start marking, with option to name this set of markers or
to specify the marker style.
"""
self._cached_state = dict(click_center=self.click_center,
click_drag=self.click_drag,
scroll_pan=self.scroll_pan)
self.click_center = False
self.click_drag = False
        # Set scroll_pan to ensure there is still a mouse-based way to pan
self.scroll_pan = True
self._is_marking = True
if marker_name is not None:
self._validate_marker_name(marker_name)
self._interactive_marker_set_name = marker_name
self._marktags.add(marker_name)
else:
self._interactive_marker_set_name = \
self._interactive_marker_set_name_default
if marker is not None:
self.marker = marker
def stop_marking(self, clear_markers=False):
"""
Stop marking mode, with option to clear markers, if desired.
Parameters
----------
clear_markers : bool, optional
If ``clear_markers`` is `False`, existing markers are
retained until :meth:`reset_markers` is called.
Otherwise, they are erased.
"""
if self.is_marking:
self._is_marking = False
self.click_center = self._cached_state['click_center']
self.click_drag = self._cached_state['click_drag']
self.scroll_pan = self._cached_state['scroll_pan']
self._cached_state = {}
if clear_markers:
self.reset_markers()
@property
def marker(self):
"""
Marker to use.
.. todo:: Add more examples.
Marker can be set as follows::
{'type': 'circle', 'color': 'cyan', 'radius': 20}
{'type': 'cross', 'color': 'green', 'radius': 20}
{'type': 'plus', 'color': 'red', 'radius': 20}
"""
# Change the marker from a very ginga-specific type (a partial
# of a ginga drawing canvas type) to a generic dict, which is
# what we expect the user to provide.
#
# That makes things like self.marker = self.marker work.
return self._marker_dict
@marker.setter
def marker(self, val):
# Make a new copy to avoid modifying the dict that the user passed in.
_marker = val.copy()
marker_type = _marker.pop('type')
if marker_type == 'circle':
self._marker = functools.partial(self.dc.Circle, **_marker)
elif marker_type == 'plus':
_marker['type'] = 'point'
_marker['style'] = 'plus'
self._marker = functools.partial(self.dc.Point, **_marker)
elif marker_type == 'cross':
_marker['type'] = 'point'
_marker['style'] = 'cross'
self._marker = functools.partial(self.dc.Point, **_marker)
else: # TODO: Implement more shapes
raise NotImplementedError(
'Marker type "{}" not supported'.format(marker_type))
# Only set this once we have successfully created a marker
self._marker_dict = val
def get_markers(self, x_colname='x', y_colname='y',
skycoord_colname='coord',
marker_name=None):
"""
Return the locations of existing markers.
Parameters
----------
x_colname, y_colname : str
Column names for X and Y data coordinates.
Coordinates returned are 0- or 1-indexed, depending
on ``self.pixel_offset``.
skycoord_colname : str
Column name for ``SkyCoord``, which contains
sky coordinates associated with the active image.
This is ignored if image has no WCS.
Returns
-------
markers_table : `~astropy.table.Table` or ``None``
Table of markers, if any, or ``None``.
"""
if marker_name is None:
marker_name = self._default_mark_tag_name
if marker_name == 'all':
            # If it wasn't for the fact that SkyCoord columns can't
# be stacked this would all fit nicely into a list
# comprehension. But they can't, so we delete the
# SkyCoord column if it is present, then add it
# back after we have stacked.
coordinates = []
tables = []
for name in self._marktags:
table = self.get_markers(x_colname=x_colname,
y_colname=y_colname,
skycoord_colname=skycoord_colname,
marker_name=name)
if table is None:
# No markers by this name, skip it
continue
try:
coordinates.extend(c for c in table[skycoord_colname])
except KeyError:
pass
else:
del table[skycoord_colname]
tables.append(table)
stacked = vstack(tables, join_type='exact')
if coordinates:
stacked[skycoord_colname] = SkyCoord(coordinates)
return stacked
# We should always allow the default name. The case
# where that table is empty will be handled in a moment.
if (marker_name not in self._marktags
and marker_name != self._default_mark_tag_name):
raise ValueError(f"No markers named '{marker_name}' found.")
try:
c_mark = self._viewer.canvas.get_object_by_tag(marker_name)
except Exception:
# No markers in this table. Issue a warning and continue
warnings.warn(f"Marker set named '{marker_name}' is empty",
category=UserWarning)
return None
image = self._viewer.get_image()
xy_col = []
if (image is None) or (image.wcs.wcs is None):
# Do not include SkyCoord column
include_skycoord = False
else:
include_skycoord = True
radec_col = []
# Extract coordinates from markers
for obj in c_mark.objects:
if obj.coord == 'data':
xy_col.append([obj.x, obj.y])
if include_skycoord:
radec_col.append([np.nan, np.nan])
elif not include_skycoord: # marker in WCS but image has none
self.logger.warning(
'Skipping ({},{}); image has no WCS'.format(obj.x, obj.y))
else: # wcs
xy_col.append([np.nan, np.nan])
radec_col.append([obj.x, obj.y])
# Convert to numpy arrays
xy_col = np.asarray(xy_col) # [[x0, y0], [x1, y1], ...]
if include_skycoord:
# [[ra0, dec0], [ra1, dec1], ...]
radec_col = np.asarray(radec_col)
# Fill in X,Y from RA,DEC
mask = np.isnan(xy_col[:, 0]) # One bool per row
if np.any(mask):
xy_col[mask] = image.wcs.wcspt_to_datapt(radec_col[mask])
# Fill in RA,DEC from X,Y
            mask = np.isnan(radec_col[:, 0])
import os
import numpy as np
import pytest
from autolens import exc
from autolens.data.array import mask
from autolens.data.array.util import mask_util
test_data_dir = "{}/../test_files/array/".format(os.path.dirname(os.path.realpath(__file__)))
class TestTotalPixels:
def test__total_image_pixels_from_mask(self):
mask = np.array([[True, False, True],
[False, False, False],
[True, False, True]])
assert mask_util.total_regular_pixels_from_mask(mask) == 5
def test__total_sub_pixels_from_mask(self):
mask = np.array([[True, False, True],
[False, False, False],
[True, False, True]])
assert mask_util.total_sub_pixels_from_mask_and_sub_grid_size(mask, sub_grid_size=2) == 20
def test__total_edge_pixels_from_mask(self):
mask = np.array([[True, True, True, True, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, False, False, False, True],
[True, True, True, True, True]])
assert mask_util.total_edge_pixels_from_mask(mask) == 8
class TestTotalSparsePixels:
def test__mask_full_false__full_pixelization_grid_pixels_in_mask(self):
ma = mask.Mask(array=np.array([[False, False, False],
[False, False, False],
[False, False, False]]), pixel_scale=1.0)
full_pix_grid_pixel_centres = np.array([[0 ,0], [0 ,1], [0 ,2], [1 ,0]])
total_masked_pixels = mask_util.total_sparse_pixels_from_mask(mask=ma,
unmasked_sparse_grid_pixel_centres=full_pix_grid_pixel_centres)
assert total_masked_pixels == 4
full_pix_grid_pixel_centres = np.array([[0 ,0], [0 ,1], [0 ,2], [1 ,0], [1 ,1], [2 ,1]])
total_masked_pixels = mask_util.total_sparse_pixels_from_mask(mask=ma,
unmasked_sparse_grid_pixel_centres=full_pix_grid_pixel_centres)
assert total_masked_pixels == 6
def test__mask_is_cross__only_pixelization_grid_pixels_in_mask_are_counted(self):
ma = mask.Mask(array=np.array([[True, False, True],
[False, False, False],
[True, False, True]]), pixel_scale=1.0)
        full_pix_grid_pixel_centres = np.array([[0 ,0], [0 ,1], [0 ,2], [1 ,0]])
import matplotlib.pyplot as plt
import numpy as np
import torch
from info_nce import InfoNCE
# Numpy helper functions
def dot(x, y):
    x = x / np.linalg.norm(x)
    # Assumed continuation (the source truncates here): normalise y as well and
    # return the cosine similarity of the two vectors.
    y = y / np.linalg.norm(y)
    return np.dot(x, y)
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
def vec_from_labels(X, xlab, Ydb):
Y = np.zeros((X.shape[0], Ydb.shape[1]))
for i, lab in enumerate(xlab):
Y[i] = Ydb[lab]
return Y
def norm_mat(mat):
return mat/np.linalg.norm(mat, axis=1)[:, None]
def pick_rnd_sample(X):
rnd_idx = np.random.randint(len(X[0]))
return X[0][rnd_idx], X[1][rnd_idx], X[2][rnd_idx]
def project_data(X, d=2000):
proj_mat = np.random.normal(size=(X.shape[1], d))
return np.dot(X, proj_mat)
def create_train_test(X, xlab, Y, nr_samp):
return (X[:nr_samp], Y[:nr_samp], xlab[:nr_samp]), (
X[nr_samp:], Y[nr_samp:], xlab[nr_samp:])
## TODO, this custom metric doesn't work
def closest_vec(y_true, y_pred):
print(y_true)
true_label = np.argmin(np.linalg.norm(y_true - X_wrd, axis=1))
pred_label = np.argmin(np.linalg.norm(y_pred - X_wrd, axis=1))
    return int(true_label == pred_label)
path_to_embeddings = './data/cifar10_w2v_embeddings.npz'
path_to_cifar_fts = './data/output.npz'
# load w2v vectors and labels
wrd_fts = np.load(path_to_embeddings)['embeddings']
wrd_fts = norm_mat(wrd_fts)
wrd_lab = np.load(path_to_embeddings)['words']
# load visual vectors and labels
vis_fts = np.load(path_to_cifar_fts)
import os
import uproot
import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler
import copy
from collections import defaultdict
from astropy.coordinates.angle_utilities import angular_separation
from astropy.coordinates import Angle
from astropy import units as u
import pandas as pd
import seaborn as sns
from pathlib import Path
from joblib import dump, load
from sklearn.metrics import confusion_matrix, f1_score
from scipy.stats import mstats
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.linear_model import (
LinearRegression,
Ridge,
RidgeClassifier,
RidgeClassifierCV,
SGDClassifier,
SGDRegressor,
)
from sklearn.ensemble import (
AdaBoostClassifier,
AdaBoostRegressor,
BaggingClassifier,
GradientBoostingClassifier,
RandomForestClassifier,
RandomForestRegressor,
)
from sklearn.neural_network import MLPRegressor, MLPClassifier
from sklearn.svm import SVR, LinearSVR, SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.multiclass import OneVsRestClassifier
from sklearn import model_selection, preprocessing, feature_selection, metrics
from sklearn.pipeline import make_pipeline
def setStyle(palette='default', bigPlot=False):
'''
A function to set the plotting style.
The function receives the colour palette name and whether it is
a big plot or not. The latter sets the fonts and marker to be bigger in case it is a big plot.
The available colour palettes are as follows:
- classic (default): A classic colourful palette with strong colours and contrast.
- modified classic: Similar to the classic, with slightly different colours.
- autumn: A slightly darker autumn style colour palette.
- purples: A pseudo sequential purple colour palette (not great for contrast).
- greens: A pseudo sequential green colour palette (not great for contrast).
To use the function, simply call it before plotting anything.
Parameters
----------
palette: str
bigPlot: bool
Raises
------
KeyError if provided palette does not exist.
'''
COLORS = dict()
COLORS['classic'] = ['#ba2c54', '#5B90DC', '#FFAB44', '#0C9FB3', '#57271B', '#3B507D',
'#794D88', '#FD6989', '#8A978E', '#3B507D', '#D8153C', '#cc9214']
COLORS['modified classic'] = ['#D6088F', '#424D9C', '#178084', '#AF99DA', '#F58D46', '#634B5B',
'#0C9FB3', '#7C438A', '#328cd6', '#8D0F25', '#8A978E', '#ffcb3d']
COLORS['autumn'] = ['#A9434D', '#4E615D', '#3C8DAB', '#A4657A', '#424D9C', '#DC575A',
'#1D2D38', '#634B5B', '#56276D', '#577580', '#134663', '#196096']
COLORS['purples'] = ['#a57bb7', '#343D80', '#EA60BF', '#B7308E', '#E099C3', '#7C438A',
'#AF99DA', '#4D428E', '#56276D', '#CC4B93', '#DC4E76', '#5C4AE4']
COLORS['greens'] = ['#268F92', '#abc14d', '#8A978E', '#0C9FB3', '#BDA962', '#B0CB9E',
'#769168', '#5E93A5', '#178084', '#B7BBAD', '#163317', '#76A63F']
COLORS['default'] = COLORS['classic']
MARKERS = ['o', 's', 'v', '^', '*', 'P', 'd', 'X', 'p', '<', '>', 'h']
LINES = [(0, ()), # solid
(0, (1, 1)), # densely dotted
(0, (3, 1, 1, 1)), # densely dashdotted
(0, (5, 5)), # dashed
(0, (3, 1, 1, 1, 1, 1)), # densely dashdotdotted
             (0, (5, 1)), # densely dashed
(0, (1, 5)), # dotted
(0, (3, 5, 1, 5)), # dashdotted
(0, (3, 5, 1, 5, 1, 5)), # dashdotdotted
(0, (5, 10)), # loosely dashed
(0, (1, 10)), # loosely dotted
(0, (3, 10, 1, 10)), # loosely dashdotted
]
if palette not in COLORS.keys():
raise KeyError('palette must be one of {}'.format(', '.join(COLORS)))
fontsize = {'default': 15, 'bigPlot': 30}
markersize = {'default': 8, 'bigPlot': 18}
plotSize = 'default'
if bigPlot:
plotSize = 'bigPlot'
plt.rc('lines', linewidth=2, markersize=markersize[plotSize])
plt.rc('axes', prop_cycle=(
cycler(color=COLORS[palette])
+ cycler(linestyle=LINES)
+ cycler(marker=MARKERS))
)
plt.rc(
'axes',
titlesize=fontsize[plotSize],
labelsize=fontsize[plotSize],
labelpad=5,
grid=True,
axisbelow=True
)
plt.rc('xtick', labelsize=fontsize[plotSize])
plt.rc('ytick', labelsize=fontsize[plotSize])
plt.rc('legend', loc='best', shadow=False, fontsize='medium')
plt.rc('font', family='serif', size=fontsize[plotSize])
return
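# Illustrative usage sketch (not part of the original module): setStyle() only changes
# matplotlib rc parameters, so it must be called before a figure is created. The palette
# name and the plotted values below are assumptions for demonstration only.
def _example_setstyle_usage():
    setStyle(palette='autumn', bigPlot=False)
    fig, ax = plt.subplots(figsize=(8, 6))
    ax.plot([0.1, 1.0, 10.0], [3.0, 2.0, 1.0], label='example curve')
    ax.set_xscale('log')
    ax.set_xlabel('E [TeV]')
    ax.legend()
    return fig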
def branches_to_read():
'''
Define a list of branches to read from the ROOT file
(faster than reading all branches).
Returns
-------
A list of branches names.
'''
branches = [
'runNumber',
'eventNumber',
'MCe0',
'MCxoff',
'MCyoff',
'size',
'ErecS',
'NImages',
'Xcore',
'Ycore',
'Xoff',
'Yoff',
'img2_ang',
'EChi2S',
'SizeSecondMax',
'NTelPairs',
'MSCW',
'MSCL',
'EmissionHeight',
'EmissionHeightChi2',
'dist',
'DispDiff',
'dESabs',
'loss',
'NTrig',
'meanPedvar_Image',
'fui',
'cross',
'R',
'ES',
'asym',
'tgrad_x',
]
return branches
def nominal_labels_train_features():
'''
Define the nominal labels variable and training features to train with.
Returns
-------
label: str, train_features: list of str
Two variables are returned:
1. the name of the variable to use as the labels in the training.
2. list of names of variables to used as the training features.
'''
labels = 'log_ang_diff'
train_features = [
'log_reco_energy',
'log_NTels_reco',
'array_distance',
'img2_ang',
'log_SizeSecondMax',
'MSCW',
'MSCL',
'log_EChi2S',
'log_EmissionHeight',
'log_EmissionHeightChi2',
'log_DispDiff',
'log_dESabs',
'NTrig',
'meanPedvar_Image',
'MSWOL',
'log_av_size',
'log_me_size',
'log_std_size',
'av_dist',
'me_dist',
'std_dist',
'av_fui',
'me_fui',
'std_fui',
'av_cross',
'me_cross',
'std_cross',
'av_R',
'me_R',
'std_R',
'av_ES',
'me_ES',
'std_ES',
'sum_loss',
'av_loss',
'me_loss',
'std_loss',
'av_asym',
'me_asym',
'std_asym',
'av_tgrad_x',
'me_tgrad_x',
'std_tgrad_x',
'camera_offset',
]
return labels, train_features
def extract_df_from_dl2(root_filename):
'''
Extract a Pandas DataFrame from a ROOT DL2 file.
Selects all events surviving gamma/hadron cuts from the DL2 file.
No direction cut is applied on the sample. TODO: should this be an option or studied further?
The list of variables included in the DataFrame is subject to change.
TODO: Study further possible variables to use.
Parameters
----------
root_filename: str or Path
The location of the DL2 root file name from which to extract the DF.
TODO: Allow using several DL2 files (in a higher level function?)
Returns
-------
A pandas DataFrame with variables to use in the regression/classification, after cuts.
'''
branches = branches_to_read()
particle_file = uproot.open(root_filename)
cuts = particle_file['DL2EventTree']
cuts_arrays = cuts.arrays(expressions='CutClass', library='np')
# Cut 1: Events surviving gamma/hadron separation and direction cuts:
mask_gamma_like_and_direction = cuts_arrays['CutClass'] == 5
# Cut 2: Events surviving gamma/hadron separation cut and not direction cut
mask_gamma_like_no_direction = cuts_arrays['CutClass'] == 0
# Cut 0: Events before gamma/hadron and direction cuts (classes 0, 5 and 7)
gamma_like_events_all = mask_gamma_like_no_direction | mask_gamma_like_and_direction
gamma_like_events_all = gamma_like_events_all | (cuts_arrays['CutClass'] == 7)
step_size = 5000 # slightly optimized on my laptop
data_dict = defaultdict(list)
for i_event, data_arrays in enumerate(uproot.iterate(
'{}:data'.format(root_filename),
step_size=step_size,
expressions=branches,
library='np')
):
if i_event > 0:
if (i_event * step_size) % 100000 == 0:
print('Extracted {} events'.format(i_event * step_size))
gamma_like_events = gamma_like_events_all[i_event * step_size:(i_event + 1) * step_size]
cut_class = cuts_arrays['CutClass'][i_event * step_size:(i_event + 1) * step_size]
cut_class = cut_class[gamma_like_events]
# Label to train with:
x_off = data_arrays['Xoff'][gamma_like_events]
y_off = data_arrays['Yoff'][gamma_like_events]
x_off_mc = data_arrays['MCxoff'][gamma_like_events]
y_off_mc = data_arrays['MCyoff'][gamma_like_events]
ang_diff = np.sqrt((x_off - x_off_mc)**2. + (y_off - y_off_mc)**2.)
# Variables for training:
runNumber = data_arrays['runNumber'][gamma_like_events]
eventNumber = data_arrays['eventNumber'][gamma_like_events]
reco_energy = data_arrays['ErecS'][gamma_like_events]
true_energy = data_arrays['MCe0'][gamma_like_events]
camera_offset = np.sqrt(x_off**2. + y_off**2.)
NTels_reco = data_arrays['NImages'][gamma_like_events]
x_cores = data_arrays['Xcore'][gamma_like_events]
y_cores = data_arrays['Ycore'][gamma_like_events]
array_distance = np.sqrt(x_cores**2. + y_cores**2.)
img2_ang = data_arrays['img2_ang'][gamma_like_events]
EChi2S = data_arrays['EChi2S'][gamma_like_events]
SizeSecondMax = data_arrays['SizeSecondMax'][gamma_like_events]
NTelPairs = data_arrays['NTelPairs'][gamma_like_events]
MSCW = data_arrays['MSCW'][gamma_like_events]
MSCL = data_arrays['MSCL'][gamma_like_events]
EmissionHeight = data_arrays['EmissionHeight'][gamma_like_events]
EmissionHeightChi2 = data_arrays['EmissionHeightChi2'][gamma_like_events]
DispDiff = data_arrays['DispDiff'][gamma_like_events]
dESabs = data_arrays['dESabs'][gamma_like_events]
NTrig = data_arrays['NTrig'][gamma_like_events]
meanPedvar_Image = data_arrays['meanPedvar_Image'][gamma_like_events]
av_size = [np.average(sizes) for sizes in data_arrays['size'][gamma_like_events]]
me_size = [np.median(sizes) for sizes in data_arrays['size'][gamma_like_events]]
std_size = [np.std(sizes) for sizes in data_arrays['size'][gamma_like_events]]
av_dist = [np.average(dists) for dists in data_arrays['dist'][gamma_like_events]]
me_dist = [np.median(dists) for dists in data_arrays['dist'][gamma_like_events]]
std_dist = [np.std(dists) for dists in data_arrays['dist'][gamma_like_events]]
av_fui = [np.average(fui) for fui in data_arrays['fui'][gamma_like_events]]
me_fui = [np.median(fui) for fui in data_arrays['fui'][gamma_like_events]]
std_fui = [np.std(fui) for fui in data_arrays['fui'][gamma_like_events]]
av_cross = [np.average(cross) for cross in data_arrays['cross'][gamma_like_events]]
me_cross = [np.median(cross) for cross in data_arrays['cross'][gamma_like_events]]
std_cross = [np.std(cross) for cross in data_arrays['cross'][gamma_like_events]]
av_R = [np.average(R) for R in data_arrays['R'][gamma_like_events]]
me_R = [np.median(R) for R in data_arrays['R'][gamma_like_events]]
std_R = [np.std(R) for R in data_arrays['R'][gamma_like_events]]
av_ES = [np.average(ES) for ES in data_arrays['ES'][gamma_like_events]]
me_ES = [np.median(ES) for ES in data_arrays['ES'][gamma_like_events]]
std_ES = [np.std(ES) for ES in data_arrays['ES'][gamma_like_events]]
sum_loss = [np.sum(losses) for losses in data_arrays['loss'][gamma_like_events]]
av_loss = [np.average(losses) for losses in data_arrays['loss'][gamma_like_events]]
me_loss = [np.median(losses) for losses in data_arrays['loss'][gamma_like_events]]
std_loss = [np.std(losses) for losses in data_arrays['loss'][gamma_like_events]]
av_asym = [np.average(asym) for asym in data_arrays['asym'][gamma_like_events]]
me_asym = [np.median(asym) for asym in data_arrays['asym'][gamma_like_events]]
std_asym = [np.std(asym) for asym in data_arrays['asym'][gamma_like_events]]
av_tgrad_x = [np.average(tgrad_x) for tgrad_x in data_arrays['tgrad_x'][gamma_like_events]]
me_tgrad_x = [np.median(tgrad_x) for tgrad_x in data_arrays['tgrad_x'][gamma_like_events]]
std_tgrad_x = [np.std(tgrad_x) for tgrad_x in data_arrays['tgrad_x'][gamma_like_events]]
data_dict['runNumber'].extend(tuple(runNumber))
data_dict['eventNumber'].extend(tuple(eventNumber))
data_dict['cut_class'].extend(tuple(cut_class))
data_dict['log_ang_diff'].extend(tuple(np.log10(ang_diff)))
data_dict['log_true_energy'].extend(tuple(np.log10(true_energy)))
data_dict['log_reco_energy'].extend(tuple(np.log10(reco_energy)))
data_dict['camera_offset'].extend(tuple(camera_offset))
data_dict['log_NTels_reco'].extend(tuple(np.log10(NTels_reco)))
data_dict['array_distance'].extend(tuple(array_distance))
data_dict['img2_ang'].extend(tuple(img2_ang))
data_dict['log_EChi2S'].extend(tuple(np.log10(EChi2S)))
data_dict['log_SizeSecondMax'].extend(tuple(np.log10(SizeSecondMax)))
data_dict['log_NTelPairs'].extend(tuple(np.log10(NTelPairs)))
data_dict['MSCW'].extend(tuple(MSCW))
data_dict['MSCL'].extend(tuple(MSCL))
data_dict['log_EmissionHeight'].extend(tuple(np.log10(EmissionHeight)))
data_dict['log_EmissionHeightChi2'].extend(tuple(np.log10(EmissionHeightChi2)))
data_dict['log_DispDiff'].extend(tuple(np.log10(DispDiff)))
data_dict['log_dESabs'].extend(tuple(np.log10(dESabs)))
data_dict['NTrig'].extend(tuple(NTrig))
data_dict['meanPedvar_Image'].extend(tuple(meanPedvar_Image))
data_dict['MSWOL'].extend(tuple(MSCW/MSCL))
data_dict['log_av_size'].extend(tuple(np.log10(av_size)))
data_dict['log_me_size'].extend(tuple(np.log10(me_size)))
data_dict['log_std_size'].extend(tuple(np.log10(std_size)))
data_dict['av_dist'].extend(tuple(av_dist))
data_dict['me_dist'].extend(tuple(me_dist))
data_dict['std_dist'].extend(tuple(std_dist))
data_dict['av_fui'].extend(tuple(av_fui))
data_dict['me_fui'].extend(tuple(me_fui))
data_dict['std_fui'].extend(tuple(std_fui))
data_dict['av_cross'].extend(tuple(av_cross))
data_dict['me_cross'].extend(tuple(me_cross))
data_dict['std_cross'].extend(tuple(std_cross))
data_dict['av_R'].extend(tuple(av_R))
data_dict['me_R'].extend(tuple(me_R))
data_dict['std_R'].extend(tuple(std_R))
data_dict['av_ES'].extend(tuple(av_ES))
data_dict['me_ES'].extend(tuple(me_ES))
data_dict['std_ES'].extend(tuple(std_ES))
data_dict['sum_loss'].extend(tuple(sum_loss))
data_dict['av_loss'].extend(tuple(av_loss))
data_dict['me_loss'].extend(tuple(me_loss))
data_dict['std_loss'].extend(tuple(std_loss))
data_dict['av_asym'].extend(tuple(av_asym))
data_dict['me_asym'].extend(tuple(me_asym))
data_dict['std_asym'].extend(tuple(std_asym))
data_dict['av_tgrad_x'].extend(tuple(av_tgrad_x))
data_dict['me_tgrad_x'].extend(tuple(me_tgrad_x))
data_dict['std_tgrad_x'].extend(tuple(std_tgrad_x))
return pd.DataFrame(data=data_dict)
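# Illustrative workflow sketch (not part of the original module): extract a DataFrame
# from a DL2 file and cache it with save_dtf() so later runs can use load_dtf() instead
# of re-reading the ROOT file. The input file name and suffix are assumptions.
def _example_extract_and_cache():
    dtf = extract_df_from_dl2('gamma_onSource.DL2.root')  # hypothetical input file
    save_dtf(dtf, suffix='gamma_onSource')
    return load_dtf(suffix='gamma_onSource')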
def save_dtf(dtf, suffix=''):
'''
Save the test dataset to disk as it is much quicker
to read the reduced pickled data than the ROOT file.
Parameters
----------
dtf: pandas DataFrames
suffix: str
The suffix to add to the file name
'''
    Path('reduced_data').mkdir(parents=True, exist_ok=True)
if suffix != '':
if not suffix.startswith('_'):
suffix = '_{}'.format(suffix)
data_file_name = Path('reduced_data').joinpath(
'dtf{}.joblib'.format(suffix)
)
dump(dtf, data_file_name, compress=3)
return
def load_dtf(suffix=''):
'''
Load the reduced data from reduced_data/.
Parameters
----------
suffix: str
The suffix added to the file name (the nominal is dtf.joblib)
Returns
-------
dtf: pandas DataFrames of the reduced data
'''
if suffix != '':
if not suffix.startswith('_'):
suffix = '_{}'.format(suffix)
data_file_name = Path('reduced_data').joinpath(
'dtf{}.joblib'.format(suffix)
)
return load(data_file_name)
def bin_data_in_energy(dtf, n_bins=20, log_e_reco_bins=None, return_bins=False):
'''
Bin the data in dtf to n_bins with equal statistics.
Parameters
----------
dtf: pandas DataFrame
The DataFrame containing the data.
Must contain a 'log_reco_energy' column (used to calculate the bins).
n_bins: int, default=20
The number of reconstructed energy bins to divide the data in.
log_e_reco_bins: array-like, None
In case it is not none, it will be used as the energy bins to divide the data sample
return_bins: bool
If true, the function will return the log_e_reco_bins used to bin the data.
Returns
-------
A dictionary of DataFrames (keys=energy ranges, values=separated DataFrames).
'''
dtf_e = dict()
if log_e_reco_bins is None:
log_e_reco_bins = mstats.mquantiles(
dtf['log_reco_energy'].values,
np.linspace(0, 1, n_bins)
)
for i_e_bin, log_e_high in enumerate(log_e_reco_bins):
if i_e_bin == 0:
continue
mask = np.logical_and(
dtf['log_reco_energy'] > log_e_reco_bins[i_e_bin - 1],
dtf['log_reco_energy'] < log_e_high
)
this_dtf = dtf[mask]
this_e_range = '{:3.3f} < E < {:3.3f} TeV'.format(
10**log_e_reco_bins[i_e_bin - 1],
10**log_e_high
)
if len(this_dtf) < 1:
raise RuntimeError('The range {} is empty'.format(this_e_range))
dtf_e[this_e_range] = this_dtf
if return_bins:
return dtf_e, log_e_reco_bins
else:
return dtf_e
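# Illustrative usage sketch (not part of the original module): bin a cached DataFrame
# into energy bins of equal statistics and reuse the same bin edges on a second sample,
# so both samples share identical energy ranges. The suffixes used are assumptions.
def _example_energy_binning():
    dtf_gamma = load_dtf(suffix='gamma_onSource')
    dtf_e_gamma, log_e_bins = bin_data_in_energy(dtf_gamma, n_bins=20, return_bins=True)
    dtf_proton = load_dtf(suffix='proton')
    dtf_e_proton = bin_data_in_energy(dtf_proton, log_e_reco_bins=log_e_bins)
    return dtf_e_gamma, dtf_e_proton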
def extract_energy_bins(e_ranges):
'''
Extract the energy bins from the list of energy ranges.
This is a little weird function which can probably be avoided if we use a class
instead of a namespace. However, it is useful for now so...
Parameters
----------
e_ranges: list of str
A list of energy ranges in string form as '{:3.3f} < E < {:3.3f} TeV'.
Returns
-------
energy_bins: list of floats
List of energy bin edges given in e_ranges.
'''
energy_bins = list()
for this_range in e_ranges:
low_e = float(this_range.split()[0])
energy_bins.append(low_e)
energy_bins.append(float(list(e_ranges)[-1].split()[4])) # Add also the upper bin edge
return energy_bins
def extract_energy_bins_centers(e_ranges):
'''
Extract the energy bins from the list of energy ranges.
This is a little weird function which can probably be avoided if we use a class
instead of a namespace. However, it is useful for now so...
Parameters
----------
e_ranges: list of str
A list of energy ranges in string form as '{:3.3f} < E < {:3.3f} TeV'.
Returns
-------
energy_bin_centers: list of floats
Energy bins calculated as the averages of the energy ranges in e_ranges.
'''
energy_bin_centers = list()
for this_range in e_ranges:
low_e = float(this_range.split()[0])
high_e = float(this_range.split()[4])
energy_bin_centers.append((high_e + low_e)/2.)
return energy_bin_centers
def split_data_train_test(dtf_e, test_size=0.75, random_state=75):
'''
Split the data into training and testing datasets.
The data is split in each energy range separately with 'test_size'
setting the fraction of the test sample.
Parameters
----------
dtf_e: dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data to split.
The keys of the dict are the energy ranges of the data.
test_size: float or int, default=0.75
If float, should be between 0.0 and 1.0 and represents the proportion of the dataset
to include in the test split. If int, represents the absolute number of test samples.
If None it will be set to 0.25.
random_state: int
Returns
-------
Two dictionaries of DataFrames, one for training and one for testing
(keys=energy ranges, values=separated DataFrames).
'''
dtf_e_train = dict()
dtf_e_test = dict()
for this_e_range, this_dtf in dtf_e.items():
dtf_e_train[this_e_range], dtf_e_test[this_e_range] = model_selection.train_test_split(
this_dtf,
test_size=test_size,
random_state=random_state
)
return dtf_e_train, dtf_e_test
def add_event_type_column(dtf, labels, n_types=2):
'''
Add an event type column by dividing the data into n_types bins with equal statistics
based on the labels column in dtf.
Unlike in most cases in this code, dtf is the DataFrame itself,
not a dict of energy ranges. This function should be called per energy bin.
Parameters
----------
dtf: pandas DataFrames
A DataFrame to add event types to.
labels: str
Name of the variable used as the labels in the training.
n_types: int
The number of types to divide the data in.
Returns
-------
A DataFrame with an additional event_type column.
'''
event_type_quantiles = np.linspace(0, 1, n_types + 1)
event_types_bins = mstats.mquantiles(dtf[labels].values, event_type_quantiles)
event_types = list()
for this_value in dtf[labels].values:
this_event_type = np.searchsorted(event_types_bins, this_value)
if this_event_type < 1:
this_event_type = 1
if this_event_type > n_types:
this_event_type = n_types
event_types.append(this_event_type)
dtf.loc[:, 'event_type'] = event_types
return dtf
def define_regressors():
'''
Define regressors to train the data with.
All possible regressors should be added here.
Regressors can be simple ones or pipelines that include standardisation or anything else.
The parameters for the regressors are hard coded since they are expected to more or less
stay constant once tuned.
TODO: Include a feature selection method in the pipeline?
That way it can be done automatically separately in each energy bin.
(see https://scikit-learn.org/stable/modules/feature_selection.html).
Returns
-------
A dictionary of regressors to train.
'''
regressors = dict()
regressors['random_forest'] = RandomForestRegressor(n_estimators=300, random_state=0, n_jobs=8)
regressors['MLP_relu'] = make_pipeline(
preprocessing.QuantileTransformer(output_distribution='normal', random_state=0),
MLPRegressor(
hidden_layer_sizes=(100, 50),
solver='adam',
max_iter=20000,
activation='relu',
tol=1e-5,
# early_stopping=True,
random_state=0
)
)
regressors['MLP_logistic'] = make_pipeline(
preprocessing.QuantileTransformer(output_distribution='normal', random_state=0),
MLPRegressor(
hidden_layer_sizes=(80, 45),
solver='adam',
max_iter=20000,
activation='logistic',
tol=1e-5,
# early_stopping=True,
random_state=0
)
)
regressors['MLP_uniform'] = make_pipeline(
preprocessing.QuantileTransformer(output_distribution='uniform', random_state=0),
MLPRegressor(
hidden_layer_sizes=(80, 45),
solver='adam',
max_iter=20000,
activation='tanh',
tol=1e-5,
# early_stopping=True,
random_state=0
)
)
regressors['MLP_tanh'] = make_pipeline(
preprocessing.QuantileTransformer(output_distribution='normal', random_state=0),
MLPRegressor(
hidden_layer_sizes=(36, 6),
solver='adam',
max_iter=20000,
activation='tanh',
tol=1e-5,
# early_stopping=True,
random_state=0
)
)
regressors['MLP_lbfgs'] = make_pipeline(
preprocessing.QuantileTransformer(output_distribution='normal', random_state=0),
MLPRegressor(
hidden_layer_sizes=(36, 6),
solver='lbfgs',
max_iter=20000,
activation='logistic',
tol=1e-5,
# early_stopping=True,
random_state=0
)
)
regressors['BDT'] = AdaBoostRegressor(
DecisionTreeRegressor(max_depth=30, random_state=0),
n_estimators=100, random_state=0
)
regressors['BDT_small'] = AdaBoostRegressor(
DecisionTreeRegressor(max_depth=30, random_state=0),
n_estimators=30, random_state=0
)
regressors['linear_regression'] = LinearRegression(n_jobs=4)
regressors['ridge'] = Ridge(alpha=1.0)
regressors['SVR'] = SVR(C=10.0, epsilon=0.2)
regressors['linear_SVR'] = make_pipeline(
preprocessing.StandardScaler(),
LinearSVR(random_state=0, tol=1e-5, C=10.0, epsilon=0.2, max_iter=100000)
)
regressors['SGD'] = make_pipeline(
preprocessing.StandardScaler(),
SGDRegressor(loss='epsilon_insensitive', max_iter=20000, tol=1e-5)
)
return regressors
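# Illustrative sketch (not part of the original module): train_models() below expects a
# nested dict per model with 'model', 'train_features', 'labels' and 'test_data_suffix'
# keys. This shows how such a dict is typically assembled from define_regressors() and
# nominal_labels_train_features(). The chosen model names and the 'default' suffix are
# assumptions.
def _example_models_to_train():
    labels, train_features = nominal_labels_train_features()
    all_regressors = define_regressors()
    models_to_train = {
        name: {
            'model': all_regressors[name],
            'train_features': train_features,
            'labels': labels,
            'test_data_suffix': 'default',
        }
        for name in ['random_forest', 'MLP_tanh']
    }
    return models_to_train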
def define_classifiers():
'''
Define classifiers to train the data with.
All possible classifiers should be added here.
Classifiers can be simple ones or pipelines that include standardisation or anything else.
The parameters for the classifiers are hard coded since they are expected to more or less
stay constant once tuned.
TODO: Include a feature selection method in the pipeline?
That way it can be done automatically separately in each energy bin.
(see https://scikit-learn.org/stable/modules/feature_selection.html).
Returns
-------
A dictionary of classifiers to train.
'''
classifiers = dict()
classifiers['random_forest_classifier'] = RandomForestClassifier(
n_estimators=100,
random_state=0,
n_jobs=8
)
classifiers['MLP_classifier'] = make_pipeline(
preprocessing.QuantileTransformer(output_distribution='normal', random_state=0),
MLPClassifier(
hidden_layer_sizes=(36, 6),
solver='adam',
max_iter=20000,
activation='tanh',
tol=1e-5,
# early_stopping=True,
random_state=0
)
)
classifiers['MLP_relu_classifier'] = make_pipeline(
preprocessing.QuantileTransformer(output_distribution='normal', random_state=0),
MLPClassifier(
hidden_layer_sizes=(100, 50),
solver='adam',
max_iter=20000,
activation='relu',
tol=1e-5,
# early_stopping=True,
random_state=0
)
)
classifiers['MLP_logistic_classifier'] = make_pipeline(
preprocessing.QuantileTransformer(output_distribution='normal', random_state=0),
MLPClassifier(
hidden_layer_sizes=(80, 45),
solver='adam',
max_iter=20000,
activation='logistic',
tol=1e-5,
# early_stopping=True,
random_state=0
)
)
classifiers['MLP_uniform_classifier'] = make_pipeline(
preprocessing.QuantileTransformer(output_distribution='uniform', random_state=0),
MLPClassifier(
hidden_layer_sizes=(80, 45),
solver='adam',
max_iter=20000,
activation='tanh',
tol=1e-5,
# early_stopping=True,
random_state=0
)
)
classifiers['BDT_classifier'] = AdaBoostClassifier(
n_estimators=100, random_state=0
)
classifiers['ridge_classifier'] = RidgeClassifier()
classifiers['ridgeCV_classifier'] = RidgeClassifierCV(
alphas=[1e-3, 1e-2, 1e-1, 1],
normalize=True
)
classifiers['SVC_classifier'] = SVC(gamma=2, C=1)
classifiers['SGD_classifier'] = make_pipeline(
preprocessing.StandardScaler(),
SGDClassifier(loss='epsilon_insensitive', max_iter=20000, tol=1e-5)
)
classifiers['Gaussian_process_classifier'] = GaussianProcessClassifier(1.0 * RBF(1.0))
classifiers['bagging_svc_classifier'] = BaggingClassifier(
base_estimator=SVC(),
n_estimators=100,
random_state=0
)
classifiers['bagging_dt_classifier'] = BaggingClassifier(
base_estimator=DecisionTreeClassifier(random_state=0),
n_estimators=100,
random_state=0
)
classifiers['oneVsRest_classifier'] = OneVsRestClassifier(SVC(), n_jobs=8)
classifiers['gradient_boosting_classifier'] = GradientBoostingClassifier(
n_estimators=100,
learning_rate=0.1,
max_depth=5,
random_state=0
)
return classifiers
def train_models(dtf_e_train, models_to_train):
'''
Train all the models in models, using the data in dtf_e_train.
The models are trained per energy range in dtf_e_train.
Parameters
----------
dtf_e_train: dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data to train with.
The keys of the dict are the energy ranges of the data.
Each DataFrame is assumed to contain all 'train_features' and 'labels'.
models: a nested dict of models:
1st dict:
keys=model names, values=2nd dict
2nd dict:
'model':dict of sklearn models (as returned from define_regressors/classifiers()).
'train_features': list of variable names to train with.
'labels': Name of the variable used as the labels in the training.
Returns
-------
A nested dictionary trained models, train_features and labels:
1st dict:
keys=model names, values=2nd dict
2nd dict:
keys=energy ranges, values 3rd dict
3rd dict:
'model': trained model for this energy range
'train_features': list of variable names to train with.
'labels': Name of the variable used as the labels in the training.
'''
models = dict()
for this_model_name, this_model in models_to_train.items():
models[this_model_name] = dict()
for this_e_range in dtf_e_train.keys():
print('Training {} in the energy range - {}'.format(this_model_name, this_e_range))
X_train = dtf_e_train[this_e_range][this_model['train_features']].values
y_train = dtf_e_train[this_e_range][this_model['labels']].values
models[this_model_name][this_e_range] = dict()
models[this_model_name][this_e_range]['train_features'] = this_model['train_features']
models[this_model_name][this_e_range]['labels'] = this_model['labels']
models[this_model_name][this_e_range]['test_data_suffix'] = this_model[
'test_data_suffix'
]
models[this_model_name][this_e_range]['model'] = copy.deepcopy(
this_model['model'].fit(X_train, y_train)
)
return models
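# Illustrative end-to-end sketch (not part of the original module): split cached data
# into train/test samples per energy bin, train the selected models and persist both
# the models and the matching test datasets. It relies on the helper sketches above;
# the suffixes used are assumptions.
def _example_train_and_save():
    dtf_e = bin_data_in_energy(load_dtf(suffix='gamma_onSource'), n_bins=20)
    dtf_e_train, dtf_e_test = split_data_train_test(dtf_e, test_size=0.75)
    trained = train_models(dtf_e_train, _example_models_to_train())
    save_models(trained)
    save_test_dtf(dtf_e_test, suffix='default')
    return trained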
def save_models(trained_models):
'''
Save the trained models to disk.
The path for the models is in models/'model name'.
All models are saved per energy range for each model in trained_models.
Parameters
----------
trained_models: a nested dict of trained sklearn model per energy range.
1st dict:
keys=model names, values=2nd dict
2nd dict:
keys=energy ranges, values 3rd dict
3rd dict:
'model': trained model for this energy range.
'train_features': list of variable names trained with.
'labels': name of the variable used as the labels in the training.
'test_data_suffix': suffix of the test dataset saved to disk.
'''
for model_name, this_model in trained_models.items():
        Path('models').joinpath(model_name).mkdir(parents=True, exist_ok=True)
for this_e_range, model_now in this_model.items():
e_range_name = this_e_range.replace(' < ', '-').replace(' ', '_')
model_file_name = Path('models').joinpath(
model_name,
'{}.joblib'.format(e_range_name)
)
dump(model_now, model_file_name, compress=3)
return
def save_test_dtf(dtf_e_test, suffix='default'):
'''
Save the test data to disk so it can be loaded together with load_models().
The path for the test data is in models/test_data.
Parameters
----------
dtf_e_test: dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data to test with.
The keys of the dict are the energy ranges of the data.
Each DataFrame is assumed to contain all 'train_features' and 'labels'.
suffix: str
The suffix to add to the file name
'''
    Path('models').joinpath('test_data').mkdir(parents=True, exist_ok=True)
if suffix != '':
if not suffix.startswith('_'):
suffix = '_{}'.format(suffix)
test_data_file_name = Path('models').joinpath('test_data').joinpath(
'dtf_e_test{}.joblib'.format(suffix)
)
dump(dtf_e_test, test_data_file_name, compress=3)
return
def save_scores(scores):
'''
Save the scores of the trained models to disk.
The path for the scores is in scores/'model name'.
Parameters
----------
scores: a dict of scores per energy range per trained sklearn model.
dict:
keys=model names, values=list of scores
'''
    Path('scores').mkdir(parents=True, exist_ok=True)
for model_name, these_scores in scores.items():
file_name = Path('scores').joinpath('{}.joblib'.format(model_name))
dump(these_scores, file_name, compress=3)
return
def load_test_dtf(suffix='default'):
'''
Load the test data together with load_models().
The path for the test data is in models/test_data.
Parameters
----------
suffix: str
The suffix added to the file name (the nominal is dtf_e_test_default.joblib)
Returns
-------
dtf_e_test: dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data to test with.
The keys of the dict are the energy ranges of the data.
Each DataFrame is assumed to contain all 'train_features' and 'labels'.
'''
if suffix != '':
if not suffix.startswith('_'):
suffix = '_{}'.format(suffix)
test_data_file_name = Path('models').joinpath('test_data').joinpath(
'dtf_e_test{}.joblib'.format(suffix)
)
return load(test_data_file_name)
def load_multi_test_dtfs(data_names=['default']):
'''
Load the test data together with load_models().
The path for the test data is in models/test_data.
Parameters
----------
    data_names: list of str
        List of suffixes of the test datasets to load (the nominal is dtf_e_test_default.joblib)
Returns
-------
dtf_e_test: a nested dict of test datasets per trained model
1st dict:
keys=test_data_suffix, values=2nd dict
2nd dict:
dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data to test with.
The keys of the dict are the energy ranges of the data.
Each DataFrame is assumed to contain all 'train_features' and 'labels'.
'''
dtf_e_test = dict()
for this_data_name in data_names:
dtf_e_test[this_data_name] = load_test_dtf(this_data_name)
return dtf_e_test
def load_models(model_names=list()):
'''
Read the trained models from disk.
The path for the models is in models/'model name'.
All models are saved per energy range for each model in trained_models.
Parameters
----------
model_names: list of str
A list of model names to load from disk
Returns
-------
trained_models: a nested dict of trained sklearn model per energy range.
1st dict:
keys=model names, values=2nd dict
2nd dict:
keys=energy ranges, values 3rd dict
3rd dict:
'model': trained model for this energy range.
'train_features': list of variable names trained with.
'labels': name of the variable used as the labels in the training.
'test_data_suffix': suffix of the test dataset saved to disk.
'''
trained_models = defaultdict(dict)
for model_name in model_names:
print('Loading the {} model'.format(model_name))
models_dir = Path('models').joinpath(model_name)
for this_file in sorted(models_dir.iterdir(), key=os.path.getmtime):
if this_file.is_file():
e_range_name = this_file.stem.replace('-', ' < ').replace('_', ' ')
model_file_name = Path('models').joinpath(
model_name,
'{}.joblib'.format(e_range_name)
)
trained_models[model_name][e_range_name] = load(this_file)
return trained_models
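# Illustrative sketch (not part of the original module): reload trained models from disk
# together with the test datasets they were trained against, using the 'test_data_suffix'
# stored with each model. The model names listed here are assumptions.
def _example_reload_models_and_data():
    trained_models = load_models(['random_forest', 'MLP_tanh'])
    dataset_names = extract_unique_dataset_names(trained_models)
    dtf_e_test = load_multi_test_dtfs(data_names=list(dataset_names))
    return trained_models, dtf_e_test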
def partition_event_types(dtf_e_test, trained_models, n_types=2, type_bins='equal statistics',
return_partition=False, event_type_bins=None):
'''
Divide the events into n_types event types.
The bins defining the types are calculated from the predicted label values.
Two lists of types are returned per model and per energy range, one true and one predicted.
Parameters
----------
dtf_e_test: a nested dict of test datasets per trained model
1st dict:
keys=test_data_suffix, values=2nd dict
2nd dict:
dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data to test with.
The keys of the dict are the energy ranges of the data.
Each DataFrame is assumed to contain all 'train_features' and 'labels'.
trained_models: a nested dict of trained sklearn model per energy range.
1st dict:
keys=model names, values=2nd dict
2nd dict:
keys=energy ranges, values 3rd dict
3rd dict:
'model': trained model for this energy range.
'train_features': list of variable names trained with.
'labels': name of the variable used as the labels in the training.
'test_data_suffix': suffix of the test dataset saved to disk.
n_types: int (default=2)
The number of types to divide the data in.
type_bins: list of floats or str
A list defining the bin sizes of each type,
e.g., [0, 0.2, 0.8, 1] would divide the reconstructed labels dataset (angular error)
into three bins, best 20%, middle 60% and worst 20%.
The list must be n_types + 1 long and the first and last values must be zero and one.
The default is equal statistics bins, given as the default string.
return_partition: Bool
If true, a dictionary containing the partition values used for each model and each energy bin will
be returned.
event_type_bins: a nested dict of partition values per trained model and energy range
1st dict:
keys=model names, values=2nd dict
2nd dict:
keys=energy ranges, values=partition values array
Returns
-------
event_types: nested dict
1st dict:
keys=model names, values=2nd dict
2nd dict:
keys=energy ranges, values=3rddict
3rd dict:
keys=true or reco, values=event type
'''
event_types = dict()
if type_bins == 'equal statistics':
type_bins = np.linspace(0, 1, n_types + 1)
elif not isinstance(type_bins, list):
raise ValueError('type_bins must be a list of floats or equal statistics')
elif len(type_bins) != n_types + 1:
raise ValueError('type_bins must be n_types + 1 long')
elif type_bins[0] != 0 or type_bins[-1] != 1:
raise ValueError('the first and last values of type_bins must be zero and one')
else:
pass
if return_partition:
event_type_bins = dict()
for model_name, model in trained_models.items():
event_types[model_name] = dict()
if return_partition:
event_type_bins[model_name] = dict()
print('Calculating event types for the {} model'.format(model_name))
for this_e_range, this_model in model.items():
            event_types[model_name][this_e_range] = defaultdict(list)
# To keep lines short
dtf_this_e = dtf_e_test[this_model['test_data_suffix']][this_e_range]
X_test = dtf_this_e[this_model['train_features']].values
# Check if any value is inf (found one on a proton file...).
# If true, change it to a big negative or positive value.
if np.any(np.isinf(X_test)):
# Remove positive infs
X_test[X_test > 999999] = 999999
# Remove negative infs
X_test[X_test < -999999] = -999999
if np.any(np.isnan(X_test)):
# Remove nans
X_test[np.isnan(X_test)] = 99999
y_pred = this_model['model'].predict(X_test)
event_types_bins = mstats.mquantiles(
y_pred,
type_bins
)
# If return_partition == True, then store the event type bins into the container.
if return_partition:
event_type_bins[model_name][this_e_range] = event_types_bins
# If return_partition == False and a event_type_bins container was provided, then use the values from
# the container.
if not return_partition and event_type_bins is not None:
event_types_bins = event_type_bins[model_name][this_e_range]
for this_value in y_pred:
this_event_type = np.searchsorted(event_types_bins, this_value)
if this_event_type < 1:
this_event_type = 1
if this_event_type > n_types:
this_event_type = n_types
event_types[model_name][this_e_range]['reco'].append(this_event_type)
for this_value in dtf_this_e[this_model['labels']].values:
this_event_type = np.searchsorted(event_types_bins, this_value)
if this_event_type < 1:
this_event_type = 1
if this_event_type > n_types:
this_event_type = n_types
event_types[model_name][this_e_range]['true'].append(this_event_type)
if return_partition:
return event_types, event_type_bins
else:
return event_types
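# Illustrative sketch (not part of the original module): partition the test events of the
# reloaded models into three event types with equal statistics and keep the partition
# values so they can be reused on another sample (e.g. protons). The inputs come from the
# sketches above and are assumptions.
def _example_partition_event_types():
    trained_models, dtf_e_test = _example_reload_models_and_data()
    event_types, event_type_bins = partition_event_types(
        dtf_e_test,
        trained_models,
        n_types=3,
        return_partition=True
    )
    return event_types, event_type_bins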
def predicted_event_types(dtf_e_test, trained_models, n_types=2):
'''
Get the true and predicted event types for n_types event types.
Two lists of types are returned per model and per energy range, one true and one predicted.
This function is meant to be used only for the classification case.
Parameters
----------
dtf_e_test: a nested dict of test datasets per trained model
1st dict:
keys=test_data_suffix, values=2nd dict
2nd dict:
dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data to test with.
The keys of the dict are the energy ranges of the data.
Each DataFrame is assumed to contain all 'train_features' and 'labels'.
trained_models: a nested dict of trained sklearn model per energy range.
1st dict:
keys=model names, values=2nd dict
2nd dict:
keys=energy ranges, values 3rd dict
3rd dict:
'model': trained model for this energy range,
'train_features': list of variable names trained with.
'labels': name of the variable used as the labels in the training.
'test_data_suffix': suffix of the test dataset saved to disk.
n_types: int (default=2)
The number of types used in the training.
Returns
-------
event_types: nested dict
1st dict:
keys=model names, values=2nd dict
2nd dict:
keys=energy ranges, values=3rddict
3rd dict:
keys=true or reco, values=event type
'''
event_types = dict()
for model_name, model in trained_models.items():
event_types[model_name] = dict()
for this_e_range, this_model in model.items():
            event_types[model_name][this_e_range] = defaultdict(list)
# To keep lines short
dtf_this_e = dtf_e_test[this_model['test_data_suffix']][this_e_range]
event_types[model_name][this_e_range]['true'] = dtf_this_e[
'event_type_{:d}'.format(n_types)
]
X_test = dtf_this_e[this_model['train_features']].values
event_types[model_name][this_e_range]['reco'] = this_model['model'].predict(X_test)
return event_types
def add_event_types_column(dtf_e, labels, n_types=[2, 3, 4]):
'''
Divide the events into n_types event types.
The bins defining the types are calculated from the label values.
The data will be divided to n number of types with equivalent number of events in each type.
A column with the type will be added to the DataFrame per entry in the n_types list.
Parameters
----------
dtf_e: dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data.
The keys of the dict are the energy ranges of the data.
labels: str
The variable to use as a basis on which to divide the data.
n_types: list of ints (default=[2, 3, 4])
The data will be divided to n number of types
with equivalent number of events in each type.
A column with the type will be added to the DataFrame per entry in the n_types list.
Returns
-------
dtf_e: dict of pandas DataFrames
The same DataFrame as the input but with added columns for event types,
one column per n_types entry. The column names are event_type_n.
'''
pd.options.mode.chained_assignment = None
for this_n_type in n_types:
for this_e_range, this_dtf in dtf_e.items():
event_types = list()
event_types_bins = mstats.mquantiles(
this_dtf[labels].values,
np.linspace(0, 1, this_n_type + 1)
)
for this_value in this_dtf[labels].values:
this_event_type = np.searchsorted(event_types_bins, this_value)
if this_event_type < 1:
this_event_type = 1
if this_event_type > this_n_type:
this_event_type = this_n_type
event_types.append(this_event_type)
this_dtf.loc[:, 'event_type_{:d}'.format(this_n_type)] = event_types
return dtf_e
def extract_unique_dataset_names(trained_models):
'''
Extract all test datasets names necessary for the given trained models.
Parameters
----------
trained_models: a nested dict of trained sklearn model per energy range.
1st dict:
keys=model names, values=2nd dict
2nd dict:
keys=energy ranges, values 3rd dict
3rd dict:
'model': trained model for this energy range.
'train_features': list of variable names trained with.
'labels': name of the variable used as the labels in the training.
'test_data_suffix': suffix of the test dataset saved to disk.
Returns
-------
dataset_names: set
Set of unique data set names
'''
dataset_names = set()
for model in trained_models.values():
for this_model in model.values():
dataset_names.add(this_model['test_data_suffix'])
return dataset_names
def plot_pearson_correlation(dtf, title):
'''
Calculate the Pearson correlation between all variables in this DataFrame.
Parameters
----------
dtf: pandas DataFrame
The DataFrame containing the data.
title: str
        A title to add to the plot (will be added to 'Pearson correlation')
Returns
-------
A pyplot instance with the Pearson correlation plot.
'''
plt.subplots(figsize=[16, 16])
corr_matrix = dtf.corr(method='pearson')
sns.heatmap(
corr_matrix,
vmin=-1.,
vmax=1.,
annot=True,
fmt='.2f',
cmap="YlGnBu",
cbar=True,
linewidths=0.5
)
plt.title('Pearson correlations {}'.format(title))
plt.tight_layout()
return plt
def plot_test_vs_predict(dtf_e_test, trained_models, trained_model_name):
'''
Plot true values vs. the predictions of the model for all energy bins.
Parameters
----------
dtf_e_test: a nested dict of test datasets per trained model
1st dict:
keys=test_data_suffix, values=2nd dict
2nd dict:
dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data to test with.
The keys of the dict are the energy ranges of the data.
Each DataFrame is assumed to contain all 'train_features' and 'labels'.
trained_models: a nested dict of one trained sklearn model per energy range.
1st dict:
keys=energy ranges, values=2nd dict
2nd dict:
'model': trained model for this energy range
'train_features': list of variable names trained with.
'labels': Name of the variable used as the labels in the training.
trained_model_name: str
Name of the model trained.
Returns
-------
A pyplot instance with the test vs. prediction plot.
'''
nrows = 5
ncols = 4
fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=[14, 18])
for i_plot, (this_e_range, this_model) in enumerate(trained_models.items()):
# To keep lines short
dtf_this_e = dtf_e_test[this_model['test_data_suffix']][this_e_range]
X_test = dtf_this_e[this_model['train_features']].values
y_test = dtf_this_e[this_model['labels']].values
if np.any(np.isinf(X_test)):
# Remove positive infs
X_test[X_test > 999999] = 999999
# Remove negative infs
X_test[X_test < -999999] = -999999
y_pred = this_model['model'].predict(X_test)
ax = axs[int(np.floor(i_plot/ncols)), i_plot % ncols]
ax.hist2d(y_pred, y_test, bins=(50, 50), cmap=plt.cm.jet)
ax.plot(
[min(y_test), max(y_test)], [min(y_test), max(y_test)],
linestyle='--',
lw=2,
color='white'
)
ax.set_xlim(np.quantile(y_pred, [0.01, 0.99]))
ax.set_ylim(np.quantile(y_test, [0.01, 0.99]))
ax.set_title(this_e_range)
ax.set_ylabel('True')
ax.set_xlabel('Predicted')
axs[nrows - 1, ncols - 1].axis('off')
axs[nrows - 1, ncols - 1].text(
0.5,
0.5,
trained_model_name,
horizontalalignment='left',
verticalalignment='center',
fontsize=18,
transform=axs[nrows - 1, ncols - 1].transAxes
)
plt.tight_layout()
return plt
def plot_matrix(dtf, train_features, labels, n_types=2, plot_events=20000):
'''
Plot a matrix of each variable in train_features against another (not all combinations).
The data is divided to n_types bins of equal statistics based on the labels.
Each type is plotted in a different colour.
    This function produces multiple plots, where in each plot a maximum of 5 variables are plotted.
Unlike in most cases in this code, dtf is the DataFrame itself,
not a dict of energy ranges. This function should be called per energy bin.
Parameters
----------
dtf: pandas DataFrames
A DataFrame to add event types to.
train_features: list
List of variable names trained with.
labels: str
Name of the variable used as the labels in the training.
n_types: int (default=2)
The number of types to divide the data in.
plot_events: int (default=20000)
For efficiency, limit the number of events that will be used for the plots
Returns
-------
A list of seaborn.PairGrid instances, each with one matrix plot.
'''
setStyle()
# Check if event_type column already present within dtf:
if "event_type" not in dtf.columns:
dtf = add_event_type_column(dtf, labels, n_types)
# Mask out the events without a clear event type
dtf = dtf[dtf['event_type'] > 0]
type_colors = {
1: "#ba2c54",
2: "#5B90DC",
3: '#FFAB44',
4: '#0C9FB3'
}
vars_to_plot = np.array_split(
[labels] + train_features,
round(len([labels] + train_features)/5)
)
grid_plots = list()
for these_vars in vars_to_plot:
grid_plots.append(
sns.pairplot(
dtf.sample(n=plot_events),
vars=these_vars,
hue='event_type',
palette=type_colors,
corner=True
)
)
return grid_plots
def plot_score_comparison(dtf_e_test, trained_models):
'''
Plot the score of the model as a function of energy.
#TODO add a similar function that plots from saved scores instead of calculating every time.
Parameters
----------
dtf_e_test: a nested dict of test datasets per trained model
1st dict:
keys=test_data_suffix, values=2nd dict
2nd dict:
dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data to test with.
The keys of the dict are the energy ranges of the data.
Each DataFrame is assumed to contain all 'train_features' and 'labels'.
trained_models: a nested dict of trained sklearn model per energy range.
1st dict:
keys=model names, values=2nd dict
2nd dict:
keys=energy ranges, values 3rd dict
3rd dict:
'model': dict of trained models for this energy range.
'train_features': list of variable names trained with.
'labels': name of the variable used as the labels in the training.
'test_data_suffix': suffix of the test dataset saved to disk.
Returns
-------
A pyplot instance with the scores plot.
'''
setStyle()
fig, ax = plt.subplots(figsize=(8, 6))
scores = defaultdict(dict)
energy_bins = extract_energy_bins_centers(trained_models[next(iter(trained_models))].keys())
for this_model_name, trained_model in trained_models.items():
print('Calculating scores for {}'.format(this_model_name))
scores_this_model = list()
for this_e_range, this_model in trained_model.items():
# To keep lines short
dtf_this_e = dtf_e_test[this_model['test_data_suffix']][this_e_range]
X_test = dtf_this_e[this_model['train_features']].values
y_test = dtf_this_e[this_model['labels']].values
if np.any(np.isinf(X_test)):
# Remove positive infs
X_test[X_test > 999999] = 999999
# Remove negative infs
X_test[X_test < -999999] = -999999
y_pred = this_model['model'].predict(X_test)
scores_this_model.append(this_model['model'].score(X_test, y_test))
scores[this_model_name]['scores'] = scores_this_model
scores[this_model_name]['energy'] = energy_bins
ax.plot(
scores[this_model_name]['energy'],
scores[this_model_name]['scores'],
label=this_model_name
)
ax.set_xlabel('E [TeV]')
ax.set_ylabel('score')
ax.set_xscale('log')
ax.legend()
plt.tight_layout()
return plt, scores
def plot_confusion_matrix(event_types, trained_model_name, n_types=2):
'''
Plot the confusion matrix of the model for all energy bins.
Parameters
----------
event_types: nested dict
1st dict:
keys=energy ranges, values=2nd dict
2nd dict:
keys=true or reco, values=event type
trained_model_name: str
        Name of the model used to obtain the reconstructed event types
n_types: int (default=2)
The number of types the data was divided in.
Returns
-------
A pyplot instance with the confusion matrix plot.
'''
# setStyle()
nrows = 5
ncols = 4
fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=[14, 18])
for i_plot, this_e_range in enumerate(event_types.keys()):
ax = axs[int(np.floor((i_plot)/ncols)), (i_plot) % ncols]
cm = confusion_matrix(
event_types[this_e_range]['true'],
event_types[this_e_range]['reco'],
normalize='true',
)
sns.heatmap(
cm,
annot=True,
fmt='.1%',
ax=ax,
cmap='Blues',
cbar=False,
xticklabels=['{}'.format(tick) for tick in np.arange(1, n_types + 1, 1)],
yticklabels=['{}'.format(tick) for tick in np.arange(1, n_types + 1, 1)]
)
ax.set_xlabel('Prediction')
ax.set_ylabel('True')
ax.set_title(this_e_range)
axs[nrows - 1, ncols - 1].axis('off')
axs[nrows - 1, ncols - 1].text(
0.5,
0.5,
trained_model_name,
horizontalalignment='center',
verticalalignment='center',
fontsize=18,
transform=axs[nrows - 1, ncols - 1].transAxes
)
plt.tight_layout()
return plt
def plot_1d_confusion_matrix(event_types, trained_model_name, n_types=2):
'''
Plot a one-dimensional confusion matrix of the model for all energy bins.
Parameters
----------
event_types: nested dict
1st dict:
keys=energy ranges, values=2nd dict
2nd dict:
keys=true or reco, values=event type
trained_model_name: str
        Name of the model used to obtain the reconstructed event types
n_types: int (default=2)
The number of types the data was divided in.
Returns
-------
A pyplot instance with the one-dimensional confusion matrix plot.
'''
# setStyle()
nrows = 5
ncols = 4
fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=[14, 10])
for i_plot, this_e_range in enumerate(event_types.keys()):
ax = axs[int(np.floor((i_plot)/ncols)), (i_plot) % ncols]
        pred_error = np.abs(
            np.array(event_types[this_e_range]['true'])
            - np.array(event_types[this_e_range]['reco'])
        )
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module contains base implementation of a NN classifier trained using supervised learning.
"""
import tensorflow as tf
from tensorflow.core.framework import summary_pb2
import numpy
import time
import os
import pickle
import scipy.io
class BaseNetwork(object):
"""
This defines the basic network structure we use.
"""
def __init__(self, path_to_logs=os.getcwd()):
"""
The initializer of the BasicNetwork object.
Attributes:
+ self._tf_graph: the tf graph containing the network structure
+ self._tf_session: the tf session used to compute operations relative to the object
+ self._tf_fw: the tf file writer to display the graph in tensorboard
+ self._net_loss: the tf expression of loss attached to the graph
+ self._net_optimize: the tf optimization method
+ self._net_input: the tf input placeholder
+ self._net_label: the tf labels placeholder
            + self._net_output: the tf net output
+ self._net_accuracy: the tf accuracy method
            + self._net_train_dict: the dictionary added for training
            + self._net_test_dict: the dictionary added for testing
            + self._net_summaries: the tensorboard merged summaries
+ self._net_history: a list containing training records (arrays [time, train accuracy, test accuracy])
+ self._logs_path: the path to tensorboard logs files
"""
# Initialize super
object.__init__(self)
# We initialize the variables of the object
self._tf_graph = tf.Graph()
        self._tf_session = None
self._tf_fw = None
self._net_loss = None
self._net_optimize = None
self._net_input = None
self._net_label = None
self._net_output = None
self._net_accuracy = None
self._net_train_dict = dict()
self._net_test_dict = dict()
self._net_summaries = None
self._net_history = list()
self._net_summaries_history = list()
self._net_summary_parser = summary_pb2.Summary()
self._logs_path = path_to_logs
# We construct and initialize everything
self._construct_arch()
self._initialize_fw()
self._initialize_session()
self._initialize_weights()
def train(self, X_train, y_train,
X_test, y_test,
iterations=0,
criterion=0,
train_batch_size=100,
test_batch_size=100,
callback=None):
"""
The public training method. A network can be trained for a specified number of iterations using the _iterations_
parameter, or with a stopping criterion over the training accuracy using the _criterion_ argument.
Parameters:
+ X_train: a numpy array containing training input data
+ y_train: a numpy array containing training output classes
+ X_test: a numpy array containing testing input data
+ y_test: a numpy array containing testing output classes
+ iterations: number of iterations to perform
+ criterion: stopping criterion over training accuracy
+ train_batch_size: the batch size for training data
+ test_batch_size: the batch size for testing data
+ callback: a method to be called before each printing iteration
"""
        # When training by iteration count, require at least 100 iterations
        if (criterion == 0 and iterations < 100):
            raise Warning("Number of iterations must be at least 100")
# We initialize history if the network is fresh
if len(self._net_history)==0:
self._net_history.append([0., 0., 0.])
start_time = 0.
else:
start_time = max(numpy.asarray(self._net_history)[:,0])
start_tick = time.time()
# Training with iterations
if iterations != 0 and criterion == 0:
for iter in range(iterations):
# We get the random indexes to use in the batch
train_idx = numpy.random.permutation(X_train.shape[0])
train_idx = train_idx[0:train_batch_size]
# We execute the gradient descent step
input_dict = {self._net_input: X_train[train_idx], self._net_label: y_train[train_idx]}
input_dict.update(self._net_train_dict)
self._net_optimize.run(feed_dict=input_dict, session=self._tf_session)
# If the iteration is a multiple of 100, we do things
if (iter % 100 == 0) and (iter > 0):
# We compute the train accuracy over the batch
input_dict = {self._net_input: X_train[train_idx], self._net_label: y_train[train_idx]}
input_dict.update(self._net_test_dict)
train_accuracy = self._net_accuracy.eval(feed_dict=input_dict, session=self._tf_session)
# We compute the test accuracy over the batch
test_idx = numpy.random.permutation(X_test.shape[0])
test_idx = test_idx[0:test_batch_size]
input_dict = {self._net_input: X_test[test_idx], self._net_label: y_test[test_idx]}
input_dict.update(self._net_test_dict)
test_accuracy = self._net_accuracy.eval(feed_dict=input_dict, session=self._tf_session)
# We update tensorboard summaries
summary = self._net_summaries.eval(feed_dict=input_dict,session=self._tf_session)
self._net_summary_parser.ParseFromString(summary)
self._net_summaries_history.append({str(val.tag):val.simple_value for val in self._net_summary_parser.value})
self._tf_fw.add_summary(summary,iter)
self._tf_fw.flush()
# We write the record to the history
self._net_history.append([(time.time() - start_tick) + start_time, train_accuracy, test_accuracy])
# We execute the callback if it exists
if callback is not None: callback(self)
# Training with criterion
elif criterion != 0 and iterations == 0:
iter = 0
train_accuracy = 0
while train_accuracy < criterion:
iter += 1
# We get the random indexes to use in the batch
                train_idx = numpy.random.permutation(X_train.shape[0])
import imgaug.augmenters as iaa
import numpy as np
import cv2
import random
from os import listdir
from os.path import isfile, join
truncate_fg=True
change_back_ground_prob=0.5
color_aug_prob=0.8
image_augmentations_lm=(
iaa.Sequential([
iaa.Sometimes(0.4, iaa.CoarseDropout( p=0.1, size_percent=0.05) ),
iaa.Sometimes(0.5, iaa.GaussianBlur(np.random.rand())),
iaa.Sometimes(0.5, iaa.Add((-20, 20), per_channel=0.3)),
iaa.Sometimes(0.4, iaa.Invert(0.20, per_channel=True)),
iaa.Sometimes(0.5, iaa.Multiply((0.7, 1.4), per_channel=0.8)),
iaa.Sometimes(0.5, iaa.Multiply((0.7, 1.4))),
iaa.Sometimes(0.5, iaa.ContrastNormalization((0.5, 2.0), per_channel=0.3))
], random_order=False)
)
image_augmentations_bop=(
iaa.Sequential([
iaa.Sometimes(0.5, iaa.CoarseDropout( p=0.2, size_percent=0.05) ),
iaa.Sometimes(0.5, iaa.GaussianBlur(1.2*np.random.rand())),
iaa.Sometimes(0.5, iaa.Add((-25, 25), per_channel=0.3)),
iaa.Sometimes(0.3, iaa.Invert(0.2, per_channel=True)),
iaa.Sometimes(0.5, iaa.Multiply((0.6, 1.4), per_channel=0.5)),
iaa.Sometimes(0.5, iaa.Multiply((0.6, 1.4))),
iaa.Sometimes(0.5, iaa.LinearContrast((0.5, 2.2), per_channel=0.3))
], random_order = False)
)
def resize_short_edge(im, target_size, max_size, stride=0, interpolation=cv2.INTER_LINEAR, return_scale=False):
"""Scale the shorter edge to the given size, with a limit of `max_size` on
the longer edge. If `max_size` is reached, then downscale so that the
longer edge does not exceed max_size. only resize input image to target
size and return scale.
:param im: BGR image input by opencv
:param target_size: one dimensional size (the short side)
:param max_size: one dimensional max size (the long side)
:param stride: if given, pad the image to designated stride
:param interpolation: if given, using given interpolation method to resize image
:return:
"""
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=interpolation)
if stride == 0:
if return_scale:
return im, im_scale
else:
return im
else:
# pad to product of stride
im_height = int(np.ceil(im.shape[0] / float(stride)) * stride)
im_width = int(np.ceil(im.shape[1] / float(stride)) * stride)
im_channel = im.shape[2]
padded_im = np.zeros((im_height, im_width, im_channel))
padded_im[: im.shape[0], : im.shape[1], :] = im
if return_scale:
return padded_im, im_scale
else:
return padded_im
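# Sketch of the intended call pattern (file name and sizes are placeholders):
#
#   im = cv2.imread("example.jpg")
#   im_resized, scale = resize_short_edge(im, target_size=480, max_size=640,
#                                         return_scale=True)
#   # the short side becomes 480 px unless that would push the long side past 640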
def get_bg_image(filename, imH, imW, channel=3):
"""keep aspect ratio of bg during resize target image size:
imHximWxchannel.
"""
target_size = min(imH, imW)
max_size = max(imH, imW)
real_hw_ratio = float(imH) / float(imW)
bg_image = cv2.imread(filename)
bg_h, bg_w, bg_c = bg_image.shape
bg_image_resize = np.zeros((imH, imW, channel), dtype="uint8")
if (float(imH) / float(imW) < 1 and float(bg_h) / float(bg_w) < 1) or (
float(imH) / float(imW) >= 1 and float(bg_h) / float(bg_w) >= 1
):
if bg_h >= bg_w:
bg_h_new = int(np.ceil(bg_w * real_hw_ratio))
if bg_h_new < bg_h:
bg_image_crop = bg_image[0:bg_h_new, 0:bg_w, :]
else:
bg_image_crop = bg_image
else:
bg_w_new = int(np.ceil(bg_h / real_hw_ratio))
if bg_w_new < bg_w:
bg_image_crop = bg_image[0:bg_h, 0:bg_w_new, :]
else:
bg_image_crop = bg_image
else:
if bg_h >= bg_w:
bg_h_new = int(np.ceil(bg_w * real_hw_ratio))
bg_image_crop = bg_image[0:bg_h_new, 0:bg_w, :]
else: # bg_h < bg_w
            bg_w_new = int(np.ceil(bg_h / real_hw_ratio))
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: <NAME>
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import sys
import h5py
import numpy as np
import dask.array as da
import matplotlib as mpl
# Attempting to get things to work for all versions of python on Travis
mpl.use('Agg')
from sidpy.hdf.hdf_utils import get_attr
sys.path.append("../../pyUSID/")
from pyUSID.io import USIDataset
from pyUSID.io.hdf_utils.model import reshape_to_n_dims, get_dimensionality
from pyUSID.io.write_utils import Dimension
from . import data_utils
skip_viz_tests = True
if sys.version_info.major == 3:
unicode = str
if sys.version_info.minor > 4:
skip_viz_tests = False
test_h5_file_path = data_utils.std_beps_path
class TestBEPS(unittest.TestCase):
def setUp(self):
data_utils.make_beps_file()
self.orig_labels_order = ['X', 'Y', 'Cycle', 'Bias']
self.h5_file = h5py.File(data_utils.std_beps_path, mode='r')
h5_grp = self.h5_file['/Raw_Measurement/']
self.source_nd_s2f = h5_grp['n_dim_form'][()]
self.source_nd_f2s = self.source_nd_s2f.transpose(1, 0, 3, 2)
self.h5_source = USIDataset(h5_grp['source_main'])
self.pos_dims=[]
self.spec_dims=[]
for dim_name, dim_units in zip(self.h5_source.pos_dim_labels,
get_attr(self.h5_source.h5_pos_inds, 'units')):
self.pos_dims.append(
Dimension(dim_name, dim_units, h5_grp[dim_name][()]))
for dim_name, dim_units in zip(self.h5_source.spec_dim_labels,
get_attr(self.h5_source.h5_spec_inds, 'units')):
self.spec_dims.append(
Dimension(dim_name, dim_units, h5_grp[dim_name][()]))
res_grp_0 = h5_grp['source_main-Fitter_000']
self.results_0_nd_s2f = res_grp_0['n_dim_form'][()]
self.results_0_nd_f2s = self.results_0_nd_s2f.transpose(1, 0, 3, 2)
self.h5_compound = USIDataset(res_grp_0['results_main'])
res_grp_1 = h5_grp['source_main-Fitter_001']
self.results_1_nd_s2f = res_grp_1['n_dim_form'][()]
self.results_1_nd_f2s = self.results_1_nd_s2f.transpose(1, 0, 3, 2)
self.h5_complex = USIDataset(res_grp_1['results_main'])
def tearDown(self):
self.h5_file.close()
os.remove(data_utils.std_beps_path)
class TestUSIDatasetReal(unittest.TestCase):
def setUp(self):
self.rev_spec = False
data_utils.make_beps_file(rev_spec=self.rev_spec)
self.orig_labels_order = ['X', 'Y', 'Cycle', 'Bias'] if self.rev_spec else ['X', 'Y', 'Bias', 'Cycle']
def tearDown(self):
os.remove(test_h5_file_path)
def get_expected_n_dim(self, h5_f):
nd_slow_to_fast = h5_f['/Raw_Measurement/n_dim_form'][()]
nd_fast_to_slow = nd_slow_to_fast.transpose(1, 0, 3, 2)
if self.rev_spec:
nd_fast_to_slow = nd_fast_to_slow.transpose(0, 1, 3, 2)
return nd_slow_to_fast, nd_fast_to_slow
class TestStringRepr(TestBEPS):
def test_string_representation(self):
usi_dset = self.h5_source
h5_main = self.h5_file[usi_dset.name]
actual = usi_dset.__repr__()
actual = [line.strip() for line in actual.split("\n")]
actual = [actual[line_ind] for line_ind in [0, 2, 4, 7, 8, 10, 11]]
expected = list()
expected.append(h5_main.__repr__())
expected.append(h5_main.name)
expected.append(get_attr(h5_main, "quantity") + " (" + get_attr(h5_main, "units") + ")")
for h5_inds in [usi_dset.h5_pos_inds, usi_dset.h5_spec_inds]:
for dim_name, dim_size in zip(get_attr(h5_inds, "labels"),
get_dimensionality(h5_inds)):
expected.append(dim_name + ' - size: ' + str(dim_size))
self.assertTrue(np.all([x == y for x, y in zip(actual, expected)]))
class TestEquality(TestBEPS):
def test_correct_USIDataset(self):
expected = USIDataset(self.h5_source)
self.assertTrue(expected == expected)
def test_correct_h5_dataset(self):
h5_main = self.h5_file[self.h5_source.name]
expected = USIDataset(h5_main)
self.assertTrue(expected == h5_main)
def test_incorrect_USIDataset(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
h5_main = h5_f['/Raw_Measurement/source_main']
expected = USIDataset(h5_main)
incorrect = USIDataset(h5_f['/Raw_Measurement/source_main-Fitter_000/results_main'])
self.assertFalse(expected == incorrect)
def test_incorrect_h5_dataset(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
h5_main = h5_f['/Raw_Measurement/source_main']
expected = USIDataset(h5_main)
incorrect = h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices']
self.assertFalse(expected == incorrect)
def test_incorrect_object(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
h5_main = h5_f['/Raw_Measurement/source_main']
expected = USIDataset(h5_main)
incorrect = np.zeros(shape=(1, 2, 3, 4))
self.assertFalse(expected == incorrect)
class TestGetNDimFormExistsReal(TestUSIDatasetReal):
def test_sorted_and_unsorted(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_dset = USIDataset(h5_f['/Raw_Measurement/source_main'])
nd_slow_to_fast, nd_fast_to_slow = self.get_expected_n_dim(h5_f)
actual_f2s = usi_dset.get_n_dim_form(lazy=False)
self.assertTrue(np.allclose(nd_fast_to_slow, actual_f2s))
nd_form, success = reshape_to_n_dims(usi_dset, sort_dims=True)
print(nd_form.shape)
usi_dset.toggle_sorting()
actual_s2f = usi_dset.get_n_dim_form(lazy=False)
self.assertTrue(np.allclose(nd_slow_to_fast, actual_s2f))
class TestPosSpecSlicesReal(TestUSIDatasetReal):
def test_empty_dict(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({})
self.assertTrue(np.allclose(np.expand_dims(np.arange(14), axis=1), actual_spec))
self.assertTrue(np.allclose(np.expand_dims(np.arange(15), axis=1), actual_pos))
def test_non_existent_dim(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
with self.assertRaises(KeyError):
_ = usi_main._get_pos_spec_slices({'blah': 4, 'X': 3, 'Y': 1})
def test_incorrect_type(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
with self.assertRaises(TypeError):
_ = usi_main._get_pos_spec_slices({'X': 'fdfd', 'Y': 1})
def test_negative_index(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
with self.assertRaises(ValueError):
_ = usi_main._get_pos_spec_slices({'X': -4, 'Y': 1})
def test_out_of_bounds(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
with self.assertRaises(IndexError):
_ = usi_main._get_pos_spec_slices({'X': 15, 'Y': 1})
def test_one_pos_dim_removed(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
# orig_pos = np.vstack([np.tile(np.arange(5), 3), np.repeat(np.arange(3), 5)]).T
# orig_spec = np.vstack([np.tile(np.arange(7), 2), np.repeat(np.arange(2), 7)])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': 3})
# we want every fifth position starting from 3
expected_pos = np.expand_dims(np.arange(3, 15, 5), axis=1)
expected_spec = np.expand_dims(np.arange(14), axis=1)
self.assertTrue(np.allclose(expected_spec, actual_spec))
self.assertTrue(np.allclose(expected_pos, actual_pos))
def test_one_pos_dim_sliced(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': slice(1, 5, 2)})
            # we want columns 1 and 3 of every row
positions = []
for row_ind in range(3):
for col_ind in range(1, 5, 2):
positions.append(5 * row_ind + col_ind)
expected_pos = np.expand_dims(positions, axis=1)
expected_spec = np.expand_dims(np.arange(14), axis=1)
self.assertTrue(np.allclose(expected_spec, actual_spec))
self.assertTrue(np.allclose(expected_pos, actual_pos))
def test_two_pos_dim_sliced(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': slice(1, 5, 2), 'Y': 1})
            # we want columns 1 and 3 of row 1 only
positions = []
for row_ind in range(1, 2):
for col_ind in range(1, 5, 2):
positions.append(5 * row_ind + col_ind)
expected_pos = np.expand_dims(positions, axis=1)
expected_spec = np.expand_dims(np.arange(14), axis=1)
self.assertTrue(np.allclose(expected_spec, actual_spec))
self.assertTrue(np.allclose(expected_pos, actual_pos))
def test_two_pos_dim_sliced_list(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': [1, 2, 4], 'Y': 1})
            # we want columns 1, 2 and 4 of row 1 only
positions = []
for row_ind in range(1, 2):
for col_ind in [1, 2, 4]:
positions.append(5 * row_ind + col_ind)
expected_pos = np.expand_dims(positions, axis=1)
expected_spec = np.expand_dims(np.arange(14), axis=1)
self.assertTrue(np.allclose(expected_spec, actual_spec))
self.assertTrue(np.allclose(expected_pos, actual_pos))
def test_both_pos_removed(self):
with h5py.File(test_h5_file_path, mode='r') as h5_f:
usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': 3, 'Y': 1})
            # only a single position remains: row 1, column 3
expected_pos = np.expand_dims([1 * 5 + 3], axis=1)
expected_spec = np.expand_dims( | np.arange(14) | numpy.arange |
''' phase_uncert_thetar: simulating an Optical Neural Network
using Neuroptica and linearly separable datasets.
Goes over every topology type with N = 4-32.
Author: <NAME>
Edit: 2020.03.09
'''
import numpy as np
import calculate_accuracy as calc_acc
import ONN_Simulation_Class as ONN_Cls
import onnClassTraining
import digital_NN_main as dnn
import create_datasets as cd
import random
import os
import shutil
import matplotlib
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams['mathtext.rm'] = 'Bitstream Vera Sans'
matplotlib.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
matplotlib.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'
def get_dataset(folder, N, rng, lim=99, SAMPLES=100, EPOCHS=20):
while True:
print(f'RNG = {rng}, N = {N}')
X, y, Xt, yt = cd.gaussian_dataset(targets=int(N), features=int(N), nsamples=SAMPLES*N, rng=rng)
random.seed(rng)
X = (X - np.min(X))/(np.max(X) - np.min(X))
Xt = (Xt - np.min(Xt))/(np.max(Xt) - np.min(Xt))
Xog, Xtog = X, Xt
net, weights = dnn.create_train_dnn(X, y, Xt, yt, folder, EPOCHS)
print('Validation Accuracy: {:.1f}%'.format(dnn.get_current_accuracy(Xt, yt, net)*100))
rng += 1
if dnn.get_current_accuracy(Xt, yt, net)*100 > lim:
if not os.path.isdir(folder):
os.makedirs(folder)
np.savetxt(f'{folder}/X.txt', X, delimiter=',', fmt='%.6f')
np.savetxt(f'{folder}/Xt.txt', Xt, delimiter=',', fmt='%.6f')
np.savetxt(f'{folder}/y.txt', y, delimiter=',', fmt='%.6f')
np.savetxt(f'{folder}/yt.txt', yt, delimiter=',', fmt='%.6f')
print('This dataset works!\n')
return rng
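# Example driver loop (the folder naming scheme and starting rng below are
# assumptions for illustration, not values fixed by this script):
#
#   rng = 5
#   for N in [4, 8, 16, 32]:
#       folder = f'Analysis/N={N}'
#       rng = get_dataset(folder, N, rng, lim=99)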
def test_onn(folder, ONN, lim=98.5):
ONN.get_topology_name()
ONN.X = np.loadtxt(folder + f'/X.txt', delimiter=',')
    ONN.y = np.loadtxt(folder + f'/y.txt', delimiter=',')
import os
import json
import random
import torch
import torch.utils.data
import numpy as np
from tasks.data_utils import InputExample
from tqdm import tqdm
from utils import print_rank_0
from data_utils.corpora import punctuation_standardization
def gigaword_detokenize(string, is_target=False):
_tok_dict = {"(": "-lrb-", ")": "-rrb-",
"[": "-lsb-", "]": "-rsb-",
"{": "-lcb-", "}": "-rcb-",
                 '&': '&amp;', '<': '&lt;', '>': '&gt;'}
string = string.replace('UNK', '[UNK]')
string = string.replace('<unk>', '[UNK]')
for key, value in _tok_dict.items():
string = string.replace(value, key)
# string = string.replace("''", "\"")
# string = string.replace("``", "\"")
# string = string.replace("`", "'")
# string = string.replace(" n't", "n't")
# string = string.replace(" 's", "'s")
# string = string.replace(" 'd", "'d")
# string = string.replace(" 'll", "'ll")
return string
def cnndm_detokenize(string, is_target=False):
_tok_dict = {"(": "-LRB-", ")": "-RRB-",
"[": "-LSB-", "]": "-RSB-",
"{": "-LCB-", "}": "-RCB-"}
if not is_target:
string = string.replace("<S_SEP>", "")
else:
string = string.replace("<S_SEP>", "[SEP]")
for key, value in _tok_dict.items():
string = string.replace(value, key)
string = string.replace("''", "\"")
string = string.replace("``", "\"")
string = string.replace("`", "'")
string = string.replace(" n't", "n't")
string = string.replace(" 's", "'s")
string = string.replace(" 'd", "'d")
string = string.replace(" 'll", "'ll")
return string
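# Illustrative behaviour on a made-up fragment (not taken from the corpus):
#
#   cnndm_detokenize("-LRB- officials said -RRB- it does n't help")
#   # -> "( officials said ) it doesn't help"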
def blanklm_detokenize(string, is_target=False):
string = string.replace("_UNK", "[UNK]")
string = string.replace("<blank>", "[MASK]")
return string
class SummmaryProcessor:
def __init__(self, task, data_dir, tokenizer):
self.task = task
self.data_dir = data_dir
self.tokenizer = tokenizer
def create_examples(self, split):
if split == "train":
filename = "train"
elif split == "dev":
filename = "val"
elif split == "test":
filename = "test"
else:
raise NotImplementedError(split)
print_rank_0(f"Creating {self.task}-{split} dataset from {self.data_dir}")
if self.task == "gigaword":
detokenizer = gigaword_detokenize
elif self.task == "cnn_dm":
detokenizer = cnndm_detokenize
else:
detokenizer = None
source_texts, target_texts = [], []
with open(os.path.join(self.data_dir, f"{filename}.source"), encoding='utf-8') as file:
for line in file:
line = line.strip()
line = punctuation_standardization(line)
line = detokenizer(line) if detokenizer else line
source_texts.append(line)
with open(os.path.join(self.data_dir, f"{filename}.target"), encoding='utf-8') as file:
for line in file:
line = line.strip()
line = punctuation_standardization(line)
line = detokenizer(line, is_target=True) if detokenizer else line
target_texts.append(line)
assert len(source_texts) == len(target_texts)
example_list = []
for idx, (source_text, target_text) in enumerate(zip(source_texts, target_texts)):
if (idx + 1) % 20000 == 0:
print_rank_0(f"Complete {idx + 1} examples")
guid = "%s-%s" % (split, idx)
meta = {"ref": self.tokenizer.DecodeIds(self.tokenizer.EncodeAsIds(target_text).tokenization)}
example = InputExample(guid=guid, text_a=source_text, text_b=target_text, meta=meta)
if idx < 10:
print_rank_0((source_text.encode('utf-8'), target_text.encode('utf-8'), meta["ref"].encode('utf-8')))
example_list.append(example)
return example_list
class SQuADProcessor:
def __init__(self, data_dir, tokenizer):
self.data_dir = data_dir
self.tokenizer = tokenizer
def create_examples(self, split):
if split == "train":
filename = "train.json"
elif split == "dev":
filename = "dev.json"
elif split == "test":
filename = "test.json"
else:
raise NotImplementedError(split)
print_rank_0(f"Creating SQuAD-{split} dataset from {self.data_dir}")
example_list = []
idx = 0
with open(os.path.join(self.data_dir, filename), encoding='utf-8') as file:
dataset = json.load(file)
for paragraphs in dataset:
for paragraph in paragraphs['paragraphs']:
context = paragraph['context']
for qa in paragraph['qas']:
question = qa["question"]
answers = {answer["text"] for answer in qa["answers"]}
answer_starts = {answer["text"]: answer["answer_start"] for answer in qa["answers"]}
for answer in answers:
guid = "%s-%s" % (split, idx)
meta = {
"answer_start": answer_starts[answer],
"answer": answer,
"question": question,
"ref": self.tokenizer.DecodeIds(self.tokenizer.EncodeAsIds(question).tokenization)}
example = InputExample(guid=guid, text_a=context, meta=meta)
if idx < 10:
print_rank_0(
(context.encode('utf-8'), answer.encode('utf-8'), meta["ref"].encode('utf-8')))
example_list.append(example)
idx += 1
print_rank_0(f"Creating {len(example_list)} examples for {split}")
return example_list
class XSumProcessor:
def __init__(self, data_dir, tokenizer):
self.data_dir = data_dir
self.tokenizer = tokenizer
def create_examples(self, split):
if split == "train":
key = "train"
elif split == "dev":
key = "validation"
elif split == "test":
key = "test"
else:
raise NotImplementedError(split)
print_rank_0(f"Creating XSUM-{split} dataset from {self.data_dir}")
with open(os.path.join(self.data_dir, "XSum-TRAINING-DEV-TEST-SPLIT-90-5-5.json")) as file:
id_list = json.load(file)
id_list = id_list[key]
source_texts, target_texts = [], []
for i, idx in enumerate(id_list):
with open(os.path.join(self.data_dir, f"{idx}.summary")) as file:
key, sentences = None, []
source_text, target_text = None, None
for line in file:
line = line.strip()
if line.startswith("[SN]"):
if key is not None:
if key == "RESTBODY":
source_text = " ".join(sentences)
elif key == "FIRST-SENTENCE":
target_text = " ".join(sentences)
key = line[4:-4]
sentences = []
elif line:
sentences.append(line)
if key is not None:
if key == "RESTBODY":
source_text = " ".join(sentences)
elif key == "FIRST-SENTENCE":
target_text = " ".join(sentences)
source_texts.append(source_text)
target_texts.append(target_text)
if (i + 1) % 1000 == 0:
print_rank_0(f"Complete {i + 1} examples")
assert len(source_texts) == len(target_texts)
example_list = []
for idx, (source_text, target_text) in enumerate(zip(source_texts, target_texts)):
if (idx + 1) % 20000 == 0:
print_rank_0(f"Complete {idx + 1} examples")
guid = "%s-%s" % (split, idx)
meta = {"ref": self.tokenizer.DecodeIds(self.tokenizer.EncodeAsIds(target_text).tokenization)}
example = InputExample(guid=guid, text_a=source_text, text_b=target_text, meta=meta)
if idx < 10:
print_rank_0((source_text.encode('utf-8'), target_text.encode('utf-8'), meta["ref"].encode('utf-8')))
example_list.append(example)
return example_list
class Seq2SeqDataset(torch.utils.data.Dataset):
def __init__(self, args, split, tokenizer):
self.args = args
self.task, self.data_dir = args.task.lower(), args.data_dir
self.max_src_length, self.max_tgt_length = args.src_seq_length, args.tgt_seq_length
self.split = split
self.tokenizer = tokenizer
self.dataset_name = split
if self.task in ["gigaword", "cnn_dm", "cnn_dm_original"]:
self.processor = SummmaryProcessor(self.task, self.data_dir, tokenizer)
elif self.task in ["xsum"]:
self.processor = XSumProcessor(self.data_dir, tokenizer)
elif self.task in ["squad_generation"]:
self.processor = SQuADProcessor(self.data_dir, tokenizer)
else:
raise NotImplementedError
example_list = self.processor.create_examples(split)
self.example_list = example_list
self.examples = {example.guid: example for example in example_list}
print_rank_0(f"Return {len(self.examples)} {split} examples")
def __len__(self):
return len(self.example_list)
def __getitem__(self, idx):
example = self.example_list[idx]
cls_id = self.tokenizer.get_command('ENC').Id
mask_token = 'sMASK' if self.args.task_mask else 'MASK'
mask_id = self.tokenizer.get_command(mask_token).Id
pad_id = self.tokenizer.get_command('pad').Id
sop_id = self.tokenizer.get_command('sop').Id
eop_id = self.tokenizer.get_command('eop').Id
if self.task in ["gigaword", "cnn_dm", "cnn_dm_original", "xsum"]:
source_text, target_text = example.text_a, example.text_b
source_tokens = self.tokenizer.EncodeAsIds(" " + source_text).tokenization
prompt = [cls_id, mask_id] + self.tokenizer.EncodeAsIds(" Content:").tokenization
if len(source_tokens) > self.max_src_length - len(prompt):
source_tokens = source_tokens[:self.max_src_length - len(prompt)]
source_tokens = prompt + source_tokens
elif self.task == "squad_generation":
source_text = example.text_a
target_text, answer = example.meta["question"], example.meta["answer"]
source_tokens = self.tokenizer.EncodeAsIds(source_text.rstrip() + " Question:").tokenization
answer_tokens = self.tokenizer.EncodeAsIds(" Answer: " + answer).tokenization
if len(source_tokens) > self.max_src_length - len(answer_tokens) - 2:
max_src_length = self.max_src_length - len(answer_tokens) - 2
answer_pattern = self.tokenizer.EncodeAsIds(" " + answer).tokenization
def sub_finder(mylist, pattern):
matches = []
for i in range(len(mylist)):
if mylist[i] == pattern[0] and mylist[i:i + len(pattern)] == pattern:
matches.append(i)
return matches
answer_indices = sub_finder(source_tokens, answer_pattern)
if len(answer_indices) == 0:
print(f"Answer {answer} not exists in the source text")
source_tokens = source_tokens[:max_src_length]
else:
start_index = max(answer_indices[0] - max_src_length // 2, 0)
source_tokens = source_tokens[start_index: start_index + max_src_length]
source_tokens = [cls_id] + source_tokens + [mask_id] + answer_tokens
else:
raise NotImplementedError
if len(source_tokens) < self.max_src_length:
source_tokens = source_tokens + [pad_id] * (self.max_src_length - len(source_tokens))
sep = len(source_tokens)
position_ids = list(range(len(source_tokens)))
block_position_ids = [0] * len(source_tokens)
mask_pos = source_tokens.index(mask_id)
if self.split == 'train':
target_tokens = self.tokenizer.EncodeAsIds(" " + target_text).tokenization
target_tokens = target_tokens + [eop_id]
if len(target_tokens) > self.max_tgt_length:
target_tokens = target_tokens[:self.max_tgt_length]
target_truncated = True
loss_mask = [1] * len(target_tokens)
if len(target_tokens) < self.max_tgt_length:
loss_mask += [0] * (self.max_tgt_length - len(target_tokens))
target_tokens += [pad_id] * (self.max_tgt_length - len(target_tokens))
tokens = source_tokens + [sop_id] + target_tokens[:-1]
loss_mask = [0] * len(source_tokens) + loss_mask
target_ids = [0] * len(source_tokens) + target_tokens
position_ids += [mask_pos] * len(target_tokens)
if self.args.no_block_position:
block_position_ids += [1] * len(target_tokens)
else:
block_position_ids += list(range(1, len(target_tokens) + 1))
position_ids = [position_ids, block_position_ids]
            sample = {'text': np.array(tokens, dtype=np.int64),
import numpy as np
from ..util import three_dimensionalize, euclidean, unitize
from ..constants import log
from ..constants import tol_path as tol
from ..constants import res_path as res
from .intersections import line_line
def arc_center(points):
"""
Given three points of an arc, find the center, radius, normal, and angle.
This uses the fact that the intersection of the perpendicular
bisectors of the segments between the control points is the center of the arc.
Parameters
---------
points: (3,d) list of points where (d in [2,3])
Returns
---------
result: dict, with keys:
'center': (d,) float, cartesian center of the arc
'radius': float, radius of the arc
'normal': (3,) float, the plane normal.
        'angles': (2,) float, start and end angle, in radians
'span' : float, angle swept by the arc, in radians
"""
# it's a lot easier to treat 2D as 3D with a zero Z value
is_2D, points = three_dimensionalize(points, return_2D=True)
# find the two edge vectors of the triangle
edge_direction = np.diff(points, axis=0)
edge_midpoints = (edge_direction * .5) + points[0:2]
# three points define a plane, so we find its normal vector
plane_normal = unitize(np.cross(*edge_direction[::-1]))
vector_edge = unitize(edge_direction)
vector_perpendicular = unitize(np.cross(vector_edge, plane_normal))
intersects, center = line_line(edge_midpoints, vector_perpendicular)
if not intersects:
raise ValueError('Segments do not intersect!')
radius = euclidean(points[0], center)
vector = unitize(points - center)
angle = np.arccos(np.clip(np.dot(*vector[[0, 2]]), -1.0, 1.0))
large_arc = (abs(angle) > tol.zero and
np.dot(*edge_direction) < 0.0)
if large_arc:
angle = (np.pi * 2) - angle
angles = np.arctan2(*vector[:, 0:2].T[::-1]) + np.pi * 2
angles_sorted = np.sort(angles[[0, 2]])
reverse = angles_sorted[0] < angles[1] < angles_sorted[1]
angles_sorted = angles_sorted[::(1 - int(not reverse) * 2)]
result = {'center': center[:(3 - is_2D)],
'radius': radius,
'normal': plane_normal,
'span': angle,
'angles': angles_sorted}
return result
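# Minimal usage sketch (inputs assumed for illustration): three points on the unit
# circle recover the expected center, radius and span.
#
#   pts = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0]])
#   info = arc_center(pts)
#   # info['center'] -> [0, 0], info['radius'] -> 1.0, info['span'] -> pi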
def discretize_arc(points, close=False, scale=1.0):
"""
Returns a version of a three point arc consisting of line segments
Parameters
---------
    points: (3, d) float, where d in [2, 3]; the arc goes from points[0]
            to points[2], passing through the control point points[1]
    close: boolean, if True close the arc (circle)
    scale: float, scale factor applied to the segment-length resolution criterion
    Returns
    ---------
    discrete: (m, d) float, points sampled along the arc, to be joined by line segments
"""
two_dimensional, points = three_dimensionalize(points, return_2D=True)
center_info = arc_center(points)
center, R, N, angle = (center_info['center'],
center_info['radius'],
center_info['normal'],
center_info['span'])
if close:
angle = np.pi * 2
# the number of facets, based on the angle critera
count_a = angle / res.seg_angle
count_l = ((R * angle)) / (res.seg_frac * scale)
count = np.max([count_a, count_l])
# force at LEAST 4 points for the arc, otherwise the endpoints will diverge
count = np.clip(count, 4, np.inf)
count = int(np.ceil(count))
V1 = unitize(points[0] - center)
V2 = unitize(np.cross(-N, V1))
t = np.linspace(0, angle, count)
discrete = np.tile(center, (count, 1))
discrete += R * np.cos(t).reshape((-1, 1)) * np.tile(V1, (count, 1))
discrete += R * np.sin(t).reshape((-1, 1)) * np.tile(V2, (count, 1))
if not close:
arc_dist = np.linalg.norm(points[[0, -1]] - discrete[[0, -1]], axis=1)
arc_ok = (arc_dist < tol.merge).all()
if not arc_ok:
log.warn(
'Failed to discretize arc (endpoint distance %s)',
str(arc_dist))
log.warn('Failed arc points: %s', str(points))
raise ValueError('Arc endpoints diverging!')
discrete = discrete[:, 0:(3 - two_dimensional)]
return discrete
def arc_tangents(points):
"""
returns tangent vectors for points
"""
two_dimensional, points = three_dimensionalize(points, return_2D=True)
    center_info = arc_center(points)
    center, R, N, angle = (center_info['center'], center_info['radius'],
                           center_info['normal'], center_info['span'])
vectors = points - center
tangents = unitize(np.cross(vectors, N))
return tangents[:, 0:(3 - two_dimensional)]
def arc_offset(points, distance):
two_dimensional, points = three_dimensionalize(points)
    center_info = arc_center(points)
    center, R, N, angle = (center_info['center'], center_info['radius'],
                           center_info['normal'], center_info['span'])
vectors = unitize(points - center)
new_points = center + vectors * distance
return new_points[:, 0:(3 - two_dimensional)]
def to_threepoint(center, radius, angles=None):
"""
For 2D arcs, given a center and radius convert them to three
points on the arc.
Parameters
-----------
center: (2,) float, center point on the plane
radius: float, radius of arc
angles: (2,) float, angles in radians to make the arc
if not specified, will default to (0.0, pi)
Returns
----------
three: (3,2) float, arc control points
"""
# if no angles provided assume we want a half circle
if angles is None:
angles = [0.0, np.pi]
# force angles to float64
    angles = np.asanyarray(angles, dtype=np.float64)
"""
orderparam.py
Contains the methods for order parameter calculation and anlyses.
"""
from collections import defaultdict
import numpy as np
from memly.metrics import Metric
from memly.membrane import unit_vector, angle_between
def calculate_orderparam(angle):
"""
    Calculate the order parameter for the provided angle.
    :param angle: angle in degrees between the bond vector and the bilayer normal
    :return: order parameter S = 0.5 * (3 * cos^2(angle) - 1)
"""
return 0.5 * (3 * (np.cos(np.radians(angle))) ** 2 - 1)
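# Quick sanity check of the formula (values below are illustrative, computed by hand):
# 0 deg (bond parallel to the normal) gives S = 1.0, the "magic angle" ~54.7 deg
# gives S ~ 0.0, and 90 deg gives S = -0.5.
#
#   calculate_orderparam(0.0)    # -> 1.0
#   calculate_orderparam(54.74)  # -> ~0.0
#   calculate_orderparam(90.0)   # -> -0.5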
class OrderParam(Metric):
def __init__(self, membrane, title="Order parameter", units="unitless"):
"""
Calculates order parameters for each lipid species in the simulation.
:param membrane: memly.membrane.Membrane object
:param title: str, optional. The title of the metric. Default is "Order parameter".
:param units: str, optional. Units of the metric. Default is "unitless".
"""
# Run the initialisation of the parent Metric class
Metric.__init__(self, membrane, title, units)
# Collect bonded pairs, grouped together by residue name
self.bonded_catalogue = defaultdict(list)
for bond in self.membrane.sim.topology.bonds:
catalogue_name = bond.atom1.residue.name + "-" + bond.atom1.name + "-" + bond.atom2.name
self.bonded_catalogue[catalogue_name].append((bond.atom1.index, bond.atom2.index))
# Calculate order parameters
self.orderparams = {catalogue_name: calculate_orderparam(self.get_ensemble_average_angle(catalogue_name))
for catalogue_name in self.bonded_catalogue.keys()}
# Store results
for catalogue_name, orderp in self.orderparams.items():
self.add_results(lipid=catalogue_name, value=orderp, leaflet="Both")
for leaflet_name in self.membrane.leaflets[0].keys():
self.add_results(lipid=catalogue_name,
value=calculate_orderparam(self.get_leaflet_average_angle(catalogue_name, leaflet_name)),
leaflet=leaflet_name)
def get_ensemble_average_angle(self, catalogue_name):
"""
Returns the ensemble-averaged angle between all the bonded pairs and the bilayer normal
:param catalogue_name:
:return:
"""
angles = []
for particle_pair in self.bonded_catalogue[catalogue_name]:
angles.append(np.asarray(self.calculate_angles_to_bilayer_normal(particle_pair)))
return np.mean(np.asarray(angles))
def get_leaflet_average_angle(self, catalogue_name, leaflet_name):
"""
Returns the ensemble-averaged angle between instances of the named bonded pair that are located
in the indicated leaflet.
:param leaflet_name: str, Label of the leaflet to analyse.
:param catalogue_name:
:return:
"""
angles = []
for particle_pair in self.bonded_catalogue[catalogue_name]:
# Create a selection mask for frames in which the particles belong to a lipid residue that is located in the chosen leaflet
            # Have to do this separately for each particle pair, since each particle's leaflet presence changes independently between frames.
#chosen_frames = np.asarray([True if self.membrane.lipid_residues_by_particle[particle_pair[0]] in self.membrane.leaflets[frame][leaflet_name] else False for frame in range(0,len(self.membrane.sim))])
            chosen_frames = np.asarray(self.membrane.leaflet_occupancy_by_resid[self.membrane.lipid_residues_by_particle[particle_pair[0]]])
import numpy as np
import os
from skimage.io import imread, imsave
from skimage.transform import estimate_transform, warp
from time import time
from predictor import PosPrediction
class PRN:
''' Joint 3D Face Reconstruction and Dense Alignment with Position Map Regression Network
Args:
is_dlib(bool, optional): If true, dlib is used for detecting faces.
prefix(str, optional): If run at another folder, the absolute path is needed to load the data.
'''
def __init__(self, is_dlib = False, prefix = '.'):
# resolution of input and output image size.
self.resolution_inp = 256
self.resolution_op = 256
#---- load detectors
if is_dlib:
import dlib
detector_path = os.path.join(prefix, 'Data/net-data/mmod_human_face_detector.dat')
self.face_detector = dlib.cnn_face_detection_model_v1(
detector_path)
#---- load PRN
self.pos_predictor = PosPrediction(self.resolution_inp, self.resolution_op)
prn_path = os.path.join(prefix, 'Data/net-data/256_256_resfcn256_weight')
if not os.path.isfile(prn_path + '.data-00000-of-00001'):
print("please download PRN trained model first.")
exit()
self.pos_predictor.restore(prn_path)
# uv file
self.uv_kpt_ind = np.loadtxt(prefix + '/Data/uv-data/uv_kpt_ind.txt').astype(np.int32) # 2 x 68 get kpt
self.face_ind = np.loadtxt(prefix + '/Data/uv-data/face_ind.txt').astype(np.int32) # get valid vertices in the pos map
self.triangles = np.loadtxt(prefix + '/Data/uv-data/triangles.txt').astype(np.int32) # ntri x 3
self.uv_coords = self.generate_uv_coords()
def generate_uv_coords(self):
resolution = self.resolution_op
uv_coords = np.meshgrid(range(resolution),range(resolution))
uv_coords = np.transpose(np.array(uv_coords), [1,2,0])
        uv_coords = np.reshape(uv_coords, [resolution**2, -1])
uv_coords = uv_coords[self.face_ind, :]
uv_coords = np.hstack((uv_coords[:,:2], np.zeros([uv_coords.shape[0], 1])))
return uv_coords
def dlib_detect(self, image):
return self.face_detector(image, 1)
def net_forward(self, image):
''' The core of out method: regress the position map of a given image.
Args:
image: (256,256,3) array. value range: 0~1
Returns:
pos: the 3D position map. (256, 256, 3) array.
'''
return self.pos_predictor.predict(image)
def process(self, input, image_info = None):
''' process image with crop operation.
Args:
input: (h,w,3) array or str(image path). image value range:1~255.
image_info(optional): the bounding box information of faces. if None, will use dlib to detect face.
Returns:
pos: the 3D position map. (256, 256, 3).
'''
if isinstance(input, str):
try:
image = imread(input)
except IOError:
print("error opening file: ", input)
return None
else:
image = input
if image.ndim < 3:
image = np.tile(image[:,:,np.newaxis], [1,1,3])
if image_info is not None:
if np.max(image_info.shape) > 4: # key points to get bounding box
kpt = image_info
if kpt.shape[0] > 3:
kpt = kpt.T
                left = np.min(kpt[0, :])
#Modified from: https://github.com/websitefingerprinting/WebsiteFingerprinting, created by WFDetection.
import sys
import os
import multiprocessing as mp
from os import mkdir
from os.path import join, abspath, dirname, pardir
import numpy as np
import pandas as pd
import json
import argparse
import logging
import datetime
BASE_DIR = abspath(join(dirname(__file__), pardir))
logger = logging.getLogger('glue')
def config_logger(args):
# Set file
log_file = sys.stdout
if args.log != 'stdout':
log_file = open(args.log, 'w')
ch = logging.StreamHandler(log_file)
# Set logging format
LOG_FORMAT = "%(asctime)s %(name)-12s %(levelname)-8s %(message)s"
ch.setFormatter(logging.Formatter(LOG_FORMAT))
logger.addHandler(ch)
# Set level format
logger.setLevel(logging.INFO)
def parse_arguments():
parser = argparse.ArgumentParser(description='It simulates adaptive padding on a set of web traffic traces.')
parser.add_argument('traces_path',
metavar='<traces path>',
help='Path to the directory with the traffic traces to be simulated.')
parser.add_argument('-listpath',
type=str,
metavar='<mergelist>',
help='Give an order of traces in a l-traces')
parser.add_argument('-noise',
type=str,
metavar='<pad noise>',
default= 'False',
                        help='Whether to simulate padding glue noise or not')
parser.add_argument('-glue',
type=str,
metavar='<pad front noise>',
default= 'False',
                        help='Whether to simulate padding front noise or not')
parser.add_argument('-forWFD',
type=str,
metavar='<only save .npy>',
default= 'False',
                        help='Only save .npy of directions, for generating WFD training data')
parser.add_argument('-output',
type=str,
metavar='<output_dir>',
help='Output directory for l-traces')
parser.add_argument('--log',
type=str,
dest="log",
metavar='<log path>',
default='stdout',
help='path to the log file. It will print to stdout by default.')
args = parser.parse_args()
#config = dict(conf_parser._sections[args.section])
config_logger(args)
return args
# '''used to save single traces'''
# global list_names
def load_trace(fname, t = 999, noise = False):
    '''Load a trace from fname, keeping packets up to time t (plus a 0.5 s margin).
    Returns the trace as an (n, 2) array of [timestamp, signed length].'''
pkts = []
with open(fname, 'r') as f:
for line in f:
try:
timestamp, length = line.strip().split('\t')
pkts.append([float(timestamp), int(length)])
if float(timestamp) >= t+0.5:
break
except ValueError:
logger.warn("Could not split line: %s in %s", line, fname)
return np.array(pkts)
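# The expected trace file format (inferred from the split('\t') above) is one
# "timestamp<TAB>signed_length" pair per line, e.g.:
#
#   0.000000   1
#   0.051200   -1
#
#   trace = load_trace('some_site/0-0.cell', t=80)   # hypothetical path
#   # trace[:, 0] are timestamps, trace[:, 1] are signed packet sizes/directions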
def weibull(k = 0.75):
    return np.random.weibull(k)
def uniform():
return np.random.uniform(1,10)
def simulate(trace):
# logger.debug("Simulating trace {}".format(fdir))
np.random.seed(datetime.datetime.now().microsecond)
front_trace = RP(trace)
return front_trace
def RP(trace):
# format: [[time, pkt],[...]]
# trace, cpkt_num, spkt_num, cwnd, swnd
client_dummy_pkt_num = 1100
server_dummy_pkt_num = 1100
client_min_dummy_pkt_num = 1
server_min_dummy_pkt_num = 1
start_padding_time = 0
max_wnd = 14
min_wnd = 1
client_wnd = np.random.uniform(min_wnd, max_wnd)
server_wnd = np.random.uniform(min_wnd, max_wnd)
if client_min_dummy_pkt_num != client_dummy_pkt_num:
client_dummy_pkt = np.random.randint(client_min_dummy_pkt_num,client_dummy_pkt_num)
else:
client_dummy_pkt = client_dummy_pkt_num
if server_min_dummy_pkt_num != server_dummy_pkt_num:
server_dummy_pkt = np.random.randint(server_min_dummy_pkt_num,server_dummy_pkt_num)
else:
server_dummy_pkt = server_dummy_pkt_num
logger.debug("client_wnd:",client_wnd)
logger.debug("server_wnd:",server_wnd)
logger.debug("client pkt:", client_dummy_pkt)
logger.debug("server pkt:", server_dummy_pkt)
first_incoming_pkt_time = trace[np.where(trace[:,1] <0)][0][0]
#This is to find the last pkt time of first trace. We cant use last pkt time of the whole trace for MP
last_pkt_time = trace[np.where(abs(trace[:,1]) == 1 )][-1][0]
# print("last_pkt_time",last_pkt_time)
client_timetable = getTimestamps(client_wnd, client_dummy_pkt)
    client_timetable = client_timetable[np.where(start_padding_time + client_timetable[:, 0] <= last_pkt_time)]
import pytest
import numpy as np
from keras.utils.test_utils import get_test_data
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
input_dim = 5
hidden_dims = 5
num_train = 100
num_test = 50
num_class = 3
batch_size = 32
epochs = 1
verbosity = 0
optim = 'adam'
loss = 'categorical_crossentropy'
np.random.seed(42)
(X_train, y_train), (X_test, y_test) = get_test_data(
num_train=num_train, num_test=num_test, input_shape=(input_dim,),
classification=True, num_classes=num_class)
def build_fn_clf(hidden_dims):
model = Sequential()
model.add(Dense(input_dim, input_shape=(input_dim,)))
model.add(Activation('relu'))
model.add(Dense(hidden_dims))
model.add(Activation('relu'))
model.add(Dense(num_class))
model.add(Activation('softmax'))
model.compile(optimizer='sgd', loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def test_classify_build_fn():
clf = KerasClassifier(
build_fn=build_fn_clf, hidden_dims=hidden_dims,
batch_size=batch_size, epochs=epochs)
assert_classification_works(clf)
assert_string_classification_works(clf)
def test_classify_class_build_fn():
class ClassBuildFnClf(object):
def __call__(self, hidden_dims):
return build_fn_clf(hidden_dims)
clf = KerasClassifier(
build_fn=ClassBuildFnClf(), hidden_dims=hidden_dims,
batch_size=batch_size, epochs=epochs)
assert_classification_works(clf)
assert_string_classification_works(clf)
def test_classify_inherit_class_build_fn():
class InheritClassBuildFnClf(KerasClassifier):
def __call__(self, hidden_dims):
return build_fn_clf(hidden_dims)
clf = InheritClassBuildFnClf(
build_fn=None, hidden_dims=hidden_dims,
batch_size=batch_size, epochs=epochs)
assert_classification_works(clf)
assert_string_classification_works(clf)
def assert_classification_works(clf):
clf.fit(X_train, y_train, batch_size=batch_size, epochs=epochs)
score = clf.score(X_train, y_train, batch_size=batch_size)
assert np.isscalar(score) and np.isfinite(score)
preds = clf.predict(X_test, batch_size=batch_size)
assert preds.shape == (num_test, )
for prediction in np.unique(preds):
assert prediction in range(num_class)
proba = clf.predict_proba(X_test, batch_size=batch_size)
assert proba.shape == (num_test, num_class)
assert np.allclose(np.sum(proba, axis=1), np.ones(num_test))
def assert_string_classification_works(clf):
string_classes = ['cls{}'.format(x) for x in range(num_class)]
str_y_train = np.array(string_classes)[y_train]
clf.fit(X_train, str_y_train, batch_size=batch_size, epochs=epochs)
score = clf.score(X_train, str_y_train, batch_size=batch_size)
assert np.isscalar(score) and np.isfinite(score)
preds = clf.predict(X_test, batch_size=batch_size)
assert preds.shape == (num_test, )
for prediction in np.unique(preds):
assert prediction in string_classes
proba = clf.predict_proba(X_test, batch_size=batch_size)
assert proba.shape == (num_test, num_class)
    assert np.allclose(np.sum(proba, axis=1), np.ones(num_test))
'''Reinforcement learning (RL) environment for the pegs on disks domain.'''
# python
import os
import fnmatch
from copy import copy
from time import sleep, time
# scipy
from scipy.io import loadmat
from matplotlib import pyplot
from scipy.spatial import cKDTree
from numpy.linalg import inv, norm
from numpy.random import choice, rand, randint, randn, uniform
from numpy import arccos, argmax, argmin, array, arange, cos, dot, eye, hstack, logical_or, mean, \
pi, power, repeat, reshape, sin, sqrt, sum, vstack, zeros
# openrave
import openravepy
# self
import point_cloud
from rl_environment import RlEnvironment
from hand_descriptor import HandDescriptor
class RlEnvironmentPegsOnDisks(RlEnvironment):
def __init__(self, params):
'''Initializes openrave environment, parameters, and first episode.
- Input params: System parameters data structure.
'''
RlEnvironment.__init__(self, params)
# parameters
self.nObjects = params["nObjects"]
self.nSupportObjects = params["nSupportObjects"]
self.objectFolder = params["objectFolder"]
self.supportObjectFolder = params["supportObjectFolder"]
self.placeOrientTolerance = self.params["placeOrientTolerance"]
self.placeHeightTolerance = self.params["placeHeightTolerance"]
self.rewardCapGrasps = self.params["rewardCapGrasps"]
self.colors = array([ \
(1.0, 0.0, 0.0, 0.5), (0.0, 1.0, 0.0, 0.5), (0.0, 0.0, 1.0, 0.5), (0.0, 1.0, 1.0 ,0.5),
(1.0, 0.0, 1.0, 0.5), (1.0, 1.0, 0.0, 0.5), (0.5, 1.0, 0.0, 0.5), (0.5, 0.0, 1.0, 0.5),
(0.0, 0.5, 1.0, 0.5), (1.0, 0.5, 0.0, 0.5), (1.0, 0.0, 0.5, 0.5), (0.0, 1.0, 0.5, 0.5) ])
self.pointToRealRadiusError = 0.0001
# initialization
self.InitializeHandRegions()
self.objectFileNames = os.listdir(self.objectFolder)
self.objectFileNames = fnmatch.filter(self.objectFileNames, "*.dae")
self.supportObjectFileNames = os.listdir(self.supportObjectFolder)
self.supportObjectFileNames = fnmatch.filter(self.supportObjectFileNames, "*.dae")
# internal state
self.objects = []
self.supportObjects = []
self.ResetEpisode()
def GenerateCylinderMesh(self, heightMinMax, radiusMinMax, name):
'''Generates a cylinder and saves it into a CAD model file.
- Input heightMinMax: Tuple specifying range (min, max) from which to select cylinder height.
- Input radiusMinmax: Tuple specifying range (min, max) from which to select cylinder radius.
- Input name: String name of object; also determines name of file to save.
- Returns body: Handle to the openrave object, added to the environment.
'''
# create object
height = uniform(heightMinMax[0], heightMinMax[1])
radius = uniform(radiusMinMax[0], radiusMinMax[1])
geomInfo = openravepy.KinBody.Link.GeometryInfo()
geomInfo._type = openravepy.KinBody.Link.GeomType.Cylinder
geomInfo._vGeomData = [radius, height]
geomInfo._vDiffuseColor = self.colors[randint(len(self.colors))]
body = openravepy.RaveCreateKinBody(self.env, "")
body.InitFromGeometries([geomInfo])
body.SetName(name)
body.height = height
body.radius = radius
self.env.Add(body, True)
# save mesh file
self.env.Save(name + ".dae", openravepy.Environment.SelectionOptions.Body, name)
print("Saved " + name + ".")
return body
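    # Example call (the size ranges below are placeholders, in meters):
    #
    #   peg = env.GenerateCylinderMesh(heightMinMax=(0.04, 0.08),
    #                                  radiusMinMax=(0.01, 0.02), name="peg0")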
def GetArtificialCloud(self):
'''Concatenates point cloud data from all objects and support objects.
- Returns cloud: Point cloud in the base/world reference frame.
'''
clouds = []
objects = self.supportObjects + self.objects
for obj in objects:
cloud = point_cloud.Transform(obj.GetTransform(), obj.cloud)
clouds.append(cloud)
return vstack(clouds)
def IsPegGrasp(self, descriptor):
'''Checks if, when the hand is placed at the descriptor's pose and closed, a grasp takes place.
A grasp must be (1) collision-free (2) contain exactly 1 peg's geometry, (3) contain the
cylinder's axis, and (4) not contact the side and cap of the cylinder.
- Input descriptor: HandDescriptor object of the target hand pose.
- Returns graspedObject: The handle of the grasped object if a cylinder can be grasped from the
target hand pose; otherwise None.
- Returns isCapGrasp: True if this is a good grasp and each finger contacts the bottom/top of
the peg.
'''
# check collision
collision, objCloudsInHandFrame = self.IsRobotInCollision(descriptor)
if collision: return None, False
# check intersection of exactly 1 object
graspedObject = None; pointsInHand = None
for i, obj in enumerate(self.objects):
X = point_cloud.FilterWorkspace(self.handClosingRegion, objCloudsInHandFrame[i])
intersect = X.size > 0
if intersect:
if graspedObject is None:
graspedObject = obj
pointsInHand = X
else:
# intersection of multiple objects
return None, False
if graspedObject is None:
# intersection of no objects
return None, False
# A cylinder can only be upright or on the side. We handle these two cases separately.
bTo = graspedObject.GetTransform()
if self.IsPegUpright(graspedObject):
# Top-center of cylinder in the hand is necessary and sufficient.
bp = copy(bTo[0:3, 3])
bp[2] += graspedObject.height / 2.0
hp = point_cloud.Transform(inv(descriptor.T), array([bp]))
hP = point_cloud.FilterWorkspace(self.handClosingRegion, hp)
if hP.size == 0:
return None, False
return graspedObject, False
# Cylinder is on its side.
# check if finger tips are below cylinder axis
cylinderZ = bTo[2, 3]
fingerZ = descriptor.center[2] - descriptor.depth / 2.0
if fingerZ > cylinderZ:
return None, False
# make sure cylinder caps are not in hand
contactIdxs = array([argmax(pointsInHand[:, 1]), argmin(pointsInHand[:, 1])])
contacts = pointsInHand[contactIdxs, :]
oX = point_cloud.Transform(dot(inv(bTo), descriptor.T), pointsInHand)
capIdxs = sum(power(oX[:, 0:2], 2), 1) < (graspedObject.radius - self.pointToRealRadiusError)**2
capIdxs = capIdxs.flatten()
nContactsOnCap = sum(capIdxs[contactIdxs])
if nContactsOnCap == 1 or sum(power(contacts[0, 0:2] - contacts[1, 0:2], 2)) < \
(min(2 * graspedObject.radius, graspedObject.height) - 2 * self.pointToRealRadiusError)**2:
# 1 finger contacts cap, other finger contacts side
return None, False
# side grasp is good
return graspedObject, nContactsOnCap == 2
def IsRobotInCollision(self, descriptor):
'''Checks collision between the robot and the world.
- Input descriptor: HandDescriptor object for the current hand pose.
- Returns: True if in collision and False otherwise.
- Returns objCloudsInHandFrame: List of point clouds, one for each object, in the descriptor
reference frame. Or, None if a collision is detected. (This is to avoid performing transforms
of all object clouds twice.)
'''
# ODE misses several box-cylinder collisions. So we have to implement this ourselves.
# check collision with table
bX = point_cloud.Transform(descriptor.T, self.externalHandPoints)
if (bX[:, 2] < self.GetTableHeight()).any():
return True, None
# some preparation
hTb = inv(descriptor.T)
self.robot.SetTransform(descriptor.T) # for visualization only
objects = self.objects + self.supportObjects
objCloudsInHandFrame = []
# check if any object points intersect hand collision geometry
for i, obj in enumerate(objects):
bTo = obj.GetTransform()
hX = point_cloud.Transform(dot(hTb, bTo), obj.cloud)
X = point_cloud.FilterWorkspace(self.handFingerRegionL, hX)
if X.size > 0: return True, None
X = point_cloud.FilterWorkspace(self.handFingerRegionR, hX)
if X.size > 0: return True, None
X = point_cloud.FilterWorkspace(self.handTopRegion, hX)
if X.size > 0: return True, None
if i < len(self.objects): objCloudsInHandFrame.append(hX)
return False, objCloudsInHandFrame
def InitializeHandRegions(self):
'''Determines hand geometry, in the descriptor reference frame, for collision checking. Should
be called once at initialization.
'''
# find default descriptor geometry
desc = HandDescriptor(eye(4), self.params["imP"], self.params["imD"], self.params["imW"])
# important reference points
topUp = desc.top + (desc.height / 2) * desc.axis
topDn = desc.top - (desc.height / 2) * desc.axis
    BtmUp = desc.bottom + (desc.height / 2) * desc.axis # bottom corners (assumes the descriptor exposes a bottom point mirroring top)
    BtmDn = desc.bottom - (desc.height / 2) * desc.axis
# cuboids representing hand regions, in workspace format
self.handClosingRegion = [
(-desc.height / 2, desc.height / 2),
(-desc.width / 2, desc.width / 2),
(-desc.depth / 2, desc.depth / 2)]
self.handFingerRegionL = [
(-desc.height / 2, desc.height / 2),
(-desc.width / 2 - 0.01, -desc.width / 2),
(-desc.depth / 2, desc.depth / 2)]
self.handFingerRegionR = [
(-desc.height / 2, desc.height / 2),
(desc.width / 2, desc.width / 2 + 0.01),
(-desc.depth / 2, desc.depth / 2)]
self.handTopRegion = [
(-desc.height / 2, desc.height / 2),
(-desc.width / 2 - 0.01, desc.width / 2 + 0.01),
(desc.depth / 2, desc.depth / 2 + 0.01)]
# find corners of hand collision geometry
self.externalHandPoints = array([ \
topUp + ((desc.width / 2) + 0.01) * desc.binormal,
topUp - ((desc.width / 2) + 0.01) * desc.binormal,
topDn + ((desc.width / 2) + 0.01) * desc.binormal,
topDn - ((desc.width / 2) + 0.01) * desc.binormal,
BtmUp + ((desc.width / 2) + 0.01) * desc.binormal,
BtmUp - ((desc.width / 2) + 0.01) * desc.binormal,
BtmDn + ((desc.width / 2) + 0.01) * desc.binormal,
BtmDn - ((desc.width / 2) + 0.01) * desc.binormal, ])
def IsPegUpright(self, obj):
'''Returns True iff the peg's axis is (nearly) normal to the table plane. In this environment it
    can only be normal or orthogonal.'''
return abs(obj.GetTransform()[2, 2]) > 0.9
def PerformGrasp(self, descriptor, cloud):
'''Tests for and simulates a grasp. If an object is grasped, self.holdingObject is set.
- Input descriptor: Pose of the grasp.
- Input cloud: Point cloud of the current scene, in the base/world frame (excluding table).
- Returns reward: -1 if grasping a placed object, 1 if grasping an unplaced object, and 0 otherwise.
'''
self.holdingObject, isCapGrasp = self.IsPegGrasp(descriptor)
if not self.holdingObject:
if self.params["showSteps"]:
raw_input("Grasp failed.")
return 0.0
if self.params["showSteps"]:
raw_input("Grasp succeeded.")
# generate grasp image
descriptor.GenerateHeightmap(cloud, self.GetTableHeight())
self.holdingDescriptor = descriptor
# simulate object movement when hand closes
self.SimulateObjectMovementOnClose(descriptor, self.holdingObject, isCapGrasp)
# move to holding pose
self.MoveHandToHoldingPose()
self.MoveObjectToHandAtGrasp(descriptor.T, self.holdingObject)
# compute reward
if self.holdingObject in self.placedObjects:
del self.placedObjects[self.holdingObject]
return -1.0
if not self.rewardCapGrasps and isCapGrasp:
return 0.0
return 1.0
def PerformPlace(self, descriptor):
'''Places the object and computes the appropriate reward. If place is not good, the object gets
removed from the environment, as its resulting state is hard to determine. Assumes robot and
object are at the holding pose.
- Input descriptor: Location of the hand at place.
- Returns reward: 1 if place is on an unoccupied disk and 0 otherwise.
'''
# move object to hand at place
bTg = self.robot.GetTransform()
self.MoveHandToPose(descriptor.T)
self.MoveObjectToHandAtGrasp(bTg, self.holdingObject)
self.MoveHandToHoldingPose()
# no longer holding an object
placedObject = self.holdingObject
self.holdingObject = None
self.holdingDescriptor = None
# check if peg is vertical
bTo = placedObject.GetTransform()
if abs(dot(bTo[0:3, 2], array([0.0, 0.0, 1.0]))) < 1.0 - self.placeOrientTolerance:
self.PlaceFailed(placedObject)
return 0.0
# check if peg is entirely over a disk
supportObject = None
for disk in self.supportObjects:
diskXY = disk.GetTransform()[0:2, 3]
if sum(power(diskXY - bTo[0:2, 3], 2)) < (disk.radius - placedObject.radius)**2:
supportObject = disk
break
# not above any disk
if supportObject is None:
self.PlaceFailed(placedObject)
return 0.0
# support object is already occupied
if supportObject in self.placedObjects.values():
self.PlaceFailed(placedObject)
return 0.0
# check if height is good
supportTopZ = supportObject.GetTransform()[2, 3] + supportObject.height / 2.0
objectBottomZ = placedObject.GetTransform()[2, 3] - placedObject.height / 2.0
if objectBottomZ < supportTopZ - self.placeHeightTolerance[0] or \
objectBottomZ > supportTopZ + self.placeHeightTolerance[1]:
self.PlaceFailed(placedObject)
return 0.0
# check if hand is in collision
collision, cloudsInHandFrame = self.IsRobotInCollision(descriptor)
if collision:
self.PlaceFailed(placedObject)
return 0.0
# place is good
if self.params["showSteps"]:
raw_input("Placed object successfully.")
self.placedObjects[placedObject] = supportObject
return 1.0
def PlaceObjects(self, isSupportObjects, maxPlaceAttempts=10,
workspace=((-0.18, 0.18), (-0.18, 0.18))):
'''Chooses and places objects randomly on the table.
- Input isSupportObjects: Are the objects support objects (i.e. disks)?
- Input maxPlaceAttempts: Maximum number of times to place an object collision-free. If exceeded,
the object will be placed in collision with some already placed object(s).
- Input workspace: Area to place objects in, [(minX, maxX), (minY, maxY)]. Center of objects will
not be outside of these bounds.
- Returns None.
'''
# support object / graspable object
if isSupportObjects:
nObjects = self.nSupportObjects
folderName = self.supportObjectFolder
fileNames = self.supportObjectFileNames
else:
nObjects = self.nObjects
folderName = self.objectFolder
fileNames = self.objectFileNames
# select file(s)
fileIdxs = choice(len(fileNames), size=nObjects, replace=False)
objectHandles = []
# add objects
for i in xrange(nObjects):
# choose a random object from the folder
objectName = fileNames[fileIdxs[i]]
# load object
self.env.Load(folderName + "/" + objectName)
shortObjectName = objectName[:-4]
body = self.env.GetKinBody(shortObjectName)
# load points, height, and radius
data = loadmat(folderName + "/" + shortObjectName + ".mat")
body.cloud = data["cloud"]
body.height = data["height"]
body.radius = data["radius"]
# select pose for object
for j in xrange(maxPlaceAttempts):
# choose orientation
r1 = 0.0 if isSupportObjects else choice([pi / 2.0, 0.0], p=[2.0 / 3.0, 1.0 / 3.0])
r2 = uniform(0, 2.0 * pi)
R1 = openravepy.matrixFromAxisAngle([1.0, 0.0, 0.0], r1)
R2 = openravepy.matrixFromAxisAngle([0.0, 1.0, 0.0], r2) if r1 > 0 else eye(4)
# choose xy position
xy = array([ \
uniform(workspace[0][0], workspace[0][1]),
uniform(workspace[1][0], workspace[1][1])])
# set height
z = body.height / 2.0 if r1 == 0 else copy(body.radius)
z += self.GetTableHeight()
# set transform
T = eye(4)
T[0:2, 3] = xy
T[2, 3] = z
T = dot(T, dot(R1, R2))
body.SetTransform(T)
if not self.env.CheckCollision(body): break
# add to environment
objectHandles.append(body)
if isSupportObjects:
self.supportObjects += objectHandles
else:
self.objects += objectHandles
def PlaceFailed(self, placedObject):
'''Helper function to be called if a successful place condition is not met.'''
if self.params["showSteps"]:
raw_input("Place failed.")
self.objects.remove(placedObject)
self.env.Remove(placedObject)
def ResetEpisode(self):
'''Resets all internal variables pertaining to a particular episode, including objects placed.'''
self.RemoveObjectSet(self.objects)
self.RemoveObjectSet(self.supportObjects)
self.objects = []
self.supportObjects = []
self.holdingObject = None
self.holdingDescriptor = None
self.placedObjects = {}
def SimulateObjectMovementOnClose(self, descriptor, obj, isCapGrasp):
'''The object can move when the fingers close during a grasp.
This sets the object to an approximation to the correct resultant pose.
- Input descriptor: Grasp pose. Assumes this is a valid grasp.
- Input obj: The object being grasped.
- Returns None.
'''
# get object pose in hand frame
bTo = obj.GetTransform()
hTb = inv(descriptor.T)
hTo = dot(hTb, bTo)
if self.IsPegUpright(obj):
# Top grasp. Simply set the y-position to 0.
hTo[1, 3] = 0
elif isCapGrasp:
# Side grasp where fingers contact peg caps.
# Set y = 0 at center point.
hTo[1, 3] = 0
# Set the orientation to be horizontal in hand.
zAxis = hTo[0:2, 2] if hTo[1, 2] >= 0 else -hTo[0:2, 2]
angle = arccos(dot(zAxis, array([0.0, 1.0])) / | norm(zAxis) | numpy.linalg.norm |
"""
This is the main code for P-CRITICAL on Loihi.
The NxPCritical class provides the input and reservoir layers of a liquid state machine.
Output is time-binned on the lakemonts and returned through a snip channel.
Usage examples are available in the scripts directory.
"""
import os
import logging
from time import sleep
from enum import IntEnum
import numpy as np
import networkx as netx
from quantities import ms
from scipy.sparse import coo_matrix
import nxsdk.api.n2a as nx
from nxsdk.arch.n2a.n2board import N2Board
from nxsdk.graph.monitor.probes import SpikeProbeCondition, IntervalProbeCondition
from tqdm import trange
_SCALING_FACTOR = 256
_logger = logging.getLogger(__name__)
def rescale(var, dt):
"""
Rescale variable to fit dt, based on quantities library
:param var: Variable to rescale
:param dt: Time steps
:return: Rescaled integer
"""
return (var.rescale(dt.units) / dt).magnitude.astype(int).item()
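# Hedged example of the conversion above: rescale(40 * ms, 1 * ms) -> 40 and rescale(2 * ms, 1 * ms) -> 2;
# non-integer ratios are truncated by the astype(int) cast.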
def calc_minimum_number_of_cores(nb_of_nodes, nb_of_conn):
"""Calc an approximate minimum number of loihi neuro cores required"""
MAX_NEURONS_PER_CORE = 1024
MAX_CONN_PER_CORE = 10 * MAX_NEURONS_PER_CORE
neuron_bounded = nb_of_nodes / MAX_NEURONS_PER_CORE
conn_bounded = nb_of_conn / MAX_CONN_PER_CORE
return int(np.ceil(max(neuron_bounded, conn_bounded)))
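# Hedged example: 2048 nodes with 30000 connections gives max(2048/1024, 30000/10240) -> ceil(2.93) = 3 cores.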
class NxPCritical(object):
class PairWeightMode(IntEnum):
BIN_SIZE_SYNC = 1 << 0
MEAN_VALUE = 1 << 1
HALF_VTH = 1 << 2
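    # The modes above are bit flags: they can be combined with "|" and tested with "&",
    # e.g. (hypothetically) pair_weight_mode=PairWeightMode.BIN_SIZE_SYNC | PairWeightMode.HALF_VTH.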
def __init__(
self,
topology: netx.DiGraph,
input_dim: int,
nb_of_conn_per_input: int = 1,
alpha=2,
beta=0.25,
tau_v=40 * ms,
tau_i=5 * ms,
v_th=1.0,
refractory_period=2 * ms,
dt=1 * ms,
tau_v_pair=None,
tau_i_pair=None,
bin_size=60 * ms,
pair_weight_mode: PairWeightMode = PairWeightMode.HALF_VTH,
network=None,
debug=False,
get_power_eff=False,
power_eff_input_freq=None,
):
self.net = nx.NxNet() if network is None else network
self.board = None
self.topology = topology
self.number_of_neurons = topology.number_of_nodes()
self.pair_weight_mode = pair_weight_mode
self.debug = debug
self.get_power_eff = get_power_eff
self.input_dim = input_dim
if get_power_eff:
assert not debug, "Can't get power efficiency in debug mode"
assert power_eff_input_freq is not None
self.power_eff_input_freq = rescale(power_eff_input_freq, 1 / dt)
# Rescale variables for Loihi
refractory_period = rescale(refractory_period, dt)
v_decay = int(2 ** 12 * (1 - np.exp(-1 / rescale(tau_v, dt))))
c_decay = int(2 ** 12 * (1 - np.exp(-1 / rescale(tau_i, dt))))
v_decay_pair = (
v_decay
if tau_v_pair is None
else int(2 ** 12 * (1 - np.exp(-1 / rescale(tau_v_pair, dt))))
)
c_decay_pair = (
c_decay
if tau_i_pair is None
else int(2 ** 12 * (1 - np.exp(-1 / rescale(tau_i_pair, dt))))
)
v_th = int(v_th * _SCALING_FACTOR)
self.bin_size = rescale(bin_size, dt)
build_neuron_nargs = {
"nb_of_neurons": topology.number_of_nodes(),
"nb_of_synapses": topology.number_of_edges(),
"nb_inputs": nb_of_conn_per_input * input_dim,
"v_decay": v_decay,
"c_decay": c_decay,
"v_decay_pair": v_decay_pair,
"c_decay_pair": c_decay_pair,
"v_th": v_th,
"refractory_period": refractory_period,
"alpha": alpha,
}
build_synapse_nargs = {
"topology": topology,
"alpha": alpha,
"beta": beta,
}
if get_power_eff:
cores_left = 128 # For one full loihi chip
self.nb_replicas = 0
while True:
self.nb_replicas += 1
build_neuron_nargs["starting_core"] = 128 - cores_left
nb_cores_used = self._build_neurons(**build_neuron_nargs)
self._build_synapses(**build_synapse_nargs)
cores_left -= nb_cores_used
if cores_left < nb_cores_used:
break
else:
self._build_neurons(**build_neuron_nargs)
self._build_synapses(**build_synapse_nargs)
self._build_fake_probes() # For snips bin-counters
self._build_input_gen(
nb_neurons=topology.number_of_nodes(),
input_dim=input_dim,
nb_of_conn_per_input=nb_of_conn_per_input,
)
self.weight_probe = self.connections.probe(
[nx.ProbeParameter.SYNAPSE_WEIGHT],
probeConditions=[IntervalProbeCondition(dt=self.bin_size)],
)
if debug:
(self.spike_probe,) = self.grp.probe([nx.ProbeParameter.SPIKE])
(self.pair_spike_probe,) = self._pair_grp.probe([nx.ProbeParameter.SPIKE])
self.tag_probe = self.connections.probe(
[nx.ProbeParameter.SYNAPSE_TAG],
probeConditions=[IntervalProbeCondition(dt=self.bin_size)],
)
def _build_board(self):
if self.board is not None:
self.board.disconnect()
compiler = nx.N2Compiler()
self.board = compiler.compile(self.net)
# self.board.sync = True # TODO Validate
self._build_snips()
def __enter__(self):
return self
def __exit__(self, *_):
if self.board is not None:
self.board.disconnect()
if self.net is not None:
self.net.disconnect()
def power_efficiency_run(self, duration: int):
"""Run a simulation for duration timesteps and return power profile dictionary"""
with self:
self._build_board()
buffer_size = 1024 * 2 # from characterization.py
self.energy_probe = self.board.probe(
probeType=nx.ProbeParameter.ENERGY,
probeCondition=nx.PerformanceProbeCondition(
tStart=1,
tEnd=duration,
bufferSize=buffer_size,
binSize=int(np.power(2, np.ceil(np.log2(duration / buffer_size)))),
),
)
self.board.run(duration)
self.board.finishRun()
power_profile_stats = self.board.energyTimeMonitor.powerProfileStats
power_profile_stats["nb_replicas"] = self.nb_replicas
return power_profile_stats
def __call__(self, spike_trains: np.ndarray = None, nb_of_bins=None):
current_time = 0
spike_times = [[] for _ in range(self.input_dim)]
sample_start_times = []
for spike_train in spike_trains:
sample_start_times.append(current_time)
# Pad spike_trains to match bin size
if (spike_train.shape[-1] % self.bin_size) != 0:
padding = self.bin_size - (spike_train.shape[-1] % self.bin_size)
spike_train = np.pad(
spike_train,
((0, 0), (0, padding)),
mode="constant",
constant_values=0,
)
sample_duration = spike_train.shape[-1]
for i, ts in enumerate(spike_train):
current_ts = np.flatnonzero(ts) + current_time
spike_times[i] += current_ts.tolist()
current_time += sample_duration
duration = current_time
nb_samples = len(spike_trains)
self.spike_gen.addSpikes(list(range(self.input_dim)), spikeTimes=spike_times)
self._build_board()
if self.pair_weight_mode & (
self.PairWeightMode.MEAN_VALUE | self.PairWeightMode.HALF_VTH
):
self.board.run(duration * nb_samples, aSync=True)
sample_start_times = np.asarray(sample_start_times)
next_run = 0
sync_every = 100
all_bins = []
for i, t in enumerate(range(0, duration, self.bin_size)):
if self.pair_weight_mode & self.PairWeightMode.BIN_SIZE_SYNC:
if i < 10: # For the first 10 bin_size duration, sync every bin
self.board.run(self.bin_size)
self._update_weights()
elif next_run <= i: # After, sync weights less frequently
while not self.board.isRunComplete():
sleep(0.1)
self._update_weights()
self.board.run(sync_every * self.bin_size, aSync=True)
next_run = sync_every + i
if t + sync_every * self.bin_size >= duration:
sync_every = (duration - t) // self.bin_size
_logger.info("Reading from channel at bin %i", i)
buff = self.spike_cntr_channel.read(1)
_logger.info("Channel read success")
buff = b"".join(
[i.to_bytes(4, "little") for i in buff]
) # Convert int32 to uin8
bins = np.frombuffer(buff, dtype=np.uint8)
all_bins.append(bins)
# Format bin back to samples
binned_output = np.zeros((nb_samples, self.number_of_neurons, nb_of_bins))
prev_id = None
current_bin = 0
i = 0
for t in range(self.bin_size, duration, self.bin_size):
sample_id = np.max(np.where(t > sample_start_times))
if sample_id != prev_id:
current_bin = 0
prev_id = sample_id
binned_output[sample_id, :, current_bin] = all_bins[i]
current_bin += 1
i += 1
self.board.finishRun()
self._update_weights()
return binned_output
def _build_input_gen(self, nb_neurons, input_dim, nb_of_conn_per_input):
if self.get_power_eff:
# Create an input neuron for power/time efficiency as spike injector are time costly
# That will spike at some specific frequency
neuron_spikegen_param = nx.CompartmentPrototype(
biasMant=self.power_eff_input_freq,
biasExp=6,
compartmentVoltageDecay=0,
vThMant=1000,
)
self.spike_gen = self.net.createCompartmentGroup(
size=input_dim, prototype=neuron_spikegen_param
)
else:
self.spike_gen = self.net.createSpikeGenProcess(numPorts=input_dim)
input_proto = nx.ConnectionPrototype(weight=128, weightExponent=6,)
pre = np.arange(input_dim * nb_of_conn_per_input) % input_dim
post = (
np.random.permutation(max(input_dim, nb_neurons) * nb_of_conn_per_input)[
: input_dim * nb_of_conn_per_input
]
% nb_neurons
)
connection_mask = np.zeros((input_dim, nb_neurons), dtype=np.int)
connection_mask[pre, post] = 1
connection_mask = coo_matrix(connection_mask)
self.spike_gen.connect(
self.grp, prototype=input_proto, connectionMask=connection_mask.T
)
def read_weights(self):
weights = [self.weight_probe[i][0].data for i in range(len(self.weight_probe))]
weights = np.asarray(weights)
return weights
def adj_matrix(self):
weights = self.read_weights()
last_recorded = weights[:, -1]
cmask = self.connection_mask_grp_to_pair.todense().T
nonzeros = cmask != 0
cmask[nonzeros] = last_recorded
return cmask.astype(float) / _SCALING_FACTOR
def _update_weights(self):
# TODO: Would be faster in snips
weights = self.read_weights()
last_recorded = weights[:, -1]
# Update local weights from probe output
self.connections.setSynapseState("weight", last_recorded[:, None].tolist())
if self.pair_weight_mode & self.PairWeightMode.BIN_SIZE_SYNC:
# Update pair weights
cmask = self.connection_mask_grp_to_pair.todense().T
nonzeros = cmask != 0
cmask[nonzeros] = last_recorded
cmask = cmask.T
self._grp_to_pair.setSynapseState(
"weight", cmask[nonzeros][0, :, None].tolist()
)
# weights = self.connections.getSynapseState("weight")
# synapses = self.board.n2Chips[0].n2Cores[0].synapses
# weights = [synapses[i].Wgt for i in range(len(synapses.data))]
def _build_neurons(
self,
nb_of_neurons,
nb_of_synapses,
nb_inputs,
v_decay,
c_decay,
v_decay_pair,
c_decay_pair,
v_th,
refractory_period,
alpha,
starting_core=64, # Since we use lmt 0, starting with core 64 reduces barrier sync region
):
proto_args = {
"biasMant": 0,
"biasExp": 0,
"vThMant": v_th,
"compartmentVoltageDecay": v_decay,
"refractoryDelay": refractory_period,
"enableSpikeBackprop": 1,
"enableSpikeBackpropFromSelf": 1,
"compartmentCurrentDecay": c_decay,
"thresholdBehavior": nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET,
"logicalCoreId": 0,
}
proto = nx.CompartmentPrototype(**proto_args)
self.grp = self.net.createCompartmentGroup(size=nb_of_neurons, prototype=proto)
proto_args["vThMant"] = v_th + alpha
if self.pair_weight_mode & self.PairWeightMode.HALF_VTH:
proto_args["vThMant"] += 0.5 * v_th
proto_args["compartmentVoltageDecay"] = v_decay_pair
proto_args["compartmentCurrentDecay"] = c_decay_pair
proto_args["refractoryDelay"] = max(1, refractory_period - 2)
proto_pair = nx.CompartmentPrototype(**proto_args)
self._pair_grp = self.net.createCompartmentGroup(
size=nb_of_neurons, prototype=proto_pair
)
nb_of_cores = calc_minimum_number_of_cores(
nb_of_neurons * 2 + nb_inputs, nb_of_synapses * 2 + nb_inputs
)
_logger.info("Using %i cores" % nb_of_cores)
if nb_of_cores > 64:
starting_core = 0
## TODO: Could be more optimal for nb_of_cores > 16
main_neurons_per_core = int(np.ceil(nb_of_neurons / nb_of_cores))
for i, compartment in enumerate(self.grp):
core = i // main_neurons_per_core + starting_core
compartment.logicalCoreId = core
self._pair_grp[i].logicalCoreId = core
return nb_of_cores
def _build_synapses(self, topology, alpha, beta):
number_of_neurons = topology.number_of_nodes()
weight_matrix = (
netx.adjacency_matrix(topology).tocoo() * _SCALING_FACTOR
).astype(
int
) # Scale sparse weight matrix
_logger.info("number_of_neurons: %i", number_of_neurons)
_logger.info("number_of_synapses: %i", topology.number_of_edges())
_logger.info("Average initial weight: %.3f", np.mean(weight_matrix.data))
_logger.info(
"Average excitatory weights: %.3f",
np.mean(weight_matrix.data[weight_matrix.data > 0]),
)
_logger.info(
"Average inhibitory weights: %.3f",
np.mean(weight_matrix.data[weight_matrix.data < 0]),
)
_logger.info("Excitatory connections: %i", np.sum(weight_matrix.data > 0))
_logger.info("Inhibitory connections: %i", | np.sum(weight_matrix.data < 0) | numpy.sum |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
import sys
import ipdb
import itertools
import warnings
import shutil
import pickle
from pprint import pprint
from types import SimpleNamespace
from math import floor,ceil
from pathlib import Path
import tifffile
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
plt.switch_backend("agg")
from scipy.ndimage import zoom, label
# from scipy.ndimage.morphology import binary_dilation
from skimage.feature import peak_local_max
from skimage.segmentation import find_boundaries
from skimage.measure import regionprops
from skimage.morphology import binary_dilation
from segtools.numpy_utils import collapse2, normalize3, plotgrid
from segtools import color
from segtools.defaults.ipython import moviesave
from utils import point_matcher
import torch_models
## bash command to run this script on the cluster. replace `00x` with a unique id.
## copy and paste this command into bash to run a job via the job management queueing system.
bashcmd = """
mkdir -p /lustre/projects/project-broaddus/denoise/flower/e01/flower3_11/
cp n2v2_flower.py /lustre/projects/project-broaddus/denoise/flower/e01/flower3_11/
srun -J flw3_10 -n 1 -c 1 --mem=128000 -p gpu --gres=gpu:1 --time=12:00:00 -e std.err.flower3_11 -o std.out.flower3_11 time python3 /lustre/projects/project-broaddus/denoise/flower/e01/flower3_11/n2v2_flower.py &
"""
bashcmd = """
mkdir -p /lustre/projects/project-broaddus/denoise/flower/e02/flower1_6/
cp n2v2_flower.py /lustre/projects/project-broaddus/denoise/flower/e02/flower1_6/
srun -J flw1_1 -n 1 -c 1 --mem=128000 -p gpu --gres=gpu:1 --time=12:00:00 -e std.err.flower1_6 -o std.out.flower1_6 time python3 /lustre/projects/project-broaddus/denoise/flower/e02/flower1_6/n2v2_flower.py &
"""
bashcmd = """
mkdir -p /lustre/projects/project-broaddus/denoise/flower/e03/flower1_1/
cp n2v2_flower.py /lustre/projects/project-broaddus/denoise/flower/e03/flower1_1/
srun -J flw1_1 -n 1 -c 1 --mem=128000 -p gpu --gres=gpu:1 --time=12:00:00 -e std.err.flower1_1 -o std.out.flower1_1 time python3 /lustre/projects/project-broaddus/denoise/flower/e03/flower1_1/n2v2_flower.py &
"""
savedir = Path('/lustre/projects/project-broaddus/denoise/flower/e03/flower1_1') #/flower3_9/')
## lightweight funcs and utils
def init_dirs(savedir):
savedir.mkdir(exist_ok=True)
(savedir/'epochs/').mkdir(exist_ok=True)
(savedir/'epochs_npy/').mkdir(exist_ok=True)
(savedir/'pimgs/').mkdir(exist_ok=True)
(savedir/'pts/').mkdir(exist_ok=True)
(savedir/'movie/').mkdir(exist_ok=True)
(savedir/'counts/').mkdir(exist_ok=True)
(savedir/'models/').mkdir(exist_ok=True)
shutil.copy2('/lustre/projects/project-broaddus/devseg_code/detect/n2v2_flower.py', savedir)
shutil.copy2('/lustre/projects/project-broaddus/devseg_code/detect/torch_models.py', savedir)
def wipe_dirs(savedir):
if savedir.exists():
shutil.rmtree(savedir)
savedir.mkdir()
# for x in (savedir/'epochs/').glob('*.png'): x.unlink()
# for x in (savedir/'rgbs/').glob('*.png'): x.unlink()
# for x in (savedir/'pimgs/').glob('*.png'): x.unlink()
# for x in (savedir/'pts/').glob('*.png'): x.unlink()
# for x in (savedir/'movie/').glob('*.png'): x.unlink()
# for x in (savedir/'counts/').glob('*.png'): x.unlink()
# for x in savedir.glob('*.png'): x.unlink()
# for x in savedir.glob('*.pdf'): x.unlink()
# for x in savedir.glob('*.pkl'): x.unlink()
# for x in savedir.glob('*.py'): x.unlink()
# for x in savedir.glob('*.npz'): x.unlink()
def cat(*args,axis=0): return np.concatenate(args, axis)
def stak(*args,axis=0): return np.stack(args, axis)
def imsave(x, name, **kwargs): return tifffile.imsave(str(name), x, **kwargs)
def imread(name,**kwargs): return tifffile.imread(str(name), **kwargs)
def pklload(name):
return pickle.load(open(name,'rb'))
def pklsave(obj,name):
par = Path(name).parent
par.mkdir(exist_ok=True,parents=True)
pickle.dump(obj,open(name,'wb'))
def i2rgb(img):
if img.shape[-1] == 1: img = img[...,[0,0,0]]
if img.shape[-1] == 2: img = img[...,[0,1,1]]
if img.shape[-1] > 3: img = img[...,None][...,[0,0,0]]
img = img.astype(np.float)
return img
def receptivefield(net):
"calculate and show the receptive field or receptive kernel"
def rfweights(m):
if type(m) == nn.Conv2d:
m.weight.data.fill_(1/(5*5)) ## conv kernel 3*5*5
m.bias.data.fill_(0.0)
net.apply(rfweights);
x0 = np.zeros((256,256)); x0[128,128]=1;
xout = net.cuda()(torch.from_numpy(x0)[None,None].float().cuda()).detach().cpu().numpy()
io.imsave(savedir/'recfield_xy.png',normalize3(xout[0,128]))
io.imsave(savedir/'recfield_xz.png',normalize3(xout[0,:,128]))
def init_weights(m):
"use as arg in net.apply()"
if type(m) == nn.Conv2d:
torch.nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
m.bias.data.fill_(0.05)
def std_weights(m):
"use as arg in net.apply()"
if type(m) == nn.Conv3d:
print("{:.5f} {:.5f}".format(float(m.weight.std()), float(m.bias.mean())))
def random_slice(img_size, patch_size):
assert len(img_size) == len(patch_size)
def f(d,s):
if s == -1: return slice(None)
start = np.random.randint(0,d-s+1)
end = start + s
return slice(start,end)
return tuple([f(d,s) for d,s in zip(img_size, patch_size)])
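# Hedged example: random_slice((512, 512), (256, 256)) might return (slice(100, 356), slice(7, 263)) --
# one slice per dimension, each of the requested patch length; a patch size of -1 keeps that whole axis.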
## heavier meaty functions
def datagen(savedir=None):
# img = imread(f'/lustre/projects/project-broaddus/devseg_data/raw/artifacts/flower.tif')[:10]
img = imread(f'/lustre/projects/project-broaddus/denoise/flower/e02/pred_flower.tif')[:10]
# img = imread(f'/lustre/projects/project-broaddus/devseg_data/raw/artifacts/shutterclosed.tif')[0]
print(img.shape)
# pmin, pmax = np.random.uniform(1,3), np.random.uniform(99.5,99.8)
pmin, pmax = 2, 99.6
print(f"pmin = {pmin}; pmax = {pmax}")
img = normalize3(img,pmin,pmax).astype(np.float32,copy=False)
data = img.reshape((-1, 4,256,4,256)).transpose((0,1,3,2,4)).reshape((-1,1,256,256))
# patch_size = (256,256)
# slicelist = []
# def random_patch():
# ss = random_slice(img.shape, patch_size)
# ## select patches with interesting content. FIXME
# while img[ss].mean() < 0.0:
# ss = random_slice(img.shape, patch_size)
# x = img[ss].copy()
# slicelist.append(ss)
# ## augment
# # noiselevel = 0.2
# # x += np.random.uniform(0,noiselevel,(1,)*3)*np.random.uniform(-1,1,x.shape)
# # for d in [0,1,2]:
# # if np.random.rand() < 0.5:
# # x = np.flip(x,d)
# return (x,)
# data = np.array([random_patch() for _ in range(24)])
# data = np.load('../../devseg_data/cl_datagen/d003/data.npz')
print("data.shape: ", data.shape)
#SCZYX
if savedir:
rgb = collapse2(data[:,:],'scyx','s,y,x,c')[...,[0,0,0]]
rgb = normalize3(rgb)
rgb = plotgrid([rgb],10)
io.imsave(savedir/'data_xy_flower.png',rgb)
np.savez_compressed(savedir/'data_flower.npz',data=data,pmin=pmin,pmax=pmax)
# pklsave(slicelist, savedir/'slicelist2.pkl')
dg = SimpleNamespace()
dg.data = data
dg.pmin = pmin
dg.pmax = pmax
return dg
def setup(params={}):
wipe_dirs(savedir)
init_dirs(savedir)
# dg = datagen(savedir=savedir); data = dg.data;
# data = np.load('/lustre/projects/project-broaddus/devseg_data/cl_datagen/grid/data_shutter.npz')['data']
data = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/data_flower3.npz')['data']
# data = np.load('/lustre/projects/project-broaddus/denoise/flower/e02/data_flower.npz')['data']
d = SimpleNamespace()
d.net = torch_models.Unet2_2d(16,[[1],[1]],finallayer=nn.ReLU).cuda()
d.net.load_state_dict(torch.load('/lustre/projects/project-broaddus/denoise/flower/models/net_randinit.pt'))
# d.net.apply(init_weights);
d.net2 = torch_models.Unet2_2d(16,[[1],[1]],finallayer=nn.ReLU).cuda()
d.net2.load_state_dict(torch.load('/lustre/projects/project-broaddus/denoise/flower/models/net_randinit.pt'))
# d.net2.apply(init_weights);
d.savedir = savedir
# d.net.load_state_dict(torch.load('/lustre/projects/project-broaddus/devseg_data/cl_datagen/d000/jj000/net250.pt'))
# torch.save(d.net.state_dict(), '/lustre/projects/project-broaddus/devseg_data/cl_datagen/rsrc/net_random_init_unet2.pt')
d.x1_all = torch.from_numpy(data).float().cuda()
return d
def init_training_artifacts():
ta = SimpleNamespace()
ta.losses = []
ta.lossdists = []
ta.e = 0
return ta
def train(d,ta=None,end_epoch=301):
if ta is None: ta = init_training_artifacts()
batch_size = 4
inds = np.arange(0,d.x1_all.shape[0])
# example_xs = d.x1_all[inds[::floor(np.sqrt(len(inds)))]].clone()
example_xs = d.x1_all[[0,3,5,12]].clone()
xs_fft = torch.fft((example_xs-example_xs.mean())[...,None][...,[0,0]],2).norm(p=2,dim=-1)
xs_fft = torch.from_numpy(np.fft.fftshift(xs_fft.cpu(),axes=(-1,-2))).cuda()
opt = torch.optim.Adam(d.net.parameters(), lr = 2e-5)
opt2 = torch.optim.Adam(d.net2.parameters(), lr = 2e-5)
lossdist = torch.zeros(d.x1_all.shape[0]) - 2
patch_size = d.x1_all.shape[2:]
plt.figure()
for e in range(ta.e,end_epoch):
ta.e = e
np.random.shuffle(inds)
ta.lossdists.append(lossdist.numpy().copy())
lossdist[...] = -1
print(f"\r epoch {e}", end="")
for b in range(ceil(d.x1_all.shape[0]/batch_size)):
idxs = inds[b*batch_size:(b+1)*batch_size]
x1 = d.x1_all[idxs] #.cuda()
def random_pixel_mask():
n = int(np.prod(patch_size) * 0.02)
x_inds = np.random.randint(0,patch_size[1],n)
y_inds = np.random.randint(0,patch_size[0],n)
# z_inds = np.random.randint(0,32,64*64*1)
ma = np.zeros(patch_size)
ma[y_inds,x_inds] = 2
return ma
def sparse_3set_mask(p=0.02, xs=[1,2],ys=[]):
"build random mask for small number of central pixels"
n = int(np.prod(patch_size) * p)
x_inds = np.random.randint(0,patch_size[1],n)
y_inds = np.random.randint(0,patch_size[0],n)
ma = np.zeros(patch_size)
# ma = binary_dilation(ma)
for i in xs:
m = x_inds-i >= 0; ma[y_inds[m],x_inds[m]-i] = 1
m = x_inds+i < patch_size[1]; ma[y_inds[m],x_inds[m]+i] = 1
for i in ys:
m = y_inds-i >= 0; ma[y_inds[m]-i,x_inds[m]] = 1
m = y_inds+i < patch_size[0]; ma[y_inds[m]+i,x_inds[m]] = 1
ma = ma.astype(np.uint8)
ma[y_inds,x_inds] = 2
return ma
def checkerboard_mask():
ma = np.indices(patch_size).transpose((1,2,0))
ma = np.floor(ma/(1,256)).sum(-1) %2==0
ma = 2*ma
if e%2==1: ma = 2-ma
return ma
ma = sparse_3set_mask(xs=[1,2]).astype(np.float)
ma2 = sparse_3set_mask(xs=[1,2]).astype(np.float)
# ipdb.set_trace()
## apply mask to input
ma = torch.from_numpy(ma).cuda()
x1_damaged = x1.clone()
x1_damaged[:,:,ma>0] = torch.rand(x1.shape).cuda()[:,:,ma>0]
y1p = d.net(x1_damaged)
ma2 = torch.from_numpy(ma2).cuda()
y1p_damaged = y1p.clone()
y1p_damaged[:,:,ma2>0] = torch.rand(y1p.shape).cuda()[:,:,ma2>0]
y2p = d.net2(y1p)
dims = (1,2,3) ## all dims except batch
tm1 = (ma==2).float().repeat(4,1,1,1) ## target mask
tm2 = (ma2==2).float().repeat(4,1,1,1)
loss_per_patch = (tm1 * torch.abs(y1p-x1)**2).sum(dims) / tm1.sum(dims)
loss_per_patch += (tm2 * torch.abs(y2p-y1p)**2).sum(dims) / tm2.sum(dims)
lossdist[idxs] = loss_per_patch.detach().cpu()
loss = loss_per_patch.mean()
ta.losses.append(float(loss))
opt.zero_grad()
opt2.zero_grad()
loss.backward()
opt.step()
opt2.step()
## predict on examples and save each epoch
with torch.no_grad():
example_yp = d.net(example_xs)
example_yp2 = d.net2(example_yp)
yp_fft = torch.fft((example_yp2 - example_yp2.mean())[...,None][...,[0,0]],2).norm(p=2,dim=-1) #.cpu().detach().numpy()
yp_fft = torch.from_numpy(np.fft.fftshift(yp_fft.cpu(),axes=(-1,-2))).cuda()
# yp_fft = yp_fft/yp_fft.max()
rgb = torch.stack([example_xs,ma.float().repeat(4,1,1,1)/2,xs_fft,example_yp2,yp_fft],0).cpu().detach().numpy()
arr = rgb.copy()
# type,samples,channels,y,x
rgb = normalize3(rgb,axs=(1,2,3,4))
rgb[[2,4]] = normalize3(rgb[[2,4]],pmin=0,pmax=99.0,axs=(1,2,3,4))
# remove channels and permute
rgb = collapse2(rgb[:,:,0],'tsyx','sy,tx')
# arr = collapse2(arr[:,:,0],'tsyx','sy,tx')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if e%10==0: io.imsave(d.savedir / f'epochs/rgb_{e:03d}.png', rgb)
if e%100==0: np.save(d.savedir / f'epochs_npy/arr_{e:03d}.npy', arr)
batches_per_epoch = ceil(d.x1_all.shape[0]/batch_size)
epochs = np.arange(len(ta.losses)) / batches_per_epoch
plt.clf()
plt.plot(epochs,ta.losses)
# plt.ylim(np.mean(ta.losses)-3*np.std(ta.losses),np.mean(ta.losses)+3*np.std(ta.losses))
plt.yscale('log')
plt.xlabel(f'1 epoch = {batches_per_epoch} batches')
plt.savefig(d.savedir/f'loss.png',dpi=300)
if e%100==0:
torch.save(d.net.state_dict(), savedir/f'models/net{e:03d}.pt')
pklsave(ta.losses,d.savedir/f'losses.pkl')
torch.save(d.net.state_dict(), d.savedir/f'models/net{ta.e:03d}.pt')
return ta
def multitrain(d):
if False:
torch.manual_seed(jj)
net.apply(init_weights);
torch.manual_seed(42)
net.load_state_dict(torch.load('/lustre/projects/project-broaddus/devseg_data/cl_datagen/rsrc/net_random_init_unet2.pt'))
np.random.seed(jj)
torch.cuda.manual_seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
lossesjj = []
jj=0
for jj in range(j,6):
d.savedir = savedir / f'jj{jj:03d}'; init_dirs(d.savedir)
ta = init_training_artifacts()
train(d,ta,100)
lossesjj.append(ta.losses)
predict_movies(d)
plt.figure()
for loss in lossesjj:
plt.plot(np.convolve(loss,np.ones(50)/50,mode='valid'),lw=1)
plt.yscale('log')
plt.savefig(savedir/'multi_losses.png',dpi=300)
## prediction and analysis
def apply_net_tiled(net,img):
"""
  Applies net to a 2d image with dims Channels,Y,X, tiling over Y and X
"""
# borders = [8,20,20] ## border width within each patch that is thrown away after prediction
# patchshape_padded = [32,240,240] ## the size of the patch that we feed into the net. must be divisible by 8 or net fails.
# patchshape = [16,200,200] ## must be divisible by 8 to avoid artifacts.
# stride = [16,200,200] ## same as patchshape in this case
def f(n,m): return (ceil(n/m)*m)-n ## f(n,m) gives padding needed for n to be divisible by m
def g(n,m): return (floor(n/m)*m)-n ## f(n,m) gives un-padding needed for n to be divisible by m
b,c = img.shape[1:]
r,s = f(b,8),f(c,8) ## calculate extra border needed for stride % 8 = 0
YPAD,XPAD = 24,24
img_padded = np.pad(img,[(0,0),(YPAD,YPAD+r),(XPAD,XPAD+s)],mode='constant') ## pad for patch borders
output = np.zeros(img.shape)
# zs = np.r_[:a:16]
ys = np.r_[:b:200]
xs = np.r_[:c:200]
for x,y in itertools.product(xs,ys):
re,se = min(y+200,b+r), min(x+200,c+s)
be,ce = min(y+200,b), min(x+200,c)
patch = img_padded[:,y:re+2*YPAD,x:se+2*XPAD]
patch = torch.from_numpy(patch).cuda().float()
with torch.no_grad():
patch = net(patch[None])[0,:,YPAD:-YPAD,XPAD:-XPAD].detach().cpu().numpy()
output[:,y:be,x:ce] = patch[:,:be-y,:ce-x]
return output
def analyze_losses(d,ta):
plt.figure()
plt.plot(ta.losses)
plt.ylim(0,ta.losses[0])
plt.savefig(d.savedir/'loss.pdf')
## plot loss distribution trajectories
lds = ta.lossdists[1::3]
N = len(lds)
colors = color.pastel_colors_RGB(N,max_saturation=0.9,brightness=0.8,shuffle=False)
# colors = np.arange(N)[:,None][:,[0,0,0]] * (15,-15,15) + (15,240,15)
# colors = colors/255
plt.figure()
for i in np.arange(N):
plt.plot(sorted(lds[i]),'.',color=colors[i]+[0.25])
# plt.ylim(0,np.max(lds))
# plt.scatter(np.r_[0:N],np.ones(N)*1,c=colors)
plt.savefig(savedir / 'lossdist.pdf')
plt.figure()
for i in np.arange(N):
plt.plot(lds[i],'.',color=colors[i]+[0.25])
# plt.scatter(np.r_[0:N],np.ones(N)*1,c=colors)
plt.savefig(d.savedir / 'lossdist_unsorted.pdf')
def e01_fig2_flower():
# img1 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_1/epochs_npy/arr_600.npy')
# img2 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_2/epochs_npy/arr_600.npy')
# img3 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_3/epochs_npy/arr_600.npy')
# img4 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_4/epochs_npy/arr_600.npy')
# img5 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_5/epochs_npy/arr_600.npy')
img6 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_6/epochs_npy/arr_600.npy')
img7 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_7/epochs_npy/arr_600.npy')
img8 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_8/epochs_npy/arr_600.npy')
img9 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_9/epochs_npy/arr_600.npy')
img10 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_10/epochs_npy/arr_600.npy')
img11 = np.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_11/epochs_npy/arr_600.npy')
## (N2V, OURS 2class, OURS 3class) , (raw, mask, raw fft, pred, pred fft) , n_samples , channels, y , x
# rgb = stak(img1, img2, img3, img4, img5, img6, img7, img8, img9)
rgb = stak(img6, img7, img8, img9, img10, img11)
# rgb[:,[2,4]] = normalize3(rgb[:,[2,4]], pmin=0, pmax=99.0)
# rgb[:,[2,4]] = normalize3(np.log(rgb[:,[2,4]]+1e-7))
rgb[:,[2,4]] = normalize3(np.log(normalize3(rgb[:,[2,4]],0,99)+1e-7))
rgb[:,[0,3]] = normalize3(rgb[:,[0,3]])
rgb[:,1] = normalize3(rgb[:,1])
## remove channels and pad xy with white
rgb = rgb[:,:,:,0]
# rgb = np.pad(rgb,[(0,0),(0,0),(0,0),(0,1),(0,1)],mode='constant',constant_values=1)
# plt.figure()
# d = np.fft.fftshift(np.fft.fftfreq(256))
# for i,m in enumerate("N2V,OURS 2class,OURS 3class".split(',')):
# plt.plot(d,rgb[i,-1].mean((0,1)),label=f'{m} : avg s,y')
# plt.plot(d,rgb[i,-1].mean((0,2)),label=f'{m} : avg s,x')
# plt.legend()
## reshape to (raw, N2V, ours 2 class, ours 3class) , (real, fft, mask), samples, y, x
# rgb = rgb.reshape((15, 4, 256, 256))[]
rgb = cat(stak(np.zeros(rgb[0,0].shape),rgb[0,0],rgb[0,2])[None],rgb[:,[1,3,4]])
## models, types, samples, y, x
# rgb = collapse2(rgb,'mtsyx','mt,sy,x')
# rgb = rgb[[0,1,2,3,4,6,8,9,11,13,14]]
# rgb = rgb[[0,1,5,8,3,6,9,2,4,7,10,]]
# rgb = collapse2(rgb,'myx','y,mx')
# io.imsave(savedir.parent/'shutterclosed_normalized.png',rgb[:64])
np.savez_compressed('/lustre/projects/project-broaddus/denoise/flower/e01/e01_fig2_flower.npz', rgb=rgb)
return rgb
def e02_fig2_flower():
img1 = np.load('/lustre/projects/project-broaddus/denoise/flower/e02/flower1_1/epochs_npy/arr_400.npy')
img2 = np.load('/lustre/projects/project-broaddus/denoise/flower/e02/flower1_2/epochs_npy/arr_400.npy')
img3 = np.load('/lustre/projects/project-broaddus/denoise/flower/e02/flower1_3/epochs_npy/arr_400.npy')
img4 = np.load('/lustre/projects/project-broaddus/denoise/flower/e02/flower1_4/epochs_npy/arr_400.npy')
img5 = np.load('/lustre/projects/project-broaddus/denoise/flower/e02/flower1_5/epochs_npy/arr_400.npy')
img6 = np.load('/lustre/projects/project-broaddus/denoise/flower/e02/flower1_6/epochs_npy/arr_400.npy')
rgb = stak(img1, img2, img3, img4, img5, img6)
## normalize fft and real space separately
rgb[:,[2,4]] = normalize3(np.log(normalize3(rgb[:,[2,4]],0,99)+1e-7))
rgb[:,[0,3]] = normalize3(rgb[:,[0,3]])
rgb[:,1] = normalize3(rgb[:,1])
## remove channels and pad xy with white
rgb = rgb[:,:,:,0]
# rgb = np.pad(rgb,[(0,0),(0,0),(0,0),(0,1),(0,1)],mode='constant',constant_values=1)
## reshape to (raw, N2V, ours 2 class, ours 3class) , (real, fft, mask), samples, y, x
rgb = cat(stak(np.zeros(rgb[0,0].shape),rgb[0,0],rgb[0,2])[None],rgb[:,[1,3,4]])
np.savez_compressed('/lustre/projects/project-broaddus/denoise/flower/e02/e02_fig2_flower.npz', rgb=rgb)
return rgb
def predict_full():
"make movies scrolling through z"
net = torch_models.Unet2_2d(16,[[1],[1]],finallayer=nn.ReLU).cuda()
net.load_state_dict(torch.load('/lustre/projects/project-broaddus/denoise/flower/e01/flower3_6/models/net600.pt'))
img = imread(f'/lustre/projects/project-broaddus/devseg_data/raw/artifacts/flower.tif')
# pmin, pmax = np.random.uniform(1,3), np.random.uniform(99.5,99.8)
pmin, pmax = 2, 99.6
img = normalize3(img,pmin,pmax,axs=(1,2)).astype(np.float32,copy=False)
pimg = []
for x in img:
# x = torch.from_numpy(x).cuda()
# x = net(x[None])
x = apply_net_tiled(net,x[None])
pimg.append(x)
pimg = np.array(pimg)
# return img, net, pimg
# pimg = apply_net_tiled(net,img[:,None])
imsave(pimg, savedir/f'pred_flower.tif')
# rgb = cat(img, pimg[0], axis=1)
# rgb = rgb.clip(min=0)
# moviesave(normalize3(rgb), savedir/f'movie/vert{ds}_{i:03d}.mp4', rate=4)
# imsave(pimg, savedir/f'pimgs/pimg{ds}_{i:03d}.tif')
## make histogram of pimg values at points
# for name in sorted((savedir/'pimgs/').glob('*.tif')):
# pimg = imread(savedir/f'pimgs/pimg{i:03d}.tif')
## 2d rgb pngs
# imsave(pimg, savedir/f'pimg/pimg000.tif',compress=8)
# rgb1 = cat(pimg[0,:64].max(0), pimg[0,64:].max(0))[...,None]
# rgb2 = cat(img[0,:64].max(0), img[0,64:].max(0))[...,None][...,[0,0,0]]
# rgb2[...,[0]] += rgb1
# rgb2 = normalize3(rgb2)
# io.imsave(savedir/'rgbs/rgb001.png',rgb2)
def histograms():
"cumulative dist of pixel values in img and pimg"
plt.figure()
x = | np.linspace(0,100,100) | numpy.linspace |
import numpy as np
from utils import lie_algebra
def proj(x):
if x.ndim == 1:
return x[:-1] / x[-1]
elif x.ndim == 2:
return np.divide(x[:, :-1].T, x[:, -1]).T
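# Hedged example: proj(np.array([2., 4., 2.])) -> array([1., 2.]) (homogeneous -> Euclidean);
# the 2d branch divides each row of an (N, d+1) array by its last column.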
# ----------------------------- get transformation functions -----------------------------
def getT_axisangle(x):
"""
Get the transformation matrix from the minimal representation where the angle parameters are in axis angle form.
"""
T = np.zeros([4, 4])
T[3, 3] = 1.0
T[0:3, 0:3] = lie_algebra.so3exp(x[3:6])
T[0:3, 3] = x[0:3]
return T
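# Hedged example (assuming lie_algebra.so3exp of a zero vector is the identity):
# getT_axisangle(np.array([1., 2., 3., 0., 0., 0.])) is a 4x4 transform with identity rotation
# and translation (1, 2, 3) in the last column.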
def getT_qt(x):
"""
Get the transformation matrix from the camera position and quaternion parameters.
"""
T = np.zeros([4, 4])
T[3, 3] = 1.0
q = Quaternion(x[3:])
T[0:3, 0:3] = q.rot_matrix()
T[0:3, 3] = x[0:3]
return T
# ---------------------------------- Quaternions -----------------------------------------------
def normalize(v, tolerance=1e-4):
mag2 = sum(n * n for n in v)
if abs(mag2 - 1.0) > tolerance:
mag = np.sqrt(mag2)
v = tuple(n / mag for n in v)
return np.array(v)
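# Hedged example: normalize((3.0, 4.0, 0.0)) -> array([0.6, 0.8, 0.0]); inputs already within
# `tolerance` of unit length are returned unchanged (as an array).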
class Quaternion:
def __init__(self, q=None, axis=None, angle=None):
        if axis is not None:
            axis = normalize(axis)  # Normalize the axis vector so we have a unit quaternion
if q is None:
self.w = np.cos(angle / 2)
self.x = np.sin(angle / 2) * axis[0]
self.y = np.sin(angle / 2) * axis[1]
self.z = np.sin(angle / 2) * axis[2]
self.q = np.array([self.w, self.x, self.y, self.z])
if q is not None:
self.q = q
def rotate(self, v):
point = np.array([0, v[0], v[1], v[2]])
        return q_multiply(q_multiply(self.q, point), self.conjugate())[1:]
def conjugate(self):
return np.array([self.q[0], -self.q[1], -self.q[2], -self.q[3]])
def rot_matrix(self):
q = self.q
R = [[1 - 2 * (q[2] ** 2 + q[3] ** 2), 2 * (q[1] * q[2] - q[3] * q[0]), 2 * (q[1] * q[3] + q[2] * q[0])],
[2 * (q[1] * q[2] + q[3] * q[0]), 1 - 2 * (q[1] ** 2 + q[3] ** 2), 2 * (q[2] * q[3] - q[1] * q[0])],
[2 * (q[1] * q[3] - q[2] * q[0]), 2 * (q[2] * q[3] + q[1] * q[0]), 1 - 2 * (q[1] ** 2 + q[2] ** 2)]]
return np.array(R)
def q_multiply(q1, q2):
"""
Multiply together two quaternions
:return: product of two quaternions
"""
w1, x1, y1, z1 = q1
w2, x2, y2, z2 = q2
w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2
x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2
y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2
z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2
return np.array([w, x, y, z])
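# Hedged example: the identity quaternion is neutral, so q_multiply(np.array([1., 0., 0., 0.]), q)
# returns q unchanged for any quaternion q (ordering is [w, x, y, z]).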
# ---------------------------------------- Euler -----------------------------------------------
def eulerAnglesToRotationMatrix(theta):
"""
Calculates Rotation Matrix given euler angles.
"""
R_x = np.array([[1, 0, 0],
[0, np.cos(theta[0]), -np.sin(theta[0])],
[0, np.sin(theta[0]), np.cos(theta[0])]
])
R_y = np.array([[np.cos(theta[1]), 0, np.sin(theta[1])],
[0, 1, 0],
[-np.sin(theta[1]), 0, np.cos(theta[1])]
])
R_z = np.array([[np.cos(theta[2]), -np.sin(theta[2]), 0],
[np.sin(theta[2]), np.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, | np.dot(R_y, R_x) | numpy.dot |
# Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import json
import pickle
import numpy as np
import scipy.sparse as sp
from galileo.platform.data_source.data_source import DataSource
from galileo.platform.data_source.utils import download_url
from galileo.platform.log import log
class Planetoid(DataSource):
r'''
The citation network datasets 'Cora', 'CiteSeer' and 'PubMed'
from 'Revisiting Semi-Supervised Learning with Graph Embeddings'
<https://arxiv.org/abs/1603.08861>
Nodes represent documents and edges represent citation links.
'''
url = 'https://github.com/kimiyoung/planetoid/raw/master/data'
def __init__(self, root_dir, name, **kwargs):
super().__init__(root_dir, name, **kwargs)
@property
def raw_dir(self):
return os.path.join(self.root_dir, self.name, 'raw')
@property
def raw_file_names(self):
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph', 'test.index']
return ['ind.{}.{}'.format(self.name, name) for name in names]
def download(self):
for name in self.raw_file_names:
download_url('{}/{}'.format(self.url, name), self.raw_dir)
log.info(f'download {self.name} done')
def read_data(self):
'''
files:
x: feature vectors of training
y: one-hot labels of training
tx: feature vectors of test
ty: one-hot labels of test
allx, ally
graph: dict, neighbors of nodes
test.index: the indices of test instances in graph
'''
data = []
for path in self.raw_paths:
if path.endswith('test.index'):
data.append([int(line.strip()) for line in open(path)])
else:
with open(path, 'rb') as f:
data.append(pickle.load(f, encoding='latin1'))
x, y, tx, ty, allx, ally, graph, test_idx = tuple(data)
test_idx_range = np.sort(test_idx)
if self.name == 'citeseer':
# There are some isolated nodes in the Citeseer graph,
            # resulting in non-consecutive test indices.
# We need to identify them and add them as zero vectors
# to `tx` and `ty`.
min_test_idx = min(test_idx)
len_test_idx = max(test_idx) - min_test_idx + 1
tx_extended = sp.lil_matrix((len_test_idx, tx.shape[1]))
ty_extended = | np.zeros((len_test_idx, ty.shape[1])) | numpy.zeros |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from scipy.integrate import simps
def transfer_fuction(xplane2dec,control_DATA,flight_data_for_integrate):
Phi_sp = control_DATA[1]*180
Theta_sp = control_DATA[0]*180
Psi_sp = control_DATA[2]*180
h_sp = 300
sp_array = np.asarray([Phi_sp,Theta_sp,Psi_sp,h_sp])
T_roll = 1
T_pitch = 1
g = 9.8 #m/s**2
K_i = np.asarray([1e-5,1e-5, 1e-5, 1e-6])
K_p = np.asarray([0.4,0.01, 1e-10, 0.002])
K_ff = np.asarray([0.25, 0.5, 0.1, 0.08])
buffer_a = 1e-10
flight_data_for_integrate = flight_data_for_integrate.as_matrix()
difference_flight_data = sp_array - flight_data_for_integrate[:,0:4]
flight_time = flight_data_for_integrate[:,4]
integrate_roll = simps(difference_flight_data[:,0])
integrate_pitch = simps(difference_flight_data[:,1])
integrate_yaw = simps(difference_flight_data[:,2])
integrate_alt = simps(difference_flight_data[:,3])
diff_roll = np.diff(flight_data_for_integrate[:,0])
diff_pitch = np.diff(flight_data_for_integrate[:,1])
diff_yaw = np.diff(flight_data_for_integrate[:,2])
diff_alt = np.diff(flight_data_for_integrate[:,3])
diff_array = np.asarray([diff_roll[len(diff_roll)-1] , diff_pitch[len(diff_roll)-1] , diff_yaw[len(diff_roll)-1] , diff_alt[len(diff_roll)-1]])
Phi = float(xplane2dec['roll']) + buffer_a # roll
Theta = float(xplane2dec['pitch']) + buffer_a #pitch
Psi = float(xplane2dec['hding_true']) + buffer_a #yaw
u = float(xplane2dec['vX']) + buffer_a #x acxis speed
v = float(xplane2dec['vY']) + buffer_a #y acxis speed
w = float(xplane2dec['vZ']) + buffer_a #z acxis speed
p = float(xplane2dec['Q_rad/s']) + buffer_a #roll moment
q = float(xplane2dec['P_rad/s']) + buffer_a #pitch moment
r = float(xplane2dec['R_rad/s']) + buffer_a #yaw moment
h = float(xplane2dec['alt_ftagl']) + buffer_a #altitude
#moment sp
p_sp = (Phi_sp - Phi) / T_roll
q_sp = (Theta_sp - Theta) / T_pitch
r_sp = ((w*q_sp) + (g*np.sin(Phi)*np.cos(Theta)) + (u*q_sp*Phi))/((u*np.cos(Phi)*np.sin(Theta)) + (w*np.sin(Theta)))
t_sp = h_sp -h
#control
delta_a = p_sp * K_p[0] + diff_array[0]*K_ff[0]# + integrate_roll*K_i[0]
delta_e = q_sp * K_p[1] + diff_array[1]*K_ff[1]# + ( t_sp * K_p[3] + diff_array[3] * K_ff[3] + integrate_alt*K_i[3])
delta_r = control_DATA[2]
#delta_r = q_sp * K_p[2] + diff_array[2]*K_ff[2]
delta_th = control_DATA[3]#t_sp * K_p[3] + diff_array[3]*K_ff[3]
delta = [delta_e, delta_a, delta_r, delta_th]
print(delta)
return delta
def convet_matrix_posture():
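    # NOTE: as written this helper takes no arguments yet references names (Phi, Theta, Psi, Phi_sp, Theta_sp,
    # Psi_sp, K_p, K_ff, diff_array) that only exist inside transfer_fuction, and `delta` is never assigned
    # before the final return (that assignment is commented out), so it is not runnable standalone.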
R = create_state_array(Phi,Theta,Psi)
R_sp = create_state_array(Phi_sp,Theta_sp,Psi_sp)
R_z = np.asarray([R[0,2],R[1,2],R[2,2]])
R_z_sp =np.asarray([R_sp[0,2],R_sp[1,2],R_sp[2,2]])
C = np.cross(R_z,R_z_sp)
e_R = np.dot(R.T,C)
cos = np.dot(R_z , R_z_sp)
sin = np.linalg.norm(e_R)
tan = sin / cos
angle = np.arctan(tan)
axis = np.matrix(e_R) / sin
e_R = axis * angle
cp1 =np.asarray([0 , 0 , axis[0,1] ])
cp2 =np.asarray([0, 0 , -1*axis[0,0] ])
cp3 =np.asarray([-1*axis[0,1] , axis[0,0] , 0 ])
e_R_cp = np.matrix([cp1,cp2,cp3])
cos_theta = np.cos(angle)
sin_theta = np.sin(angle)
A = np.matrix([[1,0,0],[0,1,0],[0,0,1]])
R_rp =R *(A + (e_R_cp * sin + (e_R_cp*e_R_cp*(1-cos) )))
#yaw
R_x_sp = np.asarray([1,0,0])
R_x_rp = np.asarray([R_rp[0,0],R_rp[1,0],R_rp[2,0]])
sinx = np.dot(np.cross(R_x_rp,R_x_sp),R_z_sp)
cosx = np.dot(R_x_rp,R_x_sp)
tanx = sinx/cosx
yaw_w = A[2,2] * A[2,2]
e_R[0,2] = np.arctan(tanx) * yaw_w
eP = np.matrix([1,1,1])
rates_sp = [e_R[0,0]*eP[0,0],e_R[0,1]*eP[0,1],e_R[0,2]*eP[0,2]]
rates = np.matrix([Phi,Theta,Psi])
rate_err = rates_sp - rates
att_control = K_p[0:3]*np.asarray(rate_err) + K_ff[0:3] * diff_array[0:3] #+ integrate_array * K_i[0:3]
#delta = [att_control[0,0],att_control[0,1],att_control[0,2], delta_th]
#print(e_R)
#delta_2 = delta_a - float(att_control[0,1])
#print(delta_2)
return delta
def create_state_array(Phi,Theta,Psi):
F1 = np.asarray([np.cos(Theta)*np.cos(Psi), np.cos(Theta)*np.sin(Psi), -1 * np.sin(Theta)])
F2 = np.asarray([np.sin(Phi)*np.sin(Theta)*np.cos(Psi) - np.cos(Phi)*np.sin(Psi), np.sin(Phi)*np.sin(Theta)*np.sin(Psi) + np.cos(Phi)*np.cos(Psi), np.sin(Phi)*np.cos(Theta)])
F3 = np.asarray([np.cos(Phi)*np.sin(Theta)*np.cos(Psi) + np.sin(Phi)*np.sin(Psi), | np.cos(Phi) | numpy.cos |
from __future__ import print_function, division
import os, sys, warnings, platform
from time import time
import numpy as np
#if "PyPy" not in platform.python_implementation():
# from scipy.io import loadmat, savemat
from Kuru.Tensor import unique2d, itemfreq, in2d, makezero
#from Florence.Utils import insensitive
#from .vtk_writer import write_vtu
#try:
# import meshpy.triangle as triangle
# has_meshpy = True
#except ImportError:
# has_meshpy = False
from .HigherOrderMeshing import *
from .NodeArrangement import *
#from .GeometricPath import *
from warnings import warn
from copy import deepcopy
"""
Mesh class providing most of the pre-processing functionalities of the Core module
<NAME> - 13/06/2015
"""
class Mesh(object):
"""Mesh class provides the following functionalities:
1. Generating higher order meshes based on a linear mesh, for tris, tets, quads and hexes
2. Generating linear tri and tet meshes based on meshpy back-end
3. Generating linear tri meshes based on distmesh back-end
4. Finding bounary edges and faces for tris and tets, in case they are not provided by the mesh generator
5. Reading Salome meshes in binary (.dat/.txt/etc) format
6. Reading gmsh files .msh
7. Checking for node numbering order of elements and fixing it if desired
8. Writing meshes to unstructured vtk file format (.vtu) in xml and binary formats,
including high order elements
"""
def __init__(self, element_type=None):
super(Mesh, self).__init__()
# self.faces and self.edges ARE BOUNDARY FACES
# AND BOUNDARY EDGES, RESPECTIVELY
self.degree = None
self.ndim = None
self.edim = None
self.nelem = None
self.nnode = None
self.elements = None
self.points = None
self.corners = None
self.edges = None
self.faces = None
self.element_type = element_type
self.face_to_element = None
self.edge_to_element = None
self.boundary_edge_to_element = None
self.boundary_face_to_element = None
self.all_faces = None
self.all_edges = None
self.interior_faces = None
self.interior_edges = None
# TYPE OF BOUNDARY FACES/EDGES
self.boundary_element_type = None
# FOR GEOMETRICAL CURVES/SURFACES
self.edge_to_curve = None
self.face_to_surface = None
self.spatial_dimension = None
self.reader_type = None
self.reader_type_format = None
self.reader_type_version = None
self.writer_type = None
self.filename = None
self.element_to_set = None
def GetEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetEdgesTri()
elif self.element_type == "quad":
self.GetEdgesQuad()
elif self.element_type == "pent":
self.GetEdgesPent()
elif self.element_type == "tet":
self.GetEdgesTet()
elif self.element_type == "hex":
self.GetEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.all_edges
def GetBoundaryEdges(self):
assert self.element_type is not None
if self.element_type == "tri":
self.GetBoundaryEdgesTri()
elif self.element_type == "quad":
self.GetBoundaryEdgesQuad()
elif self.element_type == "pent":
self.GetBoundaryEdgesPent()
elif self.element_type == "tet":
self.GetBoundaryEdgesTet()
elif self.element_type == "hex":
self.GetBoundaryEdgesHex()
else:
raise ValueError('Type of element not understood')
return self.edges
def GetEdgesQuad(self):
"""Find the all edges of a quadrilateral mesh.
Sets all_edges property and returns it
returns:
arr: numpy ndarray of all edges"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_edges,np.ndarray):
if self.all_edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.all_edges.shape[1]==2 and p > 1:
pass
else:
return self.all_edges
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.uint64)
# REMOVE DUPLICATES
edges, idx = unique2d(edges,consider_sort=True,order=False,return_index=True)
edge_to_element = np.zeros((edges.shape[0],2),np.int64)
edge_to_element[:,0] = idx % self.elements.shape[0]
edge_to_element[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_to_element
# DO NOT SET all_edges IF THE CALLER FUNCTION IS GetBoundaryEdgesHex
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)[1][3]
if calframe != "GetBoundaryEdgesHex":
self.all_edges = edges
return edges
def GetBoundaryEdgesQuad(self):
"""Find boundary edges (lines) of a quadrilateral mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.uint64)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST A RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]
self.edges = self.edges.astype(np.uint64)
self.boundary_edge_to_element = boundary_edge_to_element
return self.edges
def GetBoundaryEdgesHex(self):
"""Find boundary edges (lines) of hexahedral mesh.
"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.edges,np.ndarray):
if self.edges.shape[0] > 1:
# IF ONLY THE LINEAR VERSION HAS BEEN COMPUTED, RECOMPUTE THE HIGHER ORDER VERSION
if self.edges.shape[1] == 2 and p > 1:
pass
else:
return
# FIRST GET BOUNDARY FACES
if not isinstance(self.faces,np.ndarray):
self.GetBoundaryFacesHex()
# BUILD A 2D MESH
tmesh = Mesh()
tmesh.element_type = "quad"
tmesh.elements = self.faces
tmesh.nelem = tmesh.elements.shape[0]
del tmesh.faces
del tmesh.points
# ALL THE EDGES CORRESPONDING TO THESE BOUNDARY FACES ARE BOUNDARY EDGES
self.edges = tmesh.GetEdgesQuad()
@property
def Bounds(self):
"""Returns bounds of a mesh i.e. the minimum and maximum coordinate values
in every direction
"""
assert self.points is not None
if self.points.shape[1] == 3:
bounds = np.array([[np.min(self.points[:,0]),
np.min(self.points[:,1]),
np.min(self.points[:,2])],
[np.max(self.points[:,0]),
np.max(self.points[:,1]),
np.max(self.points[:,2])]])
makezero(bounds)
return bounds
elif self.points.shape[1] == 2:
bounds = np.array([[np.min(self.points[:,0]),
np.min(self.points[:,1])],
[np.max(self.points[:,0]),
np.max(self.points[:,1])]])
makezero(bounds)
return bounds
elif self.points.shape[1] == 1:
bounds = np.array([[np.min(self.points[:,0])],
[np.max(self.points[:,0])]])
makezero(bounds)
return bounds
else:
raise ValueError("Invalid dimension for mesh coordinates")
def GetElementsEdgeNumberingQuad(self):
"""Finds edges of elements and their flags saying which edge they are [0,1,2,3].
At most a quad can have all its four edges on the boundary.
output:
edge_elements: [2D array] array mapping every edge to the element it belongs
to [column 0] and its local edge number within that element [column 1]
Note that this method sets the self.edge_to_element to edge_elements,
so the return value is not strictly necessary
"""
if isinstance(self.edge_to_element,np.ndarray):
if self.edge_to_element.shape[0] > 1:
return self.edge_to_element
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
if self.all_edges is None:
self.GetEdgesQuad()
p = self.InferPolynomialDegree()
# FIND WHICH FACE NODES ARE IN WHICH ELEMENT
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.int64)
all_edges, idx = unique2d(all_edges,consider_sort=True,order=False, return_index=True)
edge_elements = np.zeros((all_edges.shape[0],2),dtype=np.int64)
# edge_elements = np.zeros((self.edges.shape[0],2),dtype=np.int64)
edge_elements[:,0] = idx % self.elements.shape[0]
edge_elements[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_elements
return self.edge_to_element
def GetFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetFacesTet()
elif self.element_type == "hex":
self.GetFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.all_faces
def GetBoundaryFaces(self):
assert self.element_type is not None
if self.element_type == "tet":
self.GetBoundaryFacesTet()
elif self.element_type == "hex":
self.GetBoundaryFacesHex()
elif self.element_type=="tri" or self.element_type=="quad":
raise ValueError("2D mesh does not have faces")
else:
raise ValueError('Type of element not understood')
return self.faces
def GetBoundaryFacesHex(self):
"""Find boundary faces (surfaces) of a hexahedral mesh"""
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.faces,np.ndarray):
if self.faces.shape[0] > 1:
# IF ONLY THE LINEAR VERSION HAS BEEN COMPUTED, RECOMPUTE THE HIGHER ORDER VERSION
if self.faces.shape[1] == 4 and p > 1:
pass
else:
return
node_arranger = NodeArrangementHex(p-1)[0]
# CONCATENATE ALL THE FACES MADE FROM ELEMENTS
all_faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(np.int64)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_faces,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY FACES
freqs_inv = itemfreq(inv)
faces_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
self.faces = uniques[faces_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_face_to_element = np.zeros((faces_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST A RE-ARRANGEMENT OF FACES
all_faces_in_faces = in2d(all_faces,self.faces,consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
# boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.faces = self.elements[boundary_face_to_element[:,0][:,None],node_arranger[boundary_face_to_element[:,1],:]]
self.faces = self.faces.astype(np.uint64)
self.boundary_face_to_element = boundary_face_to_element
def GetElementsWithBoundaryEdgesQuad(self):
"""Finds elements which have edges on the boundary.
At most a quad can have all its four edges on the boundary.
output:
boundary_edge_to_element: [2D array] array containing elements which have edges
on the boundary [column 0] and a flag stating which edges they are [column 1]
"""
if isinstance(self.boundary_edge_to_element,np.ndarray):
if self.boundary_edge_to_element.shape[1] > 1 and self.boundary_edge_to_element.shape[0] > 1:
return self.boundary_edge_to_element
# DO NOT COMPUTE EDGES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.edges is not None
p = self.InferPolynomialDegree()
# FIND WHICH FACE NODES ARE IN WHICH ELEMENT
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(self.edges.dtype)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
# FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
# IN ELEMENT CONNECTIVITY
# THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST A RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.boundary_edge_to_element = boundary_edge_to_element
return self.boundary_edge_to_element
def GetElementsWithBoundaryFacesHex(self):
"""Finds elements which have faces on the boundary.
At most a hexahedron can have all its 6 faces on the boundary.
output:
boundary_face_to_element: [2D array] array containing elements which have face
on the boundary [column 0] and a flag stating which faces they are [column 1]
"""
# DO NOT COMPUTE FACES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.faces is not None
if self.boundary_face_to_element is not None:
return self.boundary_face_to_element
# THIS METHOD ALWAYS RETURNS THE FACE TO ELEMENT ARRAY, AND DOES NOT CHECK
# IF THIS HAS BEEN COMPUTED BEFORE, THE REASON BEING THAT THE FACES CAN COME
# EXTERNALLY WHOSE ARRANGEMENT WOULD NOT CORRESPOND TO THE ONE USED INTERNALLY
# HENCE THIS MAPPING BECOMES NECESSARY
C = self.InferPolynomialDegree() - 1
node_arranger = NodeArrangementHex(C)[0]
all_faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(self.faces.dtype)
all_faces_in_faces = in2d(all_faces,self.faces[:,:4],consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# SO FAR WE HAVE COMPUTED THE ELEMENTS THAT CONTAIN FACES, HOWEVER
# NOTE THAT WE STILL HAVE NOT COMPUTED A MAPPING BETWEEN ELEMENTS AND
# FACES. WE ONLY KNOW WHICH ELEMENTS CONTAIN FACES FROM in2d.
# WE NEED TO FIND THIS MAPPING NOW
# WE NEED TO DO THIS DUMMY RECONSTRUCTION OF FACES BASED ON ELEMENTS
faces = self.elements[boundary_face_to_element[:,0][:,None],
node_arranger[boundary_face_to_element[:,1],:]].astype(self.faces.dtype)
# CHECK FOR THIS CONDITION AS ARRANGEMENT IS NO LONGER MAINTAINED
assert np.sum(faces[:,:4].astype(np.int64) - self.faces[:,:4].astype(np.int64)) == 0
# NOW GET THE ROW MAPPING BETWEEN OLD FACES AND NEW FACES
from Kuru.Tensor import shuffle_along_axis
row_mapper = shuffle_along_axis(faces[:,:4],self.faces[:,:4],consider_sort=True)
# UPDATE THE MAP
boundary_face_to_element[:,:] = boundary_face_to_element[row_mapper,:]
self.boundary_face_to_element = boundary_face_to_element
return self.boundary_face_to_element
def GetFacesHex(self):
"""Find all faces (surfaces) in the hexahedral mesh (boundary & interior).
Sets all_faces property and returns it
returns:
arr: numpy ndarray of all faces
"""
# DETERMINE DEGREE
p = self.InferPolynomialDegree()
# DO NOT COMPUTE IF ALREADY COMPUTED
if isinstance(self.all_faces,np.ndarray):
if self.all_faces.shape[0] > 1:
# IF ONLY THE LINEAR VERSION HAS BEEN COMPUTED, RECOMPUTE THE HIGHER ORDER VERSION
if self.all_faces.shape[1] == 4 and p > 1:
pass
else:
return self.all_faces
node_arranger = NodeArrangementHex(p-1)[0]
fsize = int((p+1)**3)
# GET ALL FACES FROM THE ELEMENT CONNECTIVITY
faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(np.int64)
# REMOVE DUPLICATES
self.all_faces, idx = unique2d(faces,consider_sort=True,order=False,return_index=True)
face_to_element = np.zeros((self.all_faces.shape[0],2),np.int64)
face_to_element[:,0] = idx % self.elements.shape[0]
face_to_element[:,1] = idx // self.elements.shape[0]
self.face_to_element = face_to_element
return self.all_faces
def GetHighOrderMesh(self,p=1, silent=True, **kwargs):
"""Given a linear tri, tet, quad or hex mesh compute high order mesh based on it.
This is a static method linked to the HigherOrderMeshing module"""
if not isinstance(p,int):
raise ValueError("p must be an integer")
else:
if p < 1:
raise ValueError("Value of p={} is not acceptable. Provide p>=1.".format(p))
if self.degree is None:
self.InferPolynomialDegree()
C = p-1
if 'C' in kwargs.keys():
if kwargs['C'] != p - 1:
raise ValueError("Did not understand the specified interpolation degree of the mesh")
del kwargs['C']
# DO NOT COMPUTE IF ALREADY COMPUTED FOR THE SAME ORDER
if self.degree is None:
self.degree = self.InferPolynomialDegree()
if self.degree == p:
return
# SITUATIONS WHEN ANOTHER HIGH ORDER MESH IS REQUIRED, WITH ONE HIGH
# ORDER MESH ALREADY AVAILABLE
if self.degree != 1 and self.degree - 1 != C:
dum = self.GetLinearMesh(remap=True)
self.__dict__.update(dum.__dict__)
if not silent:
print('Generating p = '+str(C+1)+' mesh based on the linear mesh...')
t_mesh = time()
# BUILD A NEW MESH BASED ON THE LINEAR MESH
if self.element_type == 'line':
nmesh = HighOrderMeshLine(C,self,**kwargs)
if self.element_type == 'tri':
if self.edges is None:
self.GetBoundaryEdgesTri()
# nmesh = HighOrderMeshTri(C,self,**kwargs)
nmesh = HighOrderMeshTri_SEMISTABLE(C,self,**kwargs)
elif self.element_type == 'tet':
# nmesh = HighOrderMeshTet(C,self,**kwargs)
nmesh = HighOrderMeshTet_SEMISTABLE(C,self,**kwargs)
elif self.element_type == 'quad':
if self.edges is None:
self.GetBoundaryEdgesQuad()
nmesh = HighOrderMeshQuad(C,self,**kwargs)
elif self.element_type == 'hex':
nmesh = HighOrderMeshHex(C,self,**kwargs)
self.points = nmesh.points
self.elements = nmesh.elements.astype(np.uint64)
if isinstance(self.corners,np.ndarray):
# NOT NECESSARY BUT GENERIC
self.corners = nmesh.corners.astype(np.uint64)
if isinstance(self.edges,np.ndarray):
self.edges = nmesh.edges.astype(np.uint64)
if isinstance(self.faces,np.ndarray):
if isinstance(nmesh.faces,np.ndarray):
self.faces = nmesh.faces.astype(np.uint64)
self.nelem = nmesh.nelem
self.nnode = self.points.shape[0]
self.element_type = nmesh.info
self.degree = C+1
self.ChangeType()
if not silent:
print('Finished generating the high order mesh. Time taken', time()-t_mesh,'sec')
def Line(self, left_point=0., right_point=1., n=10, p=1):
"""Creates a mesh of on a line for 1D rods/beams"""
self.__reset__()
assert p > 0
if not isinstance(left_point,float):
if not isinstance(left_point,int):
raise ValueError("left_point must be a number")
if not isinstance(right_point,float):
if not isinstance(right_point,int):
raise ValueError("right_point must be a number")
left_point = float(left_point)
right_point = float(right_point)
n = int(n)
if n <= 0:
raise ValueError("Number of discretisation cannot be zero or negative: n={}".format(n))
self.element_type = "line"
self.points = np.linspace(left_point,right_point,p*n+1)[:,None]
self.elements = np.zeros((n,p+1),dtype=np.int64)
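# Each of the n line elements carries p+1 equally spaced nodes; consecutive elements
# share their end nodes, so element e uses the global nodes p*e, p*e+1, ..., p*e+p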
for i in range(p+1):
self.elements[:,i] = p*np.arange(0,n)+i
self.nelem = self.elements.shape[0]
self.nnode = self.points.shape[0]
def Rectangle(self,lower_left_point=(0,0), upper_right_point=(2,1),
nx=5, ny=5, element_type="tri"):
"""Creates a quad/tri mesh of a rectangle"""
if element_type != "tri" and element_type != "quad":
raise ValueError("Element type should either be tri or quad")
if self.elements is not None and self.points is not None:
self.__reset__()
if (lower_left_point[0] > upper_right_point[0]) or \
(lower_left_point[1] > upper_right_point[1]):
raise ValueError("Incorrect coordinate for lower left and upper right vertices")
nx, ny = int(nx), int(ny)
if nx <= 0 or ny <= 0:
raise ValueError("Number of discretisation cannot be zero or negative: nx={} ny={}".format(nx,ny))
from scipy.spatial import Delaunay
x = np.linspace(lower_left_point[0],upper_right_point[0],nx+1)
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch, torch.utils.data
import glob, math, os
import scipy, scipy.ndimage
import sparseconvnet as scn
if not os.path.exists('train_val/'):
print('Downloading data ...')
os.system('bash download_and_split_data.sh')
categories=["02691156", "02773838", "02954340", "02958343",
"03001627", "03261776", "03467517", "03624134",
"03636649", "03642806", "03790512", "03797390",
"03948459", "04099429", "04225987", "04379243"]
classes=['Airplane', 'Bag', 'Cap', 'Car',
'Chair', 'Earphone', 'Guitar', 'Knife',
'Lamp', 'Laptop', 'Motorbike', 'Mug',
'Pistol', 'Rocket', 'Skateboard', 'Table']
nClasses=[4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
classOffsets=np.cumsum([0]+nClasses)
def init(c,resolution=50,sz=50*8+8,batchSize=16):
globals()['categ']=c
globals()['resolution']=resolution
globals()['batchSize']=batchSize
globals()['spatialSize']=torch.LongTensor([sz]*3)
if categ==-1:
print('All categories: 50 classes')
globals()['nClassesTotal']=int(classOffsets[-1])
else:
print('categ ',categ,classes[categ])
globals()['nClassesTotal']=int(nClasses[categ])
def load(xF, c, classOffset, nc):
xl=np.loadtxt(xF[0])
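# scale the point cloud so that its farthest point lies on the unit sphere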
xl/= ((xl**2).sum(1).max()**0.5)
y = np.loadtxt(xF[0][:-9]+'seg').astype('int64')+classOffset-1
return (xF[0], xl, y, c, classOffset, nc, np.random.randint(1e6))
def train():
d=[]
if categ==-1:
for c in range(16):
for x in torch.utils.data.DataLoader(
glob.glob('train_val/'+categories[c]+'/*.pts.train'),
collate_fn=lambda x: load(x, c, classOffsets[c],nClasses[c]),
num_workers=12):
d.append(x)
else:
for x in torch.utils.data.DataLoader(
glob.glob('train_val/'+categories[categ]+'/*.pts.train'),
collate_fn=lambda x: load(x, categ, 0, nClasses[categ]),
num_workers=12):
d.append(x)
def merge(tbl):
xl_=[]
xf_=[]
y_=[]
categ_=[]
mask_=[]
classOffset_=[]
nClasses_=[]
nPoints_=[]
np_random=np.random.RandomState([x[-1] for x in tbl])
for _, xl, y, categ, classOffset, nClasses, idx in tbl:
m=np.eye(3,dtype='float32')
m[0,0]*=np_random.randint(0,2)*2-1
m=np.dot(m,np.linalg.qr(np_random.randn(3,3))[0])
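# data augmentation: the QR factorisation of a Gaussian matrix gives a random
# orthogonal matrix (random rotation); the preceding sign flip adds a random reflection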
xl=np.dot(xl,m)
xl+=np_random.uniform(-1,1,(1,3)).astype('float32')
xl=np.floor(resolution*(4+xl)).astype('int64')
xf=np.ones((xl.shape[0],1)).astype('float32')
xl_.append(xl)
xf_.append(xf)
y_.append(y)
categ_.append(np.ones(y.shape[0],dtype='int64')*categ)
classOffset_.append(classOffset)
nClasses_.append(nClasses)
mask=np.zeros((y.shape[0],nClassesTotal),dtype='float32')
mask[:,classOffset:classOffset+nClasses]=1
mask_.append(mask)
nPoints_.append(y.shape[0])
xl_=[np.hstack([x,idx*np.ones((x.shape[0],1),dtype='int64')]) for idx,x in enumerate(xl_)]
return {'x': [torch.from_numpy(np.vstack(xl_)),torch.from_numpy(np.vstack(xf_))],
'y': torch.from_numpy(np.hstack(y_)),
'categ': torch.from_numpy(np.hstack(categ_)),
'classOffset': classOffset_,
'nClasses': nClasses_,
'mask': torch.from_numpy(np.vstack(mask_)),
'xf': [x[0] for x in tbl],
'nPoints': nPoints_}
return torch.utils.data.DataLoader(d,batch_size=batchSize, collate_fn=merge, num_workers=10, shuffle=True)
def valid():
d=[]
if categ==-1:
for c in range(16):
for x in torch.utils.data.DataLoader(
glob.glob('train_val/'+categories[c]+'/*.pts.valid'),
collate_fn=lambda x: load(x, c, classOffsets[c],nClasses[c]),
num_workers=12):
d.append(x)
else:
for x in torch.utils.data.DataLoader(
glob.glob('train_val/'+categories[categ]+'/*.pts.valid'),
collate_fn=lambda x: load(x, categ, 0, nClasses[categ]),
num_workers=12):
d.append(x)
print(len(d))
def merge(tbl):
xl_=[]
xf_=[]
y_=[]
categ_=[]
mask_=[]
classOffset_=[]
nClasses_=[]
nPoints_=[]
np_random=np.random.RandomState([x[-1] for x in tbl])
for _, xl, y, categ, classOffset, nClasses, idx in tbl:
m=np.eye(3,dtype='float32')
m[0,0]*=np_random.randint(0,2)*2-1
m=np.dot(m,np.linalg.qr(np_random.randn(3,3))[0])
xl=np.dot(xl,m)
xl+=np_random.uniform(-1,1,(1,3)).astype('float32')
xl=np.floor(resolution*(4+xl)).astype('int64')
import os
import glob
import numpy as np
import pandas as pd
from functools import partial
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from ashrae.blenders import load_preds, GeneralizedMeanBlender
from ashrae.utils import OUTPUT_PATH, load_data, rmsle, timer
MODEL_LIST = [
f"{OUTPUT_PATH}/lgb-split_meter-no_normalization.npy",
f"{OUTPUT_PATH}/lgb-split_meter-target_normalization.npy",
f"{OUTPUT_PATH}/lgb-split_primary_use-no_normalization.npy",
f"{OUTPUT_PATH}/lgb-split_primary_use-target_normalization.npy",
f"{OUTPUT_PATH}/lgb-split_site-no_normalization.npy",
f"{OUTPUT_PATH}/lgb-split_site-target_normalization.npy",
f"{OUTPUT_PATH}/cb-split_meter-no_normalization.npy",
f"{OUTPUT_PATH}/cb-split_meter-target_normalization.npy",
f"{OUTPUT_PATH}/cb-split_primary_use-no_normalization.npy",
f"{OUTPUT_PATH}/cb-split_primary_use-target_normalization.npy",
f"{OUTPUT_PATH}/cb-split_site-no_normalization.npy",
f"{OUTPUT_PATH}/cb-split_site-target_normalization.npy",
f"{OUTPUT_PATH}/mlp-split_meter-no_normalization.npy",
f"{OUTPUT_PATH}/submission_cleanup.csv",
f"{OUTPUT_PATH}/submission_kfold.csv",
f"{OUTPUT_PATH}/submission_meter.csv",
]
if __name__ == "__main__":
"""
python scripts/05_blend_predictions.py
"""
# load test data
with timer("load test data"):
test = load_data("test_clean")
leak = load_data("is_leak")
target = leak["meter_reading"].values
# load predictions
with timer("load predictions"):
preds_matrix = [np.load(x) for x in MODEL_LIST if ".npy" in x]
replace_inds = (test.site_id == 0) & (test.meter == 0)
if len([x for x in MODEL_LIST if ".csv" in x]) > 0:
preds_matrix += [pd.read_csv(x).meter_reading.values for x in MODEL_LIST if ".csv" in x]
preds_matrix = np.vstack(preds_matrix).T
preds_matrix[preds_matrix < 0] = 0
# blend predictions
with timer("blend predictions"):
gmb = GeneralizedMeanBlender()
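# GeneralizedMeanBlender presumably combines the model predictions with a weighted
# power (generalized) mean, roughly (sum_i w_i * x_i**p)**(1/p), with parameters p, c
# and per-model weights; it is applied in log1p space below and rescaled by a fixed
# calibration constant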
gmb.p = 0.11375872112626925
gmb.c = 0.99817730007820798
gmb.weights = [0.01782498, 0.03520153, 0.03286305, 0.00718961,
0.01797213, 0.0004982 , 0.14172883, 0.12587602,
0.08538773, 0.09482115, 0.09476288, 0.10101228,
0.15306998, 0.03358389, 0.00719679, 0.05101097]
test_preds = 0.99576627605010293*np.expm1(gmb.transform(np.log1p(preds_matrix)))
# create submission
with timer("create submission"):
subm = load_data("sample_submission")
subm["meter_reading"] = test_preds
subm.loc[subm.meter_reading < 0, "meter_reading"] = 0
subm.loc[~np.isnan(target), "meter_reading"] = target[~ | np.isnan(target) | numpy.isnan |
import numpy as np
import keras.backend as K
import tensorflow as tf
##########################################################################################################################
# see: https://ilmonteux.github.io/2019/05/10/segmentation-metrics.html
# https://gist.github.com/ilmonteux/8340df952722f3a1030a7d937e701b5a#file-segmentation_metrics-py
##########################################################################################################################
def metrics_np(y_true, y_pred, metric_name, metric_type='standard', drop_last = True, mean_per_class=False, verbose=False):
"""
Compute mean metrics of two segmentation masks, via numpy.
IoU(A,B) = |A & B| / (| A U B|)
Dice(A,B) = 2*|A & B| / (|A| + |B|)
Args:
y_true: true masks, one-hot encoded.
y_pred: predicted masks, either softmax outputs, or one-hot encoded.
metric_name: metric to be computed, either 'iou' or 'dice'.
metric_type: one of 'standard' (default), 'soft', 'naive'.
In the standard version, y_pred is one-hot encoded and the mean
is taken only over classes that are present (in y_true or y_pred).
The 'soft' version of the metrics are computed without one-hot
encoding y_pred.
The 'naive' version return mean metrics where absent classes contribute
to the class mean as 1.0 (instead of being dropped from the mean).
drop_last = True: boolean flag to drop last class (usually reserved
for background class in semantic segmentation)
mean_per_class = False: return mean along batch axis for each class.
verbose = False: print intermediate results such as intersection, union
(as number of pixels).
Returns:
IoU/Dice of y_true and y_pred, as a float, unless mean_per_class == True
in which case it returns the per-class metric, averaged over the batch.
Inputs are B*W*H*N tensors, with
B = batch size,
W = width,
H = height,
N = number of classes
"""
assert y_true.shape == y_pred.shape, 'Input masks should be same shape, instead are {}, {}'.format(y_true.shape, y_pred.shape)
assert len(y_pred.shape) == 4, 'Inputs should be B*W*H*N tensors, instead have shape {}'.format(y_pred.shape)
flag_soft = (metric_type == 'soft')
flag_naive_mean = (metric_type == 'naive')
num_classes = y_pred.shape[-1]
# if only 1 class, there is no background class and it should never be dropped
drop_last = drop_last and num_classes>1
if not flag_soft:
if num_classes>1:
# get one-hot encoded masks from y_pred (true masks should already be in correct format, do it anyway)
y_pred = np.array([ np.argmax(y_pred, axis=-1)==i for i in range(num_classes) ]).transpose(1,2,3,0)
y_true = np.array([ np.argmax(y_true, axis=-1)==i for i in range(num_classes) ]).transpose(1,2,3,0)
else:
y_pred = (y_pred > 0).astype(int)
y_true = (y_true > 0).astype(int)
# intersection and union shapes are batch_size * n_classes (values = area in pixels)
axes = (1,2) # W,H axes of each image
intersection = np.sum(np.abs(y_pred * y_true), axis=axes) # or, np.logical_and(y_pred, y_true) for one-hot
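# intersection counts |A & B| pixels per image and class; together with |A| + |B|
# this yields IoU = |A & B| / (|A| + |B| - |A & B|) and Dice = 2*|A & B| / (|A| + |B|)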
mask_sum = np.sum(np.abs(y_true), axis=axes) + np.sum(np.abs(y_pred), axis=axes)
import numpy as np
import cv2
from homography import calcHomographyLinear, calcHomography, stitchPanorama, cylindericlMap
def DEBUG(*args):
if LVL >= 1:
print("[DEBUG]", *args)
class Model(object):
__slots__ = ('val', 'th', 'd', 'n')
def fit(self, X, Y):
raise NotImplementedError
def fwd(self, X):
raise NotImplementedError
def dist(self, predY, trueY):
raise NotImplementedError
class HomoModel(Model):
def __init__(self, th=5, d=50, n=4):
self.th = th
self.d = d
self.n = n
self.val = np.empty((3,3),dtype=np.float32)
def fit(self, X, Y, collective=False):
"""
@brief fit homography model
@param[in] X - observation input 3x4 or 2x4
@param[in] Y - observation output 3x4 or 2x4
"""
nx, mx = X.shape
ny, my = Y.shape
assert ((mx == my) and (mx == self.n)) or ((mx == my) and (mx > self.n) and collective) , "invalid data size should be %d" % self.n
assert (nx == ny) and nx in [2, 3], "invalid input dimension for row numbers"
"""if nx == 2:
x = np.ones((3, mx),dtype=np.float32)
y = np.ones((3, my),dtype=np.float32)
x[:2,:] = X
y[:2,:] = Y
else:
x = X
y = Y"""
#self.val = calcHomographyLinear(X.T[:,:2], Y.T[:,:2])
if collective:
self.val = calcHomographyLinear(X.T[:,:2], Y.T[:,:2], True)
else:
self.val = calcHomography(X.T[:,:2], Y.T[:,:2], False)
return self.val
def fwd(self, X):
nx, mx = X.shape
assert nx in [2, 3], "invalid input dimension for row numbers"
if nx == 2:
x = np.ones((3, mx),dtype=np.float32)
x[:2,:] = X
else:
x = X
y = self.val @ x
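# perspective divide: normalise homogeneous coordinates by the last row
# (the small epsilon guards against division by zero)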
return y/(y[-1,:] + 1e-10)
def reproj(self, Y):
ny, my = Y.shape
assert ny in [2, 3], "invalid input dimension for row numbers"
if ny == 2:
y = np.ones((3, my),dtype=np.float32)
import numpy as np
import tensorflow as tf
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
from mpl_toolkits import mplot3d
from tensorflow.python import debug as tf_debug
# Returns a uniformly generated numpy array of the form [ x | t | random ], where x and t are shuffled
# Shape of the generated x_arr and generated t_arr should be the same
def generateData(x_start, x_end, dx, t_start, t_end, dt):
x_arr = np.arange(start=x_start, stop=x_end, step=dx, dtype=np.float32).reshape(-1,1)
np.random.shuffle(x_arr)
t_arr = np.arange(start=t_start, stop=t_end, step=dt, dtype=np.float32).reshape(-1,1)
np.random.shuffle(t_arr)
import matplotlib
matplotlib.rcParams['text.usetex'] = True
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import numpy as np
import math
from matplotlib import rc
import sys, os
sys.path.append(os.path.dirname(sys.path[0]))
from linearSolvers import AMORE
from elementLibrary import shapeFunction, invShapeFunction, stiffnessMatrix
from plotTools import pM
from linearSolvers import AMORE
def getGraphDataOFE(coord,coordinates,incidentElements,edge,displacement,DMatrix,nSampling):
X=np.zeros((nSampling,nSampling))
Y=np.zeros((nSampling,nSampling))
U=np.zeros((nSampling,nSampling))
V=np.zeros((nSampling,nSampling))
Sxx=np.zeros((nSampling,nSampling))
Syy=np.zeros((nSampling,nSampling))
Sxy=np.zeros((nSampling,nSampling))
r=np.linspace(-1,1,nSampling)
s=np.linspace(-1,1,nSampling)
if len(coord)==3:
newCoord=np.zeros((4,2))
newCoord[0:3,:]=coord
newCoord[3,:]=coord[2,:]
for i in range(nSampling):
for j in range(nSampling):
Nmat,_=invShapeFunction.quadShapeFunction([r[i],s[j]])
xy=(Nmat@newCoord).reshape(-1)
X[i,j]=xy[0]
Y[i,j]=xy[1]
isoCoord=invShapeFunction.invTri(coord,xy)
for k in range(len(incidentElements)):
if incidentElements[k]:
numbering=incidentElements[k].numbering
fNumbering=getFullNumber(numbering)
tempCoord=AMORE.getCoord(coordinates,numbering)
rho=AMORE.getRho(edge,k)
LCmat=stiffnessMatrix.getLC(coord,rho)
rhoValue=isoCoord[0]*rho[0]+isoCoord[1]*rho[1]+(1.0-isoCoord[0]-isoCoord[1])*rho[2]
if len(tempCoord)==4:
newIsoCoord=invShapeFunction.invQuad(tempCoord,xy)
Nmat,Bmat,_=shapeFunction.shapeQuadFE(tempCoord,newIsoCoord[0],newIsoCoord[1])
localDisp=rhoValue*Nmat@displacement[fNumbering]
localStress=rhoValue*DMatrix@Bmat@displacement[fNumbering]+DMatrix@LCmat@Nmat@displacement[fNumbering]
U[i,j]+=localDisp[0]
V[i,j]+=localDisp[1]
Sxx[i,j]+=localStress[0]
Syy[i,j]+=localStress[1]
Sxy[i,j]+=localStress[2]
elif len(tempCoord)==9:
newIsoCoord=invShapeFunction.invQuadQuad(tempCoord,xy)
Nmat,Bmat,_=shapeFunction.shapeQuadQuadFE(tempCoord,newIsoCoord[0],newIsoCoord[1])
localDisp=rhoValue*Nmat@displacement[fNumbering]
localStress=rhoValue*DMatrix@Bmat@displacement[fNumbering]+DMatrix@LCmat@Nmat@displacement[fNumbering]
U[i,j]+=localDisp[0]
V[i,j]+=localDisp[1]
Sxx[i,j]+=localStress[0]
Syy[i,j]+=localStress[1]
Sxy[i,j]+=localStress[2]
else: raise ValueError
elif len(coord)==6:
newCoord=np.zeros((9,2))
newCoord[0:3,:]=coord[0:3,:]
newCoord[6,:]=newCoord[3,:]=coord[2,:]
newCoord[4,:]=coord[3,:]
newCoord[5,:]=coord[4,:]
newCoord[7,:]=newCoord[8,:]=coord[5,:]
for i in range(nSampling):
for j in range(nSampling):
Nmat,_=invShapeFunction.quadQuadShapeFunction([r[i],s[j]])
xy=(Nmat@newCoord).reshape(-1)
X[i,j]=xy[0]
Y[i,j]=xy[1]
isoCoord=invShapeFunction.invTriQuad(coord,xy)
triNmat,_=invShapeFunction.triQuadShapeFunction(isoCoord)
for k in range(len(incidentElements)):
if incidentElements[k]:
numbering=incidentElements[k].numbering
fNumbering=getFullNumber(numbering)
tempCoord=AMORE.getCoord(coordinates,numbering)
rho=np.zeros(6)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 <NAME> <<EMAIL>>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
#
# HDP inference code is adapted from the onlinehdp.py script by
# <NAME> (chongw at cs.princeton.edu).
# http://www.cs.princeton.edu/~chongw/software/onlinehdp.tar.gz
#
"""Module for `online Hierarchical Dirichlet Processing
<http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
The core estimation code is directly adapted from the `blei-lab/online-hdp <https://github.com/blei-lab/online-hdp>`_
from `<NAME>: "Online Variational Inference for the Hierarchical Dirichlet Process", JMLR (2011)
<http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
Examples
--------
Train :class:`~gensim.models.hdpmodel.HdpModel`
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.models import HdpModel
>>>
>>> hdp = HdpModel(common_corpus, common_dictionary)
You can then infer topic distributions on new, unseen documents, with
.. sourcecode:: pycon
>>> unseen_document = [(1, 3.), (2, 4)]
>>> doc_hdp = hdp[unseen_document]
To print 20 topics with top 10 most probable words.
.. sourcecode:: pycon
>>> topic_info = hdp.print_topics(num_topics=20, num_words=10)
The model can be updated (trained) with new documents via
.. sourcecode:: pycon
>>> hdp.update([[(1, 2)], [(1, 1), (4, 5)]])
"""
from __future__ import with_statement
import logging
import time
import warnings
import numpy as np
from scipy.special import gammaln, psi # gamma function utils
from six.moves import zip, range
from gensim import interfaces, utils, matutils
from gensim.matutils import dirichlet_expectation, mean_absolute_difference
from gensim.models import basemodel, ldamodel
from gensim.utils import deprecated
logger = logging.getLogger(__name__)
meanchangethresh = 0.00001
rhot_bound = 0.0
def expect_log_sticks(sticks):
r"""For stick-breaking hdp, get the :math:`\mathbb{E}[log(sticks)]`.
Parameters
----------
sticks : numpy.ndarray
Array of values for stick.
Returns
-------
numpy.ndarray
Computed :math:`\mathbb{E}[log(sticks)]`.
"""
dig_sum = psi(np.sum(sticks, 0))
ElogW = psi(sticks[0]) - dig_sum
Elog1_W = psi(sticks[1]) - dig_sum
n = len(sticks[0]) + 1
Elogsticks = np.zeros(n)
Elogsticks[0: n - 1] = ElogW
Elogsticks[1:] = Elogsticks[1:] + np.cumsum(Elog1_W)
return Elogsticks
def lda_e_step(doc_word_ids, doc_word_counts, alpha, beta, max_iter=100):
r"""Performs EM-iteration on a single document for calculation of likelihood for a maximum iteration of `max_iter`.
Parameters
----------
doc_word_ids : int
Id of corresponding words in a document.
doc_word_counts : int
Count of words in a single document.
alpha : numpy.ndarray
Lda equivalent value of alpha.
beta : numpy.ndarray
Lda equivalent value of beta.
max_iter : int, optional
Maximum number of times the expectation will be maximised.
Returns
-------
(numpy.ndarray, numpy.ndarray)
Computed (:math:`likelihood`, :math:`\gamma`).
"""
gamma = np.ones(len(alpha))
expElogtheta = np.exp(dirichlet_expectation(gamma))
betad = beta[:, doc_word_ids]
phinorm = np.dot(expElogtheta, betad) + 1e-100
counts = np.array(doc_word_counts)
#coding:utf-8
import os
from PIL import Image
import numpy as np
# Source directory
MyPath = '/media/allen/orange/00第二篇论文实验结果/upernet_swin_base_potsdam_normal_240k/small_img/'
# Output directory
OutPath = '/media/allen/orange/00第二篇论文实验结果/upernet_swin_base_potsdam_normal_240k/rgb_img/'
width=600
height=600
width1=6000
height1=6000
width_num=width1 // width
height_num=height1 // height
def splitList(list_all):
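# Group file names by their third underscore-separated token (assumed to identify the
# parent image); each group is then sorted so the tiles can be reassembled in order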
list_unique = []
list_name_group = []
for i in range(len(list_all)):
ss = list_all[i].split('_')
if ss[2] not in list_unique:
list_unique.append(ss[2])
list_name_group.append([])
list_name_group[-1].append(list_all[i])
else:
list_name_group[list_unique.index(ss[2])].append(list_all[i])
for i in range(len(list_name_group)):
list_name_group[i].sort()
return list_name_group
def run():
#切换到源目录,遍历源目录下所有图片
os.chdir(MyPath)
listName = os.listdir(os.getcwd())
listNameGroup = splitList(listName)
mask_whole = np.zeros((height1,width1,3),dtype=np.uint8)
mask_whole1 = np.zeros((height1,width1,3),dtype=np.uint8)
for img_num in range(len(listNameGroup)):
for i in range(width_num):
for j in range(height_num):
print('name: %s' % listNameGroup[img_num][i*width_num+j])
img = Image.open(MyPath + listNameGroup[img_num][i*width_num+j])
img_array = np.asarray(img)
"""
Module of functions involving great circles
(thus assuming spheroid model of the earth)
with points given in longitudes and latitudes.
"""
from __future__ import print_function
import math
import numpy
import numpy.random
# Equatorial radius of the earth in kilometers
EARTH_ER = 6378.137
# Authalic radius of the earth in kilometers
EARTH_AR = 6371.007
# Meridional radius of the earth in kilometers
EARTH_MR = 6367.449
# Polar radius of the earth in kilometers
EARTH_PR = 6356.752
DEG2RAD = math.pi / 180.0
RAD2DEG = 180.0 / math.pi
KM2MI = 0.6213712
MI2KM = 1.609344
def lonlatdistance(pt1lon, pt1lat, pt2lon, pt2lat):
"""
Compute the great circle distance between two points
on a sphere using the haversine formula.
Arguments:
pt1lon - longitude(s) of the first point
pt1lat - latitude(s) of the first point
pt2lon - longitude(s) of the second point
pt2lat - latitude(s) of the second point
Returns:
The great circle distance(s) in degrees [0.0, 180.0]
"""
lon1 = numpy.deg2rad(numpy.asarray(pt1lon, dtype=float))
lat1 = numpy.deg2rad(numpy.asarray(pt1lat, dtype=float))
lon2 = numpy.deg2rad(numpy.asarray(pt2lon, dtype=float))
lat2 = numpy.deg2rad(numpy.asarray(pt2lat, dtype=float))
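# haversine formula: d = 2*arcsin( sqrt( sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2) ) )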
dellat = numpy.power(numpy.sin(0.5 * (lat2 - lat1)), 2.0)
dellon = numpy.cos(lat1) * numpy.cos(lat2) * \
numpy.power(numpy.sin(0.5 * (lon2 - lon1)), 2.0)
dist = 2.0 * numpy.arcsin(numpy.power(dellon + dellat, 0.5))
return numpy.rad2deg(dist)
def lonlatintersect(gc1lon1, gc1lat1, gc1lon2, gc1lat2,
gc2lon1, gc2lat1, gc2lon2, gc2lat2):
"""
Compute the intersections of two great circles. Uses the line of
intersection between the two planes of the great circles.
Arguments:
gc1lon1 - longitude(s) of the first point on the first great circle
gc1lat1 - latitude(s) of the first point on the first great circle
gc1lon2 - longitude(s) of the second point on the first great circle
gc1lat2 - latitude(s) of the second point on the first great circle
gc2lon1 - longitude(s) of the first point on the second great circle
gc2lat1 - latitude(s) of the first point on the second great circle
gc2lon2 - longitude(s) of the second point on the second great circle
gc2lat2 - latitude(s) of the second point on the second great circle
Returns:
( (pt1lon, pt1lat), (pt2lon, pt2lat) ) - the longitudes and latitudes
of the two intersections of the two great circles. NaN will
be returned for both longitudes and latitudes if a great
circle is not well-defined, or the two great-circles coincide.
"""
# Minimum acceptable norm of a cross product
# arcsin(1.0E-7) = 0.02" or 0.64 m on the Earth
MIN_NORM = 1.0E-7
# Convert longitudes and latitudes to points on a unit sphere
# The "+ 0.0 * ptlonr" is to broadcast gcz if needed
ptlonr = numpy.deg2rad(numpy.asarray(gc1lon1, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc1lat1, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc1xyz1 = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(gc1lon2, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc1lat2, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc1xyz2 = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(gc2lon1, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc2lat1, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc2xyz1 = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(gc2lon2, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc2lat2, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc2xyz2 = numpy.array([gcx, gcy, gcz])
# Get the unit-perpendicular to the plane going through the
# origin and the two points on each great circle. If the
# norm of the cross product is too small, the great circle
# is not well-defined, so zero it out so NaN is produced.
gc1pp = numpy.cross(gc1xyz1, gc1xyz2, axis=0)
norm = (gc1pp[0]**2 + gc1pp[1]**2 + gc1pp[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
gc1pp /= norm
gc2pp = numpy.cross(gc2xyz1, gc2xyz2, axis=0)
norm = (gc2pp[0]**2 + gc2pp[1]**2 + gc2pp[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
gc2pp /= norm
# The line of intersection of the two planes is perpendicular
# to the two plane-perpendiculars and goes through the origin.
# Points of intersection are the points on this line one unit
# from the origin. If the norm of the cross product is too
# small, the two planes are practically indistinguishable from
# each other (coincide).
pt1xyz = numpy.cross(gc1pp, gc2pp, axis=0)
norm = (pt1xyz[0]**2 + pt1xyz[1]**2 + pt1xyz[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
pt1xyz /= norm
pt2xyz = -1.0 * pt1xyz
# Convert back to longitudes and latitudes
pt1lats = numpy.rad2deg(numpy.arcsin(pt1xyz[2]))
pt1lons = numpy.rad2deg(numpy.arctan2(pt1xyz[1], pt1xyz[0]))
pt2lats = numpy.rad2deg(numpy.arcsin(pt2xyz[2]))
pt2lons = numpy.rad2deg(numpy.arctan2(pt2xyz[1], pt2xyz[0]))
return ( (pt1lons, pt1lats), (pt2lons, pt2lats) )
def lonlatfwdpt(origlon, origlat, endlon, endlat, fwdfact):
"""
Find the longitude and latitude of a point that is a given factor
times the distance along the great circle from an origination point
to an ending point.
Note that the shorter great circle arc from the origination point
to the ending point is always used.
If O is the origination point, E is the ending point, and P is
the point returned from this computation, a factor value of:
0.5: P bisects the great circle arc between O and E
2.0: E bisects the great circle arc between O and P
-1.0: O bisects the great circle arc between P and E
Arguments:
origlon - longitude(s) of the origination point
origlat - latitude(s) of the origination point
endlon - longitude(s) of the ending point
endlat - latitude(s) of the ending point
fwdfact - forward distance factor(s)
Returns:
(ptlon, ptlat) - longitude and latitude of the computed point(s).
NaN will be returned for both the longitude and
latitude if the great circle is not well-defined.
"""
# Minimum acceptable norm of a cross product
# arcsin(1.0E-7) = 0.02" or 0.64 m on the Earth
MIN_NORM = 1.0E-7
# Convert longitudes and latitudes to points on a unit sphere
# The "+ 0.0 * ptlonr" is to broadcast gcz if needed
ptlonr = numpy.deg2rad(numpy.asarray(origlon, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(origlat, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
origxyz = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(endlon, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(endlat, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
endxyz = numpy.array([gcx, gcy, gcz])
# Determine the rotation matrix about the origin that takes
# origxyz to (1,0,0) (equator and prime meridian) and endxyz
# to (x,y,0) with y > 0 (equator in eastern hemisphere).
#
# The first row of the matrix is origxyz.
#
# The third row of the matrix is the normalized cross product
# of origxyz and endxyz. (The great circle plane perpendicular.)
# If the norm of this cross product is too small, the great
# circle is not well-defined, so zero it out so NaN is produced.
gcpp = numpy.cross(origxyz, endxyz, axis=0)
norm = (gcpp[0]**2 + gcpp[1]**2 + gcpp[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
gcpp /= norm
# The second row of the matrix is the cross product of the
# third row (gcpp) and the first row (origxyz). This will
# have norm 1.0 since gcpp and origxyz are perpendicular
# unit vectors.
fwdax = numpy.cross(gcpp, origxyz, axis=0)
# Get the coordinates of the rotated end point.
endtrx = origxyz[0] * endxyz[0] + origxyz[1] * endxyz[1] + origxyz[2] * endxyz[2]
endtry = fwdax[0] * endxyz[0] + fwdax[1] * endxyz[1] + fwdax[2] * endxyz[2]
# Get the angle along the equator of the rotated end point, multiply
# by the given factor, and convert this new angle back to coordinates.
fwdang = numpy.arctan2(endtry, endtrx)
fwdang *= numpy.asarray(fwdfact, dtype=float)
fwdtrx = numpy.cos(fwdang)
fwdtry = numpy.sin(fwdang)
# Rotate the new point back to the original coordinate system
# The inverse rotation matrix is the transpose of that matrix.
fwdx = origxyz[0] * fwdtrx + fwdax[0] * fwdtry
fwdy = origxyz[1] * fwdtrx + fwdax[1] * fwdtry
fwdz = origxyz[2] * fwdtrx + fwdax[2] * fwdtry
# Convert the point coordinates into longitudes and latitudes
ptlat = numpy.rad2deg(numpy.arcsin(fwdz))
ptlon = numpy.rad2deg(numpy.arctan2(fwdy, fwdx))
return (ptlon, ptlat)
def equidistscatter(min_lon, min_lat, max_lon, max_lat, min_gcdist, dfactor=5.0):
"""
Create a roughly equidistant set of points in a specified region.
This is done by creating a dense "grid" of points, then repeatedly
randomly selecting a point from that collection and eliminating
points too close to that selected point. For the special cases
where min_lon and max_lon, or min_lat and max_lat, are very close
relative to min_gcdist, the maximum number of evenly spaced points
that can be put on the line described is computed and assigned.
Arguments:
min_lon - minimum longitude of the region
min_lat - minimum latitude of the region
max_lon - maximum longitude of the region
max_lat - maximum latitude of the region
min_gcdist - minimum distance, in great circle degrees,
between returned points
dfactor - the number of axis points in the dense "grid"
compared to the desired "grid". Larger value will
generally increase the uniformity of the returned
points but will also increase the time required
for the calculation.
Returns:
(pt_lons, pt_lats) - ptlons is an array of longitudes and ptlats
is an array of latitudes of (somewhat random) points in
the specified region that are roughly equidistant from
each other but not closer than min_gcdist to each other.
"""
lonmin = float(min_lon)
lonmax = float(max_lon)
if math.fabs(lonmax - lonmin) > 180.0:
raise ValueError("Difference between max_lon and min_lon is more than 180.0")
latmin = float(min_lat)
if math.fabs(latmin) > 90.0:
raise ValueError("min_lat is not in [-90.0,90.0]")
latmax = float(max_lat)
if math.fabs(latmax) > 90.0:
raise ValueError("max_lat is not in [-90.0,90.0]")
mindeg = float(min_gcdist)
if (mindeg <= 0.0) or (mindeg >= 90.0):
raise ValueError("min_gcdist is not in (0.0,90.0)")
dfact = float(dfactor)
if dfact < 1.0:
raise ValueError("dfactor is less than one");
# If lonmin is relatively close to lonmax, directly
# compute the points. Distance on a meridian is the
# difference in latitudes.
if math.fabs(lonmax - lonmin) < (0.05 * mindeg):
lon = 0.5 * (lonmax + lonmin)
dellat = mindeg
numlats = int( (math.fabs(latmax - latmin) + dellat) / dellat )
if latmax < latmin:
dellat *= -1.0
hdiff = 0.5 * ( (latmax - latmin) - (numlats - 1) * dellat )
latvals = numpy.linspace(latmin + hdiff, latmax - hdiff, numlats)
lonvals = numpy.ones((numlats,), dtype=float) * lon
import numpy as np
import cv2
def img_clahe(img):
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
img = clahe.apply(img)
return img
def img_clahe_cm(img):
b,g,r = cv2.split(img)
clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(8,8))
b = clahe.apply(b)
g = clahe.apply(g)
r = clahe.apply(r)
output = cv2.merge((b,g,r))
return output
def img_normalized(img):
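# z-score normalisation: subtract the mean and divide by the standard deviation
# (the small epsilon avoids division by zero)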
std = np.std(img)
mean = np.mean(img)
img_normalized = (img - mean) / (std + 1e-10)
return img_normalized
def convert_16to8(img):
img = (img - np.mean(img)) / np.std(img)
img = (img - np.min(img)) / (np.max(img) - np.min(img))
import numpy as np
from scipy.spatial.transform import Rotation as R
import magpylib as magpy
from magpylib._src.exceptions import MagpylibBadUserInput
from magpylib._src.exceptions import MagpylibMissingInput
###########################################################
###########################################################
# OBJECT INPUTS
def test_input_objects_position_good():
"""good input: magpy.Sensor(position=inp)"""
goods = [
(1, 2, 3),
(0, 0, 0),
((1, 2, 3), (2, 3, 4)),
[(2, 3, 4)],
[2, 3, 4],
[[2, 3, 4], [3, 4, 5]],
[(2, 3, 4), (3, 4, 5)],
np.array((1, 2, 3)),
np.array(((1, 2, 3), (2, 3, 4))),
]
for good in goods:
sens = magpy.Sensor(position=good)
np.testing.assert_allclose(sens.position, np.squeeze(np.array(good)))
def test_input_objects_position_bad():
"""bad input: magpy.Sensor(position=inp)"""
bads = [
(1, 2),
(1, 2, 3, 4),
[(1, 2, 3, 4)] * 2,
(((1, 2, 3), (1, 2, 3)), ((1, 2, 3), (1, 2, 3))),
"x",
["x", "y", "z"],
dict(woot=15),
True,
]
for bad in bads:
np.testing.assert_raises(MagpylibBadUserInput, magpy.Sensor, bad)
def test_input_objects_pixel_good():
"""good input: magpy.Sensor(pixel=inp)"""
goods = [
(1, -2, 3),
(0, 0, 0),
((1, 2, 3), (2, 3, 4)),
(((1, 2, 3), (2, -3, 4)), ((1, 2, 3), (2, 3, 4))),
[(2, 3, 4)],
[2, 3, 4],
[[-2, 3, 4], [3, 4, 5]],
[[[2, 3, 4], [3, 4, 5]]] * 4,
[(2, 3, 4), (3, 4, 5)],
np.array((1, 2, -3)),
np.array(((1, -2, 3), (2, 3, 4))),
]
for good in goods:
sens = magpy.Sensor(pixel=good)
np.testing.assert_allclose(sens.pixel, good)
def test_input_objects_pixel_bad():
"""bad input: magpy.Sensor(pixel=inp)"""
bads = [
(1, 2),
(1, 2, 3, 4),
[(1, 2, 3, 4)] * 2,
"x",
["x", "y", "z"],
dict(woot=15),
True,
]
for bad in bads:
np.testing.assert_raises(MagpylibBadUserInput, magpy.Sensor, (0, 0, 0), bad)
def test_input_objects_orientation_good():
"""good input: magpy.Sensor(orientation=inp)"""
goods = [
None,
(0.1, 0.2, 0.3),
(0, 0, 0),
[(0.1, 0.2, 0.3)],
[(0.1, 0.2, 0.3)] * 5,
]
for good in goods:
if good is None:
sens = magpy.Sensor(orientation=None)
np.testing.assert_allclose(sens.orientation.as_rotvec(), (0, 0, 0))
else:
sens = magpy.Sensor(orientation=R.from_rotvec(good))
np.testing.assert_allclose(
sens.orientation.as_rotvec(), np.squeeze(np.array(good))
)
def test_input_objects_orientation_bad():
"""bad input: magpy.Sensor(orientation=inp)"""
bads = [
(1, 2),
(1, 2, 3, 4),
[(1, 2, 3, 4)] * 2,
"x",
["x", "y", "z"],
dict(woot=15),
True,
]
for bad in bads:
np.testing.assert_raises(
MagpylibBadUserInput, magpy.Sensor, (0, 0, 0), (0, 0, 0), bad
)
def test_input_objects_current_good():
"""good input: magpy.current.Loop(inp)"""
goods = [
None,
0,
1,
1.2,
np.array([1, 2, 3])[1],
-1,
-1.123,
True,
]
for good in goods:
src = magpy.current.Loop(good)
if good is None:
assert src.current is None
else:
np.testing.assert_allclose(src.current, good)
def test_input_objects_current_bad():
"""bad input: magpy.current.Loop(inp)"""
bads = [
(1, 2),
[(1, 2, 3, 4)] * 2,
"x",
["x", "y", "z"],
dict(woot=15),
]
for bad in bads:
np.testing.assert_raises(MagpylibBadUserInput, magpy.current.Loop, bad)
def test_input_objects_diameter_good():
"""good input: magpy.current.Loop(diameter=inp)"""
goods = [
None,
0,
1,
1.2,
np.array([1, 2, 3])[1],
True,
]
for good in goods:
src = magpy.current.Loop(diameter=good)
if good is None:
assert src.diameter is None
else:
np.testing.assert_allclose(src.diameter, good)
def test_input_objects_diameter_bad():
"""bad input: magpy.current.Loop(diameter=inp)"""
bads = [
(1, 2),
[(1, 2, 3, 4)] * 2,
"x",
["x", "y", "z"],
dict(woot=15),
-1,
-1.123,
]
for bad in bads:
with np.testing.assert_raises(MagpylibBadUserInput):
magpy.current.Loop(diameter=bad)
def test_input_objects_vertices_good():
"""good input: magpy.current.Line(vertices=inp)"""
goods = [
None,
((0, 0, 0), (0, 0, 0)),
((1, 2, 3), (2, 3, 4)),
[(2, 3, 4), (-1, -2, -3)] * 2,
[[2, 3, 4], [3, 4, 5]],
np.array(((1, 2, 3), (2, 3, 4))),
]
for good in goods:
src = magpy.current.Line(vertices=good)
if good is None:
assert src.vertices is None
else:
np.testing.assert_allclose(src.vertices, good)
def test_input_objects_vertices_bad():
"""bad input: magpy.current.Line(vertices=inp)"""
bads = [
(1, 2),
[(1, 2, 3, 4)] * 2,
[(1, 2, 3)],
"x",
["x", "y", "z"],
dict(woot=15),
0,
-1.123,
True,
]
for bad in bads:
with np.testing.assert_raises(MagpylibBadUserInput):
magpy.current.Line(vertices=bad)
def test_input_objects_magnetization_moment_good():
"""
good input:
magpy.magnet.Cuboid(magnetization=inp),
magpy.misc.Dipole(moment=inp)
"""
goods = [
None,
(1, 2, 3),
(0, 0, 0),
[-1, -2, -3],
np.array((1, 2, 3)),
]
for good in goods:
src = magpy.magnet.Cuboid(good)
src2 = magpy.misc.Dipole(good)
if good is None:
assert src.magnetization is None
assert src2.moment is None
else:
np.testing.assert_allclose(src.magnetization, good)
np.testing.assert_allclose(src2.moment, good)
def test_input_objects_magnetization_moment_bad():
"""
bad input:
magpy.magnet.Cuboid(magnetization=inp),
magpy.misc.Dipole(moment=inp)
"""
bads = [
(1, 2),
[1, 2, 3, 4],
[(1, 2, 3)] * 2,
np.array([(1, 2, 3)] * 2),
"x",
["x", "y", "z"],
dict(woot=15),
0,
-1.123,
True,
]
for bad in bads:
with np.testing.assert_raises(MagpylibBadUserInput):
magpy.magnet.Cuboid(magnetization=bad)
with np.testing.assert_raises(MagpylibBadUserInput):
magpy.misc.Dipole(moment=bad)
def test_input_objects_dimension_cuboid_good():
"""good input: magpy.magnet.Cuboid(dimension=inp)"""
goods = [
None,
(1, 2, 3),
[11, 22, 33],
np.array((1, 2, 3)),
]
for good in goods:
src = magpy.magnet.Cuboid(dimension=good)
if good is None:
assert src.dimension is None
else:
np.testing.assert_allclose(src.dimension, good)
def test_input_objects_dimension_cuboid_bad():
"""bad input: magpy.magnet.Cuboid(dimension=inp)"""
bads = [
[-1, 2, 3],
(0, 1, 2),
(1, 2),
[1, 2, 3, 4],
[(1, 2, 3)] * 2,
np.array([(1, 2, 3)] * 2),
"x",
["x", "y", "z"],
dict(woot=15),
0,
True,
]
for bad in bads:
with np.testing.assert_raises(MagpylibBadUserInput):
magpy.magnet.Cuboid(dimension=bad)
def test_input_objects_dimension_cylinder_good():
"""good input: magpy.magnet.Cylinder(dimension=inp)"""
goods = [
None,
(1, 2),
[11, 22],
np.array((1, 2)),
]
for good in goods:
src = magpy.magnet.Cylinder(dimension=good)
if good is None:
assert src.dimension is None
else:
np.testing.assert_allclose(src.dimension, good)
def test_input_objects_dimension_cylinder_bad():
"""bad input: magpy.magnet.Cylinder(dimension=inp)"""
bads = [
[-1, 2],
(0, 1),
(1,),
[1, 2, 3],
[(1, 2)] * 2,
np.array([(2, 3)] * 2),
"x",
["x", "y"],
dict(woot=15),
0,
True,
]
for bad in bads:
with np.testing.assert_raises(MagpylibBadUserInput):
magpy.magnet.Cylinder(dimension=bad)
def test_input_objects_dimension_cylinderSegment_good():
"""good input: magpy.magnet.CylinderSegment(dimension=inp)"""
goods = [
None,
(0, 2, 3, 0, 50),
(1, 2, 3, 40, 50),
[11, 22, 33, 44, 360],
[11, 22, 33, -44, 55],
np.array((1, 2, 3, 4, 5)),
[11, 22, 33, -44, -33],
(0, 2, 3, -10, 0),
]
for good in goods:
src = magpy.magnet.CylinderSegment(dimension=good)
if good is None:
assert src.dimension is None
else:
np.testing.assert_allclose(src.dimension, good)
def test_input_objects_dimension_cylinderSegment_bad():
"""good input: magpy.magnet.CylinderSegment(dimension=inp)"""
bads = [
(1, 2, 3, 4),
(1, 2, 3, 4, 5, 6),
(0, 0, 3, 4, 5),
(2, 1, 3, 4, 5),
(-1, 2, 3, 4, 5),
(1, 2, 0, 4, 5),
(1, 2, -1, 4, 5),
(1, 2, 3, 5, 4),
[(1, 2, 3, 4, 5)] * 2,
np.array([(1, 2, 3, 4, 5)] * 2),
"x",
["x", "y", "z", 1, 2],
dict(woot=15),
0,
True,
]
for bad in bads:
with np.testing.assert_raises(MagpylibBadUserInput):
magpy.magnet.CylinderSegment(dimension=bad)
def test_input_objects_field_func_good():
"""good input: magpy.misc.CustomSource(field_func=f)"""
# pylint: disable=unused-argument
# init empty = None
src = magpy.misc.CustomSource()
np.testing.assert_raises(MagpylibMissingInput, src.getB, (1, 2, 3))
np.testing.assert_raises(MagpylibMissingInput, src.getH, (1, 2, 3))
# None
src = magpy.misc.CustomSource(field_func=None)
np.testing.assert_raises(MagpylibMissingInput, src.getB, (1, 2, 3))
np.testing.assert_raises(MagpylibMissingInput, src.getH, (1, 2, 3))
# acceptable func with B and H return
def f(field, observers):
"""3 in 3 out"""
return observers
src = magpy.misc.CustomSource(field_func=f)
np.testing.assert_allclose(src.getB((1, 2, 3)), (1, 2, 3))
np.testing.assert_allclose(src.getH((1, 2, 3)), (1, 2, 3))
# acceptable func with only B return
def ff(field, observers):
"""3 in 3 out"""
if field == "B":
return observers
return None
src = magpy.misc.CustomSource(field_func=ff)
np.testing.assert_allclose(src.getB((1, 2, 3)), (1, 2, 3))
np.testing.assert_raises(MagpylibMissingInput, src.getH, (1, 2, 3))
# acceptable func with only B return
def fff(field, observers):
"""3 in 3 out"""
if field == "H":
return observers
return None
src = magpy.misc.CustomSource(field_func=fff)
np.testing.assert_raises(MagpylibMissingInput, src.getB, (1, 2, 3))
np.testing.assert_allclose(src.getH((1, 2, 3)), (1, 2, 3))
def test_input_objects_field_func_bad():
"""bad input: magpy.misc.CustomSource(field_func=f)"""
# pylint: disable=unused-argument
# non callable
np.testing.assert_raises(MagpylibBadUserInput, magpy.misc.CustomSource, 1)
# bad arg names
def ff(fieldd, observers, whatever):
"""ff"""
np.testing.assert_raises(MagpylibBadUserInput, magpy.misc.CustomSource, ff)
# no ndarray return on B
def fff(field, observers):
"""fff"""
if field == "B":
return 1
np.testing.assert_raises(MagpylibBadUserInput, magpy.misc.CustomSource, fff)
# no ndarray return on H
def ffff(field, observers):
"""ffff"""
if field == "H":
return 1
return observers
np.testing.assert_raises(MagpylibBadUserInput, magpy.misc.CustomSource, ffff)
# bad return shape on B
def g(field, observers):
"""g"""
if field == "B":
return np.array([1, 2, 3])
np.testing.assert_raises(MagpylibBadUserInput, magpy.misc.CustomSource, g)
# bad return shape on H
def gg(field, observers):
"""gg"""
if field == "H":
return np.array([1, 2, 3])
return observers
np.testing.assert_raises(MagpylibBadUserInput, magpy.misc.CustomSource, gg)
###########################################################
###########################################################
# DISPLAY
def test_input_show_zoom_bad():
"""bad show zoom inputs"""
x = magpy.Sensor()
bads = [
(1, 2, 3),
-1,
]
for bad in bads:
np.testing.assert_raises(MagpylibBadUserInput, magpy.show, x, zoom=bad)
def test_input_show_animation_bad():
"""bad show animation inputs"""
x = magpy.Sensor()
bads = [
(1, 2, 3),
-1,
]
for bad in bads:
np.testing.assert_raises(MagpylibBadUserInput, magpy.show, x, animation=bad)
def test_input_show_backend_bad():
"""bad show backend inputs"""
x = magpy.Sensor()
bads = [
(1, 2, 3),
-1,
"x",
True,
]
for bad in bads:
np.testing.assert_raises(MagpylibBadUserInput, magpy.show, x, backend=bad)
def test_input_show_missing_parameters1():
"""missing inputs"""
s = magpy.magnet.Cuboid()
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.magnet.Cylinder()
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.magnet.CylinderSegment()
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.magnet.Sphere()
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.current.Loop()
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.current.Line()
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.misc.Dipole()
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
def test_input_show_missing_parameters2():
"""missing inputs"""
s = magpy.magnet.Cuboid(dimension=(1, 2, 3))
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.magnet.Cylinder(dimension=(1, 2))
np.testing.assert_raises(MagpylibMissingInput, magpy.show, s)
s = magpy.magnet.CylinderSegment(dimension=(1, 2, 3, 4, 5))
| np.testing.assert_raises(MagpylibMissingInput, magpy.show, s) | numpy.testing.assert_raises |
#!/usr/bin/env python
# A Global import to make code python 2 and 3 compatible
from __future__ import print_function
def make_graphs(graph_dir, mat_dict, centroids, aparc_names, n_rand=1000): #mat_dict comes from make_corr_matrices.py
'''
A function that makes all the required graphs from the correlation
matrices in mat_dict. These include the full graph with all
connections including weights, and binarized graphs at 30 different
costs betwen 1% to 30%. These graphs are fully connected because the
minimum spanning tree is used before the strongest edges are added
up to the required density.
If the graphs do not already exist they are saved as gpickle files in
graph_dir. If they do exist then they're read in from those files.
In addition, files with values for the nodal topological measures and
global topological measures are created and saved or loaded as
appropriate.
The function requires the centroids and aparc_names values in order
to calculate the nodal measures. The value n_rand is the number of
random graphs to calculate for the global and nodal measure
calculations.
The function returns a dictionary of graphs, nodal measures and
global measures
'''
#==========================================================================
# IMPORTS
#==========================================================================
import os
import networkx as nx
import numpy as np
import pickle
#==========================================================================
# Print to screen what you're up to
#==========================================================================
print ("--------------------------------------------------")
print ("Making or loading graphs")
#==========================================================================
# Create an empty dictionary
#==========================================================================
graph_dict = {}
#==========================================================================
# Loop through all the matrices in mat_dict
#==========================================================================
for k in mat_dict.keys():
print (' {}'.format(k))
# Read in the matrix
M = mat_dict[k]
# Get the covars name
mat_name, covars_name = k.split('_COVARS_')
#-------------------------------------------------------------------------
# Make the full graph first
#-------------------------------------------------------------------------
# Define the graph's file name and its dictionary key
g_filename = os.path.join(graph_dir,
'COVARS_{}'.format(covars_name),
'Graph_{}_COST_100.gpickle'.format(mat_name))
g_key = '{}_COST_100'.format(k)
        print (' Loading COST: 100', end=' ')
# If it already exists just read it in from the pickled file
if os.path.isfile(g_filename):
graph_dict[g_key] = nx.read_gpickle(g_filename)
# Otherwise you'll have to create it using the graph_at_cost function above
else:
graph_dict[g_key] = full_graph(M)
# Save it as a gpickle file so you don't have to do this next time!
dirname = os.path.dirname(g_filename)
if not os.path.isdir(dirname):
os.makedirs(dirname)
nx.write_gpickle(graph_dict[g_key], g_filename)
#-------------------------------------------------------------------------
# Then for all the different costs between 1% and 30%
#-------------------------------------------------------------------------
        for cost in [2] + list(range(5, 21, 5)):
#-------------------------------------------------------------------------
# Define the graph's file name along with those of the the associated
# global and nodal dictionaries
#-------------------------------------------------------------------------
g_filename = os.path.join(graph_dir,
'COVARS_{}'.format(covars_name),
'Graph_{}_COST_{:02.0f}.gpickle'.format(mat_name, cost))
global_dict_filename = os.path.join(graph_dir,
'COVARS_{}'.format(covars_name),
'GlobalDict_{}_COST_{:02.0f}.p'.format(mat_name, cost))
nodal_dict_filename = os.path.join(graph_dir,
'COVARS_{}'.format(covars_name),
'NodalDict_{}_COST_{:02.0f}.p'.format(mat_name, cost))
rich_club_filename = os.path.join(graph_dir,
'COVARS_{}'.format(covars_name),
'RichClub_{}_COST_{:02.0f}.p'.format(mat_name, cost))
g_key = '{}_COST_{:02.0f}'.format(k, cost)
#-------------------------------------------------------------------------
# Make or load the graph
#-------------------------------------------------------------------------
# If the graph already exists just read it in from the pickled file
if os.path.isfile(g_filename):
graph_dict[g_key] = nx.read_gpickle(g_filename)
# Otherwise you'll have to create it using the graph_at_cost function
else:
graph_dict[g_key] = graph_at_cost(M, cost)
# Save it as a gpickle file so you don't have to do this next time!
nx.write_gpickle(graph_dict[g_key], g_filename)
#-------------------------------------------------------------------------
# Make or load the global and nodal measures dictionaries
#-------------------------------------------------------------------------
# If the rich_club measures files already exists just read it
# and the nodal and global measures files in
if os.path.isfile(rich_club_filename):
# Print to screen so you know where you're up to
                if cost == 20:
                    print ('- {:02.0f}'.format(cost))
                else:
                    print ('- {:02.0f}'.format(cost), end=' ')
                graph_dict['{}_GlobalMeasures'.format(g_key)] = pickle.load(open(global_dict_filename, "rb"))
                graph_dict['{}_NodalMeasures'.format(g_key)] = pickle.load(open(nodal_dict_filename, "rb"))
                graph_dict['{}_RichClub'.format(g_key)] = pickle.load(open(rich_club_filename, "rb"))
# Otherwise you'll have to create them using the calculate_global_measures
# and calculate_nodal_measures functions
else:
G = graph_dict[g_key]
print ('\n Calculating COST: {:02.0f}'.format(cost))
# You need to calculate the same nodal partition for the global
# and nodal measures
nodal_partition = calc_nodal_partition(G)
# And you'll also want the same list of random graphs
R_list, R_nodal_partition_list = make_random_list(G, n_rand=n_rand)
graph_dict['{}_GlobalMeasures'.format(g_key)] = calculate_global_measures(G,
R_list=R_list,
nodal_partition=nodal_partition,
R_nodal_partition_list=R_nodal_partition_list)
(graph_dict[g_key],
graph_dict['{}_NodalMeasures'.format(g_key)]) = calculate_nodal_measures(G,
centroids,
aparc_names,
nodal_partition=nodal_partition)
graph_dict['{}_RichClub'.format(g_key)] = rich_club(G, R_list=R_list)
# Save them as pickle files so you don't have to do this next time!
pickle.dump(graph_dict['{}_GlobalMeasures'.format(g_key)], open(global_dict_filename, "wb"))
pickle.dump(graph_dict['{}_NodalMeasures'.format(g_key)], open(nodal_dict_filename, "wb"))
pickle.dump(graph_dict['{}_RichClub'.format(g_key)], open(rich_club_filename, "wb"))
nx.write_gpickle(graph_dict[g_key], g_filename)
# Return the full graph dictionary
return graph_dict
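# Illustrative usage sketch (not part of the original module): the names
# `mat_dict`, `centroids` and `aparc_names` are assumed to come from the
# upstream make_corr_matrices.py / parcellation steps referenced in the
# docstring above, so this only shows the shape of the expected call and the
# key format used for lookups, not a runnable script.
#
#   graph_dict = make_graphs('GRAPHS_DIR',
#                            mat_dict,      # {'<mat>_COVARS_<covars>': np.ndarray}
#                            centroids,     # (n_nodes, 3) array of region centroids
#                            aparc_names,   # list of n_nodes region names
#                            n_rand=1000)
#   G_02       = graph_dict['<mat>_COVARS_<covars>_COST_02']
#   global_02  = graph_dict['<mat>_COVARS_<covars>_COST_02_GlobalMeasures']
#   nodal_02   = graph_dict['<mat>_COVARS_<covars>_COST_02_NodalMeasures']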
def full_graph(M):
'''
Very easy, set the diagonals to 0
and then save the graph
'''
import numpy as np
import networkx as nx
# Make a copy of the matrix
thr_M = np.copy(M)
# Set all diagonal values to 0
thr_M[np.diag_indices_from(thr_M)] = 0
# Read this full matrix into a graph G
G = nx.from_numpy_matrix(thr_M)
return G
def graph_at_cost(M, cost):
'''
A function that first creates the minimum spanning tree
for the graph, and then adds in edges according to their
connection strength up to a particular cost
'''
import numpy as np
import networkx as nx
# Make a copy of the matrix
thr_M = | np.copy(M) | numpy.copy |
from typing import List, Tuple, Dict
import torch
import numpy as np
from data_loader.dataset.pas_dataset import PASDataset
from torch.utils.data import DataLoader
class PASDataLoader(DataLoader):
def __init__(self,
dataset: PASDataset,
batch_size: int,
shuffle: bool,
num_workers: int,
pin_memory: bool,
):
init_kwargs = {
'dataset': dataset,
'batch_size': batch_size,
'shuffle': shuffle,
'collate_fn': broadcast_collate_fn,
'num_workers': num_workers,
'pin_memory': pin_memory,
}
super().__init__(**init_kwargs)
def broadcast_collate_fn(batch: List[Tuple[np.ndarray, ...]]) -> Dict[str, torch.Tensor]:
input_ids, attention_mask, segment_ids, ng_token_mask, target, deps, task, overt_mask = zip(*batch) # Tuple[list]
ng_token_mask = np.broadcast_arrays(*ng_token_mask)
target = np.broadcast_arrays(*target)
deps = np.broadcast_arrays(*deps)
inputs = (input_ids, attention_mask, segment_ids, ng_token_mask, target, deps, task, overt_mask)
labels = ('input_ids', 'attention_mask', 'segment_ids', 'ng_token_mask', 'target', 'deps', 'task', 'overt_mask')
    return {label: torch.as_tensor(np.stack(elem, axis=0)) for label, elem in zip(labels, inputs)}
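if __name__ == "__main__":
    # Illustrative smoke test, not part of the original module: two dummy
    # samples with identical shapes, so np.broadcast_arrays is a no-op and the
    # collate function simply stacks each field along a new batch axis. The
    # field shapes below are placeholders, not the real PASDataset layout.
    def _dummy_sample(n: int = 4) -> tuple:
        return (np.zeros(n, dtype=np.int64),         # input_ids
                np.ones(n, dtype=np.int64),          # attention_mask
                np.zeros(n, dtype=np.int64),         # segment_ids
                np.ones((n, n), dtype=bool),         # ng_token_mask
                np.zeros((n, n), dtype=np.int64),    # target
                np.zeros((n, n), dtype=np.int64),    # deps
                np.zeros(n, dtype=np.int64),         # task
                np.zeros((n, n), dtype=np.int64))    # overt_mask
    batch = [_dummy_sample(), _dummy_sample()]
    collated = broadcast_collate_fn(batch)
    print({name: tuple(t.shape) for name, t in collated.items()})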
import numpy as np
import matplotlib.pyplot as plt
def gen_perlin(x, y, seed=None):
if seed is not None:
np.random.seed(seed)
p = np.arange(256, dtype=int)
np.random.shuffle(p)
p = np.stack([p,p]).flatten()
x1, y1 = x.astype(int), y.astype(int)
xf = x - x1
yf = y - y1
u = fade(xf)
v = fade(yf)
    n00 = gradient(p[p[x1] + y1], xf, yf)
    n01 = gradient(p[p[x1] + y1 + 1], xf, yf - 1)
    n11 = gradient(p[p[x1 + 1] + y1 + 1], xf - 1, yf - 1)
    n10 = gradient(p[p[x1 + 1] + y1], xf - 1, yf)
x1 = lerp(n00, n10, u)
x2 = lerp(n01, n11, u)
return lerp(x1, x2, v)
def lerp(a, b, x):
return a + x * (b-a)
def fade(t):
return 6 * t**5 - 15 * t **4 + 10 *t **3
def gradient(h, x, y):
    vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])
    # Assumed completion: the original file is cut off after the line above.
    # Pick one of the four axis-aligned gradient vectors per lattice corner
    # and take its dot product with the offset (x, y).
    g = vectors[h % 4]
    return g[:, :, 0] * x + g[:, :, 1] * y
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
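# Illustrative example (comment only, not part of the generated data): for the
# centrosymmetric group P -1 defined above, symmetryEquivalentMillerIndices
# returns the reflection itself and its Friedel mate, both with unit phase
# factors, e.g.
#
#   hkls, phases = space_groups['P -1'].symmetryEquivalentMillerIndices(
#       N.array([1, 2, 3]))
#   # hkls   -> [[ 1,  2,  3], [-1, -2, -3]]
#   # phases -> [1.+0.j, 1.+0.j]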
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
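# Note (assumption about the generator's convention): translations are stored
# exactly as produced by combining a point-group operation with the lattice
# centring vector, so components outside [0, 1) such as (1/2, 1/2, 1) in the
# I b a 2 block above (trans_num = [1,1,1], trans_den = [2,2,1]) appear
# verbatim. A sketch of wrapping such a translation back into the unit cell,
# should reduced operators be wanted:
#
#     frac = 1.0 * trans_num / trans_den
#     frac = frac - N.floor(frac)          # reduce each component mod 1
#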
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
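# Usage sketch: every group in this table is registered twice in the
# `space_groups` dictionary, once under its International Tables number and
# once under its Hermann-Mauguin symbol, and both keys map to the same object:
#
#     assert space_groups[47] is space_groups['P m m m']
#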
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
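# Note: symbols carrying a ':2' suffix (e.g. 'P n n n :2' above, and later
# 'P b a n :2', 'P m m n :2', 'C c c a :2', 'F d d d :2') presumably denote
# the origin-choice-2 setting of the International Tables, with the origin
# placed on the inversion centre.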
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
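# Note: for the face-centred group F m m m defined above, the eight
# point-group operations are repeated under each of the centring translations
# (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0), giving 8 x 4 = 32 entries in
# `transformations`; the body-centred (I ...) and base-centred (A/C ...)
# groups in this table follow the same expansion with their single extra
# centring vector.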
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
"""
District Cooling Network Calculations.
Calculate which technologies need to be activated to meet the cooling energy demand and determine the cost and emissions
that result from the activation of these cooling technologies.
"""
import numpy as np
import pandas as pd
from cea.constants import HOURS_IN_YEAR
from cea.optimization.constants import VCC_T_COOL_IN, ACH_T_IN_FROM_CHP_K
from cea.optimization.master import cost_model
from cea.optimization.slave.cooling_resource_activation import calc_vcc_CT_operation, cooling_resource_activator
from cea.technologies.storage_tank_pcm import Storage_tank_PCM
from cea.technologies.chiller_vapor_compression import VaporCompressionChiller
from cea.technologies.cogeneration import calc_cop_CCGT
from cea.technologies.chiller_absorption import AbsorptionChiller
from cea.technologies.supply_systems_database import SupplySystemsDatabase
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def district_cooling_network(locator,
config,
master_to_slave_variables,
network_features,
weather_features):
"""
Computes the parameters for the cooling of the complete DCN, including:
- cost for cooling energy supply
- hourly cooling energy supply
- hourly electricity generation (from trigen) and demand (for VCCs)
- hourly combustion fuel demand (for trigen)
- installed capacity of each cooling technology
:param locator: paths to cea input files and results folders
:param master_to_slave_variables: all the important information on the energy system configuration of an individual
(buildings [connected, non-connected], heating technologies, cooling technologies,
storage etc.)
:param config: configurations of cea
:param network_features: characteristic parameters (pumping energy, mass flow rate, thermal losses & piping cost)
of the district cooling/heating network
:param weather_features: important environmental parameters (e.g. ambient & ground temperature)
:type locator: cea.inputlocator.InputLocator class object
:type master_to_slave_variables: cea.optimization.slave_data.SlaveData class object
:type config: cea.config.Configuration class object
:type network_features: cea.optimization.distribution.network_optimization_features.NetworkOptimizationFeatures
class object
:type weather_features: cea.optimization.preprocessing.preprocessing_main.WeatherFeatures class object
:return district_cooling_costs: costs of all district cooling energy technologies (investment and operational costs
of generation, storage and network)
:return district_cooling_generation_dispatch: hourly thermal energy supply by each component of the district
cooling energy system.
:return district_cooling_electricity_requirements_dispatch: hourly electricity demand of each component of the
district cooling energy generation system.
:return district_cooling_fuel_requirements_dispatch: hourly combustion fuel demand of each component of the
district cooling energy system (i.e. in the current setup only
natural gas demand of the CCGT of the trigeneration system)
:return district_cooling_capacity_installed: capacity of each district-scale cooling technology installed
(corresponding to the given individual)
:rtype district_cooling_costs: dict (27 x float)
:rtype district_cooling_generation_dispatch: dict (15 x 8760-ndarray)
:rtype district_cooling_electricity_requirements_dispatch: dict (6 x 8760-ndarray)
:rtype district_cooling_fuel_requirements_dispatch: dict (1 x 8760-ndarray)
:rtype district_cooling_capacity_installed: dict (9 x float)
"""
if master_to_slave_variables.DCN_exists:
print("DISTRICT COOLING OPERATION")
# THERMAL STORAGE + NETWORK
# Import Temperatures from Network Summary:
Q_thermal_req_W, \
T_district_cooling_return_K, \
T_district_cooling_supply_K, \
mdot_kgpers = calc_network_summary_DCN(master_to_slave_variables)
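# The four arrays above are hourly profiles over the year (8760 values):
# cooling demand of the network [W], return and supply temperatures of the
# district cooling water [K], and the network mass flow rate [kg/s].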
# Initialize daily storage class
T_ground_K = weather_features.ground_temp
daily_storage = Storage_tank_PCM(activation=master_to_slave_variables.Storage_cooling_on,
size_Wh=master_to_slave_variables.Storage_cooling_size_W,
database_model_parameters=pd.read_excel(locator.get_database_conversion_systems(), sheet_name="TES"),
T_ambient_K=np.average(T_ground_K),
type_storage=config.optimization.cold_storage_type,
debug=master_to_slave_variables.debug
)
# Import Data - cooling energy potential from water bodies
if master_to_slave_variables.WS_BaseVCC_on == 1 or master_to_slave_variables.WS_PeakVCC_on == 1:
water_body_potential = pd.read_csv(locator.get_water_body_potential())
Q_therm_water_body = np.array(water_body_potential['QLake_kW']) * 1E3
total_WS_VCC_installed = master_to_slave_variables.WS_BaseVCC_size_W + \
master_to_slave_variables.WS_PeakVCC_size_W
# TODO: the following line assumes that the thermal energy from the water body is used 1:1 by the VCC.
# i.e. thermal_energy_in = thermal_energy_out for the VCC. Check if this assumption is correct.
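# cap the hourly usable potential at the total installed capacity of the
# water-source vapour compression chillers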
Q_therm_water_body_W = [x if x < total_WS_VCC_installed else total_WS_VCC_installed for x in
Q_therm_water_body]
T_source_average_water_body_K = np.array(water_body_potential['Ts_C']) + 273
else:
Q_therm_water_body_W = np.zeros(HOURS_IN_YEAR)
T_source_average_water_body_K = np.zeros(HOURS_IN_YEAR)
# get properties of technology used in this script
absorption_chiller = AbsorptionChiller(
pd.read_excel(locator.get_database_conversion_systems(), sheet_name="Absorption_chiller"), 'double')
CCGT_prop = calc_cop_CCGT(master_to_slave_variables.NG_Trigen_ACH_size_W, ACH_T_IN_FROM_CHP_K, "NG")
VC_chiller = VaporCompressionChiller(locator, scale='DISTRICT')
# initialize variables
Q_Trigen_NG_gen_W = np.zeros(HOURS_IN_YEAR)
Q_BaseVCC_WS_gen_W = np.zeros(HOURS_IN_YEAR)
Q_PeakVCC_WS_gen_W = np.zeros(HOURS_IN_YEAR)
Q_BaseVCC_AS_gen_W = np.zeros(HOURS_IN_YEAR)
Q_PeakVCC_AS_gen_W = np.zeros(HOURS_IN_YEAR)
Q_DailyStorage_content_W = np.zeros(HOURS_IN_YEAR)
Q_DailyStorage_to_storage_W = np.zeros(HOURS_IN_YEAR)
Q_DailyStorage_from_storage_W = np.zeros(HOURS_IN_YEAR)
E_Trigen_NG_gen_W = np.zeros(HOURS_IN_YEAR)
E_BaseVCC_AS_req_W = np.zeros(HOURS_IN_YEAR)
E_PeakVCC_AS_req_W = np.zeros(HOURS_IN_YEAR)
E_BaseVCC_WS_req_W = np.zeros(HOURS_IN_YEAR)
E_PeakVCC_WS_req_W = np.zeros(HOURS_IN_YEAR)
NG_Trigen_req_W = np.zeros(HOURS_IN_YEAR)
Q_BackupVCC_AS_gen_W = np.zeros(HOURS_IN_YEAR)
Q_Trigen_NG_gen_directload_W = np.zeros(HOURS_IN_YEAR)
Q_BaseVCC_WS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
Q_PeakVCC_WS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
Q_BaseVCC_AS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
Q_PeakVCC_AS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
Q_BackupVCC_AS_directload_W = np.zeros(HOURS_IN_YEAR)
for hour in range(HOURS_IN_YEAR): # cooling supply for all buildings excluding cooling loads from data centers
daily_storage.hour = hour
if master_to_slave_variables.debug is True: print("\nHour {:.0f}".format(hour))
if Q_thermal_req_W[hour] > 0.0:
# only if there is a cooling load!
daily_storage, \
thermal_output, \
electricity_output, \
gas_output = cooling_resource_activator(Q_thermal_req_W[hour],
T_district_cooling_supply_K[hour],
T_district_cooling_return_K[hour],
Q_therm_water_body_W[hour],
T_source_average_water_body_K[hour],
T_ground_K[hour],
daily_storage,
absorption_chiller,
VC_chiller,
CCGT_prop,
master_to_slave_variables)
Q_DailyStorage_content_W[hour] = thermal_output['Qc_DailyStorage_content_W']
Q_DailyStorage_to_storage_W[hour] = thermal_output['Qc_DailyStorage_to_storage_W']
Q_DailyStorage_from_storage_W[hour] = thermal_output['Qc_DailyStorage_from_storage_W']
Q_Trigen_NG_gen_directload_W[hour] = thermal_output['Qc_Trigen_NG_gen_directload_W']
Q_BaseVCC_WS_gen_directload_W[hour] = thermal_output['Qc_BaseVCC_WS_gen_directload_W']
Q_PeakVCC_WS_gen_directload_W[hour] = thermal_output['Qc_PeakVCC_WS_gen_directload_W']
Q_BaseVCC_AS_gen_directload_W[hour] = thermal_output['Qc_BaseVCC_AS_gen_directload_W']
Q_PeakVCC_AS_gen_directload_W[hour] = thermal_output['Qc_PeakVCC_AS_gen_directload_W']
Q_BackupVCC_AS_directload_W[hour] = thermal_output['Qc_BackupVCC_AS_directload_W']
Q_Trigen_NG_gen_W[hour] = thermal_output['Qc_Trigen_NG_gen_W']
Q_BaseVCC_WS_gen_W[hour] = thermal_output['Qc_BaseVCC_WS_gen_W']
Q_PeakVCC_WS_gen_W[hour] = thermal_output['Qc_PeakVCC_WS_gen_W']
Q_BaseVCC_AS_gen_W[hour] = thermal_output['Qc_BaseVCC_AS_gen_W']
Q_PeakVCC_AS_gen_W[hour] = thermal_output['Qc_PeakVCC_AS_gen_W']
Q_BackupVCC_AS_gen_W[hour] = thermal_output['Qc_BackupVCC_AS_gen_W']
E_BaseVCC_WS_req_W[hour] = electricity_output['E_BaseVCC_WS_req_W']
E_PeakVCC_WS_req_W[hour] = electricity_output['E_PeakVCC_WS_req_W']
E_BaseVCC_AS_req_W[hour] = electricity_output['E_BaseVCC_AS_req_W']
E_PeakVCC_AS_req_W[hour] = electricity_output['E_PeakVCC_AS_req_W']
E_Trigen_NG_gen_W[hour] = electricity_output['E_Trigen_NG_gen_W']
NG_Trigen_req_W[hour] = gas_output['NG_Trigen_req_W']
# calculate the electrical capacity as a function of the peak produced by the turbine
master_to_slave_variables.NG_Trigen_CCGT_size_electrical_W = E_Trigen_NG_gen_W.max()
# BACK-UP VCC - AIR SOURCE
master_to_slave_variables.AS_BackupVCC_size_W = np.amax(Q_BackupVCC_AS_gen_W)
size_chiller_CT = master_to_slave_variables.AS_BackupVCC_size_W
if master_to_slave_variables.AS_BackupVCC_size_W != 0.0:
master_to_slave_variables.AS_BackupVCC_on = 1
Q_BackupVCC_AS_gen_W, \
E_BackupVCC_AS_req_W = np.vectorize(calc_vcc_CT_operation)(Q_BackupVCC_AS_gen_W,
T_district_cooling_return_K,
T_district_cooling_supply_K,
VCC_T_COOL_IN,
size_chiller_CT,
VC_chiller)
else:
E_BackupVCC_AS_req_W = np.zeros(HOURS_IN_YEAR)
# CAPEX (ANNUAL, TOTAL) AND OPEX (FIXED, VAR, ANNUAL) GENERATION UNITS
supply_systems = SupplySystemsDatabase(locator)
mdotnMax_kgpers = np.amax(mdot_kgpers)
performance_costs_generation, \
district_cooling_capacity_installed \
= cost_model.calc_generation_costs_capacity_installed_cooling(locator,
master_to_slave_variables,
supply_systems,
mdotnMax_kgpers
)
# CAPEX (ANNUAL, TOTAL) AND OPEX (FIXED, VAR, ANNUAL) STORAGE UNITS
performance_costs_storage = cost_model.calc_generation_costs_cooling_storage(master_to_slave_variables,
daily_storage
)
# CAPEX (ANNUAL, TOTAL) AND OPEX (FIXED, VAR, ANNUAL) NETWORK
performance_costs_network, \
E_used_district_cooling_network_W = cost_model.calc_network_costs_cooling(locator,
master_to_slave_variables,
network_features,
"DC")
# MERGE COSTS AND EMISSIONS IN ONE FILE
performance = dict(performance_costs_generation, **performance_costs_storage)
district_cooling_costs = dict(performance, **performance_costs_network)
else:
Q_thermal_req_W = np.zeros(HOURS_IN_YEAR)
Q_DailyStorage_from_storage_W = np.zeros(HOURS_IN_YEAR)
Q_DailyStorage_content_W = np.zeros(HOURS_IN_YEAR)
Q_DailyStorage_to_storage_W = np.zeros(HOURS_IN_YEAR)
Q_Trigen_NG_gen_directload_W = np.zeros(HOURS_IN_YEAR)
Q_BaseVCC_WS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
Q_PeakVCC_WS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
Q_BaseVCC_AS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
Q_PeakVCC_AS_gen_directload_W = np.zeros(HOURS_IN_YEAR)
Q_BackupVCC_AS_directload_W = np.zeros(HOURS_IN_YEAR)
Q_Trigen_NG_gen_W = np.zeros(HOURS_IN_YEAR)
Q_BaseVCC_WS_gen_W = np.zeros(HOURS_IN_YEAR)
Q_PeakVCC_WS_gen_W = np.zeros(HOURS_IN_YEAR)
Q_BaseVCC_AS_gen_W = np.zeros(HOURS_IN_YEAR)
Q_PeakVCC_AS_gen_W = np.zeros(HOURS_IN_YEAR)
Q_BackupVCC_AS_gen_W = np.zeros(HOURS_IN_YEAR)
E_Trigen_NG_gen_W = np.zeros(HOURS_IN_YEAR)
E_used_district_cooling_network_W = np.zeros(HOURS_IN_YEAR)
E_BaseVCC_WS_req_W = np.zeros(HOURS_IN_YEAR)
E_PeakVCC_WS_req_W = np.zeros(HOURS_IN_YEAR)
E_BaseVCC_AS_req_W = np.zeros(HOURS_IN_YEAR)
E_PeakVCC_AS_req_W = np.zeros(HOURS_IN_YEAR)
E_BackupVCC_AS_req_W = np.zeros(HOURS_IN_YEAR)
"""
Wrappers around qcodes.utils.dataset.doNd functions that live-plot
data during the sweep.
"""
import re
import sys
import inspect
import functools
import itertools
import numpy as np
from typing import Any, Optional, Union, Tuple, List, Mapping
from dataclasses import dataclass, field
from qcodes import config
from qcodes.dataset.data_set import DataSet
from qcodes.dataset.descriptions.param_spec import ParamSpecBase
from qcodes.instrument.parameter import _BaseParameter
import qcodes.utils.dataset.doNd as doNd
from ..plot import PlotWindow, PlotItem, PlotDataItem, ImageItem, TableWidget
from ..plot.plot_tools import save_figure
from ..logging import get_logger
# Get access to module level variables
this = sys.modules[__name__]
this.current = None
logger = get_logger("tools.doNd")
# Utility functions for parsing parameter names from plot titles
_param = r"(\w+)\s+\([^)]+\)"
_single_id = r"(\d+)(?:-(\d+))?(?:, (?=\d))?"
_id = r"(\(id:\s+(?:\d+(?:-\d+)?(?:, (?=\d))?)+\))"
_id_re = re.compile(_single_id, re.IGNORECASE)
_plot_title_re = re.compile(r"("+_param+r"\s+v\.(?:<br>|\s)+"+_param+r")\s+"+_id, re.MULTILINE|re.IGNORECASE)
_single_param_title_re = re.compile(r"("+_param+r")\s*"+_id, re.MULTILINE)
def _get_window(append, size=(1000, 600)):
"""
Return a handle to a plot window to use for this plot.
If append is False, create a new plot window, otherwise return
a handle to the given window, or the last created window.
Args:
append (Union[bool, PlotWindow]): If true, return the last
created plot window, if PlotWindow, return that window, otherwise
a new window will be created.
size (Tuple[int, int]): The size in px of the new plot window. If append
is not false, this parameter has no effect.
"""
# Set up a plotting window
if append is None or append is False:
win = PlotWindow()
win.win_title = 'ID: '
win.resize(*size)
elif isinstance(append, PlotWindow):
# Append to the given window
win = append
elif isinstance(append, bool):
# Append to the last trace if true
win = PlotWindow.getWindows()[-1]
else:
raise ValueError("Unknown argument to append. Either give a plot window"
" or true to append to the last plot")
return win
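# Illustrative usage of _get_window (assumed, for reference only):
#   win = _get_window(False)          # always opens a fresh PlotWindow
#   win = _get_window(True)           # reuses the most recently created window
#   win = _get_window(existing_win)   # plots into a specific PlotWindow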
def _explode_ids(ids_str: str) -> Tuple[int, ...]:
"""
Explode a compressed id string from a plot title into a tuple of all
individual ids.
"""
ids = []
for match in _id_re.finditer(ids_str):
start, stop = match.groups()
if stop is None:
ids.append(int(start))
else:
ids.extend(range(int(start), int(stop)+1))
return tuple(ids)
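# e.g. (illustrative): _explode_ids("3-5, 8") -> (3, 4, 5, 8)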
def _reduce_ids(ids: List[int]):
strings = []
i = 1
r = 0
while i < len(ids):
if ids[i] == ids[i-1]+1:
i += 1
else:
if i-1 == r:
strings.append(f"{ids[r]}")
else:
strings.append(f"{ids[r]}-{ids[i-1]}")
r = i
i += 1
if i-1 == r:
strings.append(f"{ids[r]}")
else:
strings.append(f"{ids[r]}-{ids[i-1]}")
return strings
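# e.g. (illustrative): _reduce_ids([3, 4, 5, 8]) -> ["3-5", "8"]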
def _parse_title(title) -> Tuple[str, Tuple[str], Tuple[int]]:
match = _plot_title_re.fullmatch(title)
if not match:
# Might be a single title re
match = _single_param_title_re.fullmatch(title)
if not match:
return None
paramstr, param_name, ids = match.groups()
ids = _explode_ids(ids)
return (paramstr, (param_name,), ids)
paramstr, param1_name, param2_name, ids = match.groups()
ids = _explode_ids(ids)
return (paramstr, (param1_name, param2_name), ids)
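# Illustrative example of the expected title format (parameter names and
# values are made up here purely for reference):
#   _parse_title("Current (A) v.<br>Gate (V) (id: 3-5, 8)")
#   -> ("Current (A) v.<br>Gate (V)", ("Current", "Gate"), (3, 4, 5, 8))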
def _compatible_plot_item(win: PlotWindow,
p_bot: ParamSpecBase,
p_left: Optional[ParamSpecBase] = None) -> Optional[PlotItem]:
"""
Returns a compatible plot item if found
"""
if p_left is not None:
axes = (p_bot.name, p_left.name)
else:
axes = (p_bot.name, )
for item in win.items:
if isinstance(item, PlotItem):
_, params, _ = _parse_title(item.plot_title)
if params == axes:
return item
return None
def _register_subscriber():
"""
Register live plotting in the qcodes config object.
"""
if "qcm" not in config.subscription.subscribers:
logger.info("Registering qcm as a default subscriber")
config.subscription.subscribers["qcm"] = {
'factory': 'qcodes_measurements.tools.doNd.subscriber',
'factory_kwargs': {},
'subscription_kwargs': {
'min_wait': 10,
'min_count': 0,
'callback_kwargs': {}
}
}
config.subscription.default_subscribers.append("qcm")
# Tuple for live plotting
@dataclass(frozen=False)
class LivePlotWindow:
plot_window: Optional[PlotWindow]
stack: bool = False
append: bool = False
dataset: DataSet = None
datacount: Mapping[str, int] = field(default_factory=dict)
table_items: Mapping[str, Union[int, float]] = None
plot_items: Mapping[str, Union[PlotDataItem, ImageItem]] = field(default_factory=dict)
plot_params: List[_BaseParameter] = None
def do_nothing(new_data, data_len, state):
"""
Function that does nothing
"""
return
def update_plots(new_data, data_len, state):
"""
Function that updates plots when live plotting
"""
write_count = this.current.dataset.cache._write_status
# Don't update if we haven't started measuring yet
if not write_count or any(wc == 0 for wc in write_count.values()): return
run_desc = this.current.dataset.description
data_cache = this.current.dataset.cache.data()
params = run_desc.interdeps
shapes = run_desc.shapes
plot_items = this.current.plot_items.items()
table_items = this.current.table_items.items() if this.current.table_items is not None else ()
for param, plotitem in itertools.chain(plot_items, table_items):
# Keep track of how much of the plot we've written, and only update
# parameters that are being measured.
if param not in write_count:
continue
if param not in this.current.datacount:
this.current.datacount[param] = write_count[param]
elif write_count[param] == this.current.datacount[param]:
continue
else:
this.current.datacount[param] = write_count[param]
# Update plots
if shapes[param] == (1,):
val = data_cache[param][param][0]
if isinstance(val, (float, np.float16, np.float32, np.float64)):
val = np.format_float_scientific(val)
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
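# with this mask the resulting index is (0, 1], NaN, (2, 3], ..., (9, 10]
# (interval notation shown for closed='right')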
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right'),
IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]),
IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed)
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples(
[(0, 1), ('a', 'b'), (0, 1)], closed=closed)
assert not idx.is_unique
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
i = IntervalIndex.from_tuples([(Timestamp('20130101'),
Timestamp('20130102')),
(Timestamp('20130102'),
Timestamp('20130103'))],
closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks([0, 1, 2], closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
def test_slice_locs_decreasing_float64(self):
self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlaps completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index.union(other)
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.union(index), index)
tm.assert_index_equal(index.union(index[:1]), index)
def test_intersection(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
result = other.intersection(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.intersection(index), index)
def test_difference(self, closed):
index = self.create_index(closed=closed)
tm.assert_index_equal(index.difference(index[:1]), index[1:])
def test_symmetric_difference(self, closed):
idx = self.create_index(closed=closed)
result = idx[1:].symmetric_difference(idx[:-1])
expected = IntervalIndex([idx[0], idx[-1]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
def test_set_operation_errors(self, closed, op_name):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# test errors
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
with tm.assert_raises_regex(ValueError, msg):
set_op(Index([1, 2, 3]))
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
set_op(other)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
def test_datetime(self):
dates = date_range('2000', periods=3)
idx = IntervalIndex.from_breaks(dates)
tm.assert_index_equal(idx.left, dates[:2])
tm.assert_index_equal(idx.right, dates[-2:])
expected = date_range('2000-01-01T12:00', periods=2)
tm.assert_index_equal(idx.mid, expected)
assert Timestamp('2000-01-01T12') not in idx
target = date_range('1999-12-31T12:00', periods=7, freq='12H')
actual = idx.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False for closed='both', otherwise True (GH16560)
if closed == 'both':
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
class TestIntervalRange(object):
def test_construction_from_numeric(self, closed, name):
# combinations of start/end/periods without freq
expected = IntervalIndex.from_breaks(
np.arange(0, 6), name=name, closed=closed)
result = interval_range(start=0, end=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=5, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with freq
expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)],
name=name, closed=closed)
result = interval_range(start=0, end=6, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=6, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)],
name=name, closed=closed)
result = interval_range(start=0, end=4, freq=1.5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timestamp(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
breaks = date_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2017-01-08')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with non-fixed freq
freq = 'M'
start, end = Timestamp('2017-01-01'), Timestamp('2017-12-31')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2018-01-15')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timedelta(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timedelta('1 day'), Timedelta('6 days')
breaks = timedelta_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timedelta('1 day'), Timedelta('7 days')
breaks = timedelta_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timedelta('7 days 1 hour')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_constructor_coverage(self):
# float value for periods
expected = pd.interval_range(start=0, periods=10)
result = pd.interval_range(start=0, periods=10.5)
tm.assert_index_equal(result, expected)
# equivalent timestamp-like start/end
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15')
expected = pd.interval_range(start=start, end=end)
result = pd.interval_range(start=start.to_pydatetime(),
end=end.to_pydatetime())
tm.assert_index_equal(result, expected)
result = pd.interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# equivalent freq with timestamp
equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1),
DateOffset(days=1)]
for freq in equiv_freq:
result = pd.interval_range(start=start, end=end, freq=freq)
tm.assert_index_equal(result, expected)
# equivalent timedelta-like start/end
start, end = Timedelta(days=1), Timedelta(days=10)
expected = pd.interval_range(start=start, end=end)
result = pd.interval_range(start=start.to_pytimedelta(),
end=end.to_pytimedelta())
tm.assert_index_equal(result, expected)
result = pd.interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# equivalent freq with timedelta
equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1)]
for freq in equiv_freq:
result = pd.interval_range(start=start, end=end, freq=freq)
tm.assert_index_equal(result, expected)
def test_errors(self):
# not enough params
msg = ('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=0)
with tm.assert_raises_regex(ValueError, msg):
interval_range(end=5)
with tm.assert_raises_regex(ValueError, msg):
interval_range(periods=2)
with tm.assert_raises_regex(ValueError, msg):
interval_range()
# too many params
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=0, end=5, periods=6)
# mixed units
msg = 'start, end, freq need to be type compatible'
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, end=Timestamp('20130101'), freq=2)
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, end=Timedelta('1 day'), freq=2)
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, end=10, freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timestamp('20130101'), end=10, freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timestamp('20130101'),
end=Timedelta('1 day'), freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timestamp('20130101'),
end=Timestamp('20130110'), freq=2)
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timedelta('1 day'), end=10, freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timedelta('1 day'),
end=Timestamp('20130110'), freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timedelta('1 day'),
end=Timedelta('10 days'), freq=2)
# invalid periods
msg = 'periods must be a number, got foo'
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, periods='foo')
# invalid start
msg = 'start must be numeric or datetime-like, got foo'
with tm.assert_raises_regex(ValueError, msg):
interval_range(start='foo', periods=10)
# invalid end
msg = r'end must be numeric or datetime-like, got \(0, 1\]'
with tm.assert_raises_regex(ValueError, msg):
interval_range(end=Interval(0, 1), periods=10)
# invalid freq for datetime-like
msg = 'freq must be numeric or convertible to DateOffset, got foo'
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=0, end=10, freq='foo')
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=Timestamp('20130101'), periods=10, freq='foo')
with tm.assert_raises_regex(ValueError, msg):
interval_range(end=Timedelta('1 day'), periods=10, freq='foo')
class TestIntervalTree(object):
def setup_method(self, method):
gentree = lambda dtype: IntervalTree(np.arange(5, dtype=dtype),
np.arange(5, dtype=dtype) + 2)
self.tree = gentree('int64')
self.trees = {dtype: gentree(dtype)
for dtype in ['int32', 'int64', 'float32', 'float64']}
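        # gentree(dtype) indexes the intervals (0, 2], (1, 3], (2, 4], (3, 5], (4, 6]
        # (IntervalTree defaults to closed='right'); the expected get_loc/get_indexer
        # values in the tests below follow from that layout.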
def test_get_loc(self):
for dtype, tree in self.trees.items():
tm.assert_numpy_array_equal(tree.get_loc(1),
np.array([0], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(tree.get_loc(2)),
np.array([0, 1], dtype='int64'))
with pytest.raises(KeyError):
tree.get_loc(-1)
def test_get_indexer(self):
for dtype, tree in self.trees.items():
tm.assert_numpy_array_equal(
tree.get_indexer(np.array([1.0, 5.5, 6.5])),
np.array([0, 4, -1], dtype='int64'))
with pytest.raises(KeyError):
tree.get_indexer(np.array([3.0]))
def test_get_indexer_non_unique(self):
indexer, missing = self.tree.get_indexer_non_unique(
np.array([1.0, 2.0, 6.5]))
tm.assert_numpy_array_equal(indexer[:1],
np.array([0], dtype='int64'))
        tm.assert_numpy_array_equal(np.sort(indexer[1:3]),
                                    np.array([0, 1], dtype='int64'))
        tm.assert_numpy_array_equal(missing, np.array([2], dtype='int64'))
from __future__ import print_function, division
from warnings import warn
import pandas as pd
import numpy as np
import pickle
import copy
from nilmtk.utils import find_nearest
from nilmtk.feature_detectors import cluster
from nilmtk.disaggregate import Disaggregator
from nilmtk.datastore import HDFDataStore
# Fix the seed for repeatability of experiments
SEED = 42
np.random.seed(SEED)
class CombinatorialOptimisation(Disaggregator):
"""1 dimensional combinatorial optimisation NILM algorithm.
Attributes
----------
model : list of dicts
Each dict has these keys:
states : list of ints (the power (Watts) used in different states)
training_metadata : ElecMeter or MeterGroup object used for training
this set of states. We need this information because we
need the appliance type (and perhaps some other metadata)
for each model.
state_combinations : 2D array
Each column is an appliance.
Each row is a possible combination of power demand values e.g.
[[0, 0, 0, 0],
[0, 0, 0, 100],
[0, 0, 50, 0],
[0, 0, 50, 100], ...]
MIN_CHUNK_LENGTH : int
"""
def __init__(self):
self.model = []
self.state_combinations = None
self.MIN_CHUNK_LENGTH = 100
self.MODEL_NAME = 'CO'
def train(self, metergroup, num_states_dict=None, **load_kwargs):
"""Train using 1D CO. Places the learnt model in the `model` attribute.
Parameters
----------
metergroup : a nilmtk.MeterGroup object
num_states_dict : dict
**load_kwargs : keyword arguments passed to `meter.power_series()`
Notes
-----
* only uses first chunk for each meter (TODO: handle all chunks).
"""
if num_states_dict is None:
num_states_dict = {}
if self.model:
raise RuntimeError(
"This implementation of Combinatorial Optimisation"
" does not support multiple calls to `train`.")
num_meters = len(metergroup.meters)
if num_meters > 12:
max_num_clusters = 2
else:
max_num_clusters = 3
for i, meter in enumerate(metergroup.submeters().meters):
print("Training model for submeter '{}'".format(meter))
power_series = meter.power_series(**load_kwargs)
chunk = next(power_series)
num_total_states = num_states_dict.get(meter)
if num_total_states is not None:
num_on_states = num_total_states - 1
else:
num_on_states = None
self.train_on_chunk(chunk, meter, max_num_clusters, num_on_states)
# Check to see if there are any more chunks.
# TODO handle multiple chunks per appliance.
try:
next(power_series)
except StopIteration:
pass
else:
warn("The current implementation of CombinatorialOptimisation"
" can only handle a single chunk. But there are multiple"
" chunks available. So have only trained on the"
" first chunk!")
print("Done training!")
def train_on_chunk(self, chunk, meter, max_num_clusters, num_on_states):
# Check if we've already trained on this meter
meters_in_model = [d['training_metadata'] for d in self.model]
if meter in meters_in_model:
raise RuntimeError(
"Meter {} is already in model!"
" Can't train twice on the same meter!"
.format(meter))
states = cluster(chunk, max_num_clusters, num_on_states)
self.model.append({
'states': states,
'training_metadata': meter})
def _set_state_combinations_if_necessary(self):
"""Get centroids"""
# If we import sklearn at the top of the file then auto doc fails.
if (self.state_combinations is None or
self.state_combinations.shape[1] != len(self.model)):
from sklearn.utils.extmath import cartesian
centroids = [model['states'] for model in self.model]
self.state_combinations = cartesian(centroids)
def disaggregate(self, mains, output_datastore,**load_kwargs):
'''Disaggregate mains according to the model learnt previously.
Parameters
----------
mains : nilmtk.ElecMeter or nilmtk.MeterGroup
output_datastore : instance of nilmtk.DataStore subclass
For storing power predictions from disaggregation algorithm.
sample_period : number, optional
The desired sample period in seconds. Set to 60 by default.
sections : TimeFrameGroup, optional
Set to mains.good_sections() by default.
**load_kwargs : key word arguments
Passed to `mains.power_series(**kwargs)`
'''
load_kwargs = self._pre_disaggregation_checks(load_kwargs)
load_kwargs.setdefault('sample_period', 60)
load_kwargs.setdefault('sections', mains.good_sections())
timeframes = []
building_path = '/building{}'.format(mains.building())
mains_data_location = building_path + '/elec/meter1'
data_is_available = False
for chunk in mains.power_series(**load_kwargs):
# Check that chunk is sensible size
if len(chunk) < self.MIN_CHUNK_LENGTH:
continue
# Record metadata
timeframes.append(chunk.timeframe)
measurement = chunk.name
appliance_powers = self.disaggregate_chunk(chunk)
for i, model in enumerate(self.model):
appliance_power = appliance_powers.iloc[:, i]
if len(appliance_power) == 0:
continue
data_is_available = True
cols = pd.MultiIndex.from_tuples([chunk.name])
meter_instance = model['training_metadata'].instance()
df = pd.DataFrame(
appliance_power.values, index=appliance_power.index,
columns=cols)
key = '{}/elec/meter{}'.format(building_path, meter_instance)
output_datastore.append(key, df)
# Copy mains data to disag output
mains_df = pd.DataFrame(chunk, columns=cols)
output_datastore.append(key=mains_data_location, value=mains_df)
if data_is_available:
self._save_metadata_for_disaggregation(
output_datastore=output_datastore,
sample_period=load_kwargs['sample_period'],
measurement=measurement,
timeframes=timeframes,
building=mains.building(),
meters=[d['training_metadata'] for d in self.model]
)
def disaggregate_chunk(self, mains):
"""In-memory disaggregation.
Parameters
----------
mains : pd.Series
Returns
-------
appliance_powers : pd.DataFrame where each column represents a
disaggregated appliance. Column names are the integer index
into `self.model` for the appliance in question.
"""
if not self.model:
raise RuntimeError(
"The model needs to be instantiated before"
" calling `disaggregate`. The model"
" can be instantiated by running `train`.")
if len(mains) < self.MIN_CHUNK_LENGTH:
raise RuntimeError("Chunk is too short.")
# Because CombinatorialOptimisation could have been trained using
# either train() or train_on_chunk(), we must
# set state_combinations here.
self._set_state_combinations_if_necessary()
"""
# Add vampire power to the model
if vampire_power is None:
vampire_power = get_vampire_power(mains)
if vampire_power > 0:
print("Including vampire_power = {} watts to model..."
.format(vampire_power))
n_rows = self.state_combinations.shape[0]
vampire_power_array = np.zeros((n_rows, 1)) + vampire_power
state_combinations = np.hstack(
(self.state_combinations, vampire_power_array))
else:
state_combinations = self.state_combinations
"""
state_combinations = self.state_combinations
        summed_power_of_each_combination = np.sum(state_combinations, axis=1)
# !/usr/bin/env python
"""
JADE.py
Description: the implementation of modified JADE
Reference paper:
<NAME>, and <NAME>.
"JADE: adaptive differential evolution with optional external archive."
Member variables:
Name: JADE
FERuntime: time for fitness evaluation
FENum: number of fitness evaluation
runtime: time for whole algorithm
optimalX: optimal solution for problem
optimalY: optimal value for problem
convergeCurve: the procedure of convergence
convergeCurveInterval: inverval between two saved points
muF: the initial mean of F distribution
muCR: the initial mean of CR distribution
    p: top p individuals
    c: positive constant for muCR
Member function:
setParameters(weight, learningRate): setting parameters
optimize(cfp, ap, printLog): the main process of optimization
        cfp: config for the continuous function parameters
        ap: config for the algorithm parameters
        printLog: determines whether to print a log after optimization
            (True by default)
Example:
agent = JADE()
agent.optimize(cfp, ap, printLog=True) # cfp ap need to config at first
"""
from function import continueFunction as cF
import numpy as np
import time
import sys
import copy
__all__ = ['JADE']
class JADE:
def __init__(self):
self.Name = "JADE"
self.FERuntime = 0
self.FENum = 0
self.setParameters()
# setting muF, muCR, p, c
def setParameters(self, muF=0.5, muCR=0.5, p=0.05, c=0.1):
self.muF = muF
self.muCR = muCR
self.p = p
self.c = c
def optimize(self, cfp, ap, printLog=True):
runtimeStart = time.clock()
self.__mainLoop(cfp, ap, printLog)
self.runtime = time.clock() - runtimeStart
def __mainLoop(self, cfp, ap, printLog):
np.random.seed(ap.initialSeed)
popSize = ap.populationSize
Dim = cfp.funcDim
function = getattr(cF, cfp.funcName)
lowerBoundX = np.kron(np.ones((popSize, 1)), cfp.funcLowerBound)
upperBoundX = np.kron(np.ones((popSize, 1)), cfp.funcUpperBound)
lowerInitBoundX = np.kron(
            np.ones((popSize, 1)), cfp.funcInitLowerBound)  # init-bound attribute name assumed
from __future__ import absolute_import, division, print_function
import os
import math
import matplotlib.pyplot as plt
import numpy as np
import random
import sys
import battleship_utils_py as bscpp
from pydrake.all import (AutoDiffXd,
GurobiSolver,
RigidBodyTree,
RigidBody)
from pydrake.solvers import ik
from pydrake.multibody.joints import PrismaticJoint, RevoluteJoint
from pydrake.multibody.shapes import Box, VisualElement
from pydrake.multibody.collision import CollisionElement
from underactuated import PlanarRigidBodyVisualizer
def nullspace(A, atol=1e-13, rtol=0):
"""Compute an approximate basis for the nullspace of A.
The algorithm used by this function is based on the singular value
decomposition of `A`.
Parameters
----------
A : ndarray
A should be at most 2-D. A 1-D array with length k will be treated
as a 2-D with shape (1, k)
atol : float
The absolute tolerance for a zero singular value. Singular values
smaller than `atol` are considered to be zero.
rtol : float
The relative tolerance. Singular values less than rtol*smax are
considered to be zero, where smax is the largest singular value.
If both `atol` and `rtol` are positive, the combined tolerance is the
maximum of the two; that is::
tol = max(atol, rtol * smax)
Singular values smaller than `tol` are considered to be zero.
Return value
------------
ns : ndarray
If `A` is an array with shape (m, k), then `ns` will be an array
with shape (k, n), where n is the estimated dimension of the
nullspace of `A`. The columns of `ns` are a basis for the
nullspace; each element in numpy.dot(A, ns) will be approximately
zero.
"""
A = np.atleast_2d(A)
u, s, vh = np.linalg.svd(A)
tol = max(atol, rtol * s[0])
nnz = (s >= tol).sum()
ns = vh[nnz:].conj().T
return ns
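# Quick sanity check (illustrative, not from the original file): a 1x3 rank-one
# matrix has a two-dimensional nullspace, and A.dot(ns) is ~0 for the returned basis.
#
#   A = np.array([[1., 2., 3.]])
#   ns = nullspace(A)                 # shape (3, 2)
#   assert np.allclose(A.dot(ns), 0.)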
class BattleshipBoardVisualizer(PlanarRigidBodyVisualizer):
def __init__(self, width, height, *args, **kwargs):
PlanarRigidBodyVisualizer.__init__(self, *args, **kwargs)
self.width = width
self.height = height
self.ax.autoscale(enable=False, axis='both')
self.ax.set_aspect('equal', adjustable='box')
self.ax.axis('on')
#self.ax.set_aspect('equal', 'datalim')
self.ax.set_xticks(range(0, self.width+1))
self.ax.set_xticks(np.arange(-0.5, self.width, 1.0), minor=True)
self.ax.set_yticks(range(0, self.height+1))
self.ax.set_yticks(np.arange(-0.5, self.height, 1.0), minor=True)
self.ax.grid(which="major", color="b", linestyle="--")
self.ax.grid(which="minor", color="b", linestyle="-")
self.ax.set_xlim([-1, self.width+1])
self.ax.set_ylim([-1, self.height+1])
def draw(self, context):
PlanarRigidBodyVisualizer.draw(self, context)
def draw_board_state(ax, rbt, q, board_width, board_height):
Tview = np.array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 1.]])
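    # Tview zeroes the z-row, i.e. it projects poses onto the board's x-y plane
    # for the planar visualizer.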
viz = BattleshipBoardVisualizer(
board_width, board_height, rbt, Tview, xlim=[0., board_width],
ylim=[0., board_height], ax=ax)
viz.draw(q)
return viz
def spawn_rbt(width, height, max_length, N, random_length=True):
rbt = RigidBodyTree()
q0 = np.zeros(N*3)
world_body = rbt.get_body(0)
color_generator = iter(plt.cm.rainbow(np.linspace(0, 1, N+1)))
for i in range(N):
q0[i*3+0] = np.random.uniform(0., width)
# q0[i*3+1] = height / 2.
q0[i*3+1] = np.random.uniform(0., height)
q0[i*3+2] = np.random.uniform(0., math.pi*2.)
color = next(color_generator)
if random_length:
length = random.randrange(1, max_length + 1)
else:
length = max_length
joint_link_x = RigidBody()
joint_link_x.set_name("ship_%d_joint_link_x" % i)
joint_link_x.add_joint(
world_body, PrismaticJoint("x", np.eye(4), np.array([1., 0., 0.])))
rbt.add_rigid_body(joint_link_x)
joint_link_y = RigidBody()
joint_link_y.set_name("ship_%d_joint_link_y" % i)
joint_link_y.add_joint(
joint_link_x, PrismaticJoint("y", np.eye(4), np.array([0., 1., 0.])))
rbt.add_rigid_body(joint_link_y)
ship_link = RigidBody()
ship_link.set_name("ship_%d_ship_link" % i)
ship_link.add_joint(
joint_link_y, RevoluteJoint("theta", np.eye(4), np.array([0., 0., 1.])))
boxElement = Box([1.0, length, 1.0])
boxVisualElement = VisualElement(boxElement, np.eye(4), color)
ship_link.AddVisualElement(boxVisualElement)
# necessary so this last link isn't pruned by the rbt.compile() call
ship_link.set_spatial_inertia(np.eye(6))
# get welded
rbt.add_rigid_body(ship_link)
boxCollisionElement = CollisionElement(boxElement, np.eye(4))
boxCollisionElement.set_body(ship_link)
rbt.addCollisionElement(boxCollisionElement, ship_link, "default")
# Add wall visual + collision elements
wall_color = next(color_generator)
wallWidthElement = Box([width+2, 100.0, 1.0])
tf = np.eye(4)
tf[0, 3] = width/2.
tf[1, 3] = -50
world_body.AddVisualElement(VisualElement(
wallWidthElement, tf, wall_color))
wall_ny_collision = CollisionElement(wallWidthElement, tf)
wall_ny_collision.set_body(world_body)
rbt.addCollisionElement(wall_ny_collision, world_body, "default")
tf[0, 3] = width/2.
tf[1, 3] = height+50
world_body.AddVisualElement(VisualElement(
wallWidthElement, tf, wall_color))
wall_py_collision = CollisionElement(wallWidthElement, tf)
wall_py_collision.set_body(world_body)
rbt.addCollisionElement(wall_py_collision, world_body, "default")
wallHeightElement = Box([100.0, height+2, 1.0])
tf = np.eye(4)
tf[0, 3] = -50
tf[1, 3] = height/2.
world_body.AddVisualElement(VisualElement(
wallHeightElement, tf, wall_color))
wall_nx_collision = CollisionElement(wallHeightElement, tf)
wall_nx_collision.set_body(world_body)
rbt.addCollisionElement(wall_nx_collision, world_body, "default")
tf = np.eye(4)
tf[0, 3] = width+50
tf[1, 3] = height/2.
world_body.AddVisualElement(VisualElement(
wallHeightElement, tf, wall_color))
wall_px_collision = CollisionElement(wallHeightElement, tf)
wall_px_collision.set_body(world_body)
rbt.addCollisionElement(wall_px_collision, world_body, "default")
rbt.compile()
return rbt, q0
def projectToFeasibilityWithIK(rbt, q0, board_width, board_height):
constraints = []
constraints.append(ik.MinDistanceConstraint(
model=rbt, min_distance=0.01, active_bodies_idx=[],
active_group_names=set()))
for body_i in range(rbt.get_num_bodies()-1):
# All corners on body must be inside of the
# bounds of the board
body = rbt.get_body(body_i+1)
visual_elements = body.get_visual_elements()
if len(visual_elements) > 0:
points = visual_elements[0].getGeometry().getPoints()
lb = np.tile(np.array([0., 0., -100.]), (points.shape[1], 1)).T
ub = np.tile(np.array([board_width, board_height, 100.]),
(points.shape[1], 1)).T
constraints.append(ik.WorldPositionConstraint(
rbt, body_i+1, points, lb, ub))
options = ik.IKoptions(rbt)
options.setDebug(True)
options.setMajorIterationsLimit(10000)
options.setIterationsLimit(100000)
results = ik.InverseKin(
rbt, q0, q0, constraints, options)
qf = results.q_sol[0]
info = results.info[0]
dqf_dq0 = np.eye(qf.shape[0])
#if info != 1:
# print("Warning: returned info = %d != 1" % info)
if True or info == 1 or info == 100:
# We've solved an NLP of the form:
# qf = argmin_q || q - q_0 ||
# s.t. phi(q) >= 0
#
# which projects q_0 into the feasible set $phi(q) >= 0$.
# We want to return the gradient of qf w.r.t. q_0.
# We'll tackle an approximation of this (which isn't perfect,
# but is a start):
# We'll build a linear approximation of the active set at
# the optimal value, and project the incremental dq_0 into
# the null space of this active set.
# These vectors all point in directions that would
# bring q off of the constraint surfaces.
constraint_violation_directions = []
cache = rbt.doKinematics(qf)
for i, constraint in enumerate(constraints):
c, dc = constraint.eval(0, cache)
lb, ub = constraint.bounds(0)
phi_lb = c - lb
phi_ub = ub - c
for k in range(c.shape[0]):
if phi_lb[k] < -1E-6 or phi_ub[k] < -1E-6:
print("Bounds violation detected, "
"solution wasn't feasible")
print("%f <= %f <= %f" % (lb[k], c[k], ub[k]))
print("Constraint type ", type(constraint))
return qf, info, dqf_dq0
if phi_lb[k] < 1E-6:
# Not allowed to go down
constraint_violation_directions.append(-dc[k, :])
# If ub = lb and ub is active, then lb is also active,
# and we don't need to double-add this vector.
if phi_ub[k] < 1E-6 and phi_ub[k] != phi_lb[k]:
# Not allowed to go up
constraint_violation_directions.append(dc[k, :])
# Build a full matrix C(q0_new - qstar) = 0
# that approximates the feasible set.
if len(constraint_violation_directions) > 0:
C = np.vstack(constraint_violation_directions)
ns = nullspace(C)
dqf_dq0 = np.eye(qf.shape[0])
dqf_dq0 = np.dot(np.dot(dqf_dq0, ns), ns.T)
else:
            # No active constraints, so movements of q0 are unconstrained
dqf_dq0 = np.eye(qf.shape[0])
return qf, info, dqf_dq0
def projectToFeasibilityWithNLP(rbt, q0, board_width, board_height):
# More generic than above... instead of using IK to quickly
# assembly the nlp solve that goes to snopt, build it ourselves.
# (Gives us lower-level control at the cost of complexity.)
print("TODO, I think this requires a few new drake bindings"
" for generic nonlinear constraints")
if __name__ == "__main__":
np.set_printoptions(precision=4, suppress=True)
os.system("mkdir -p figs")
fig, (ax1, ax2) = plt.subplots(1, 2)
scatter_fig, scatter_ax = plt.subplots(1, 1)
board_width = 10
board_height = 10
for i in range(20):
info_0 = -1
while info_0 != 1:
ax1.clear()
ax2.clear()
rbt, q0 = spawn_rbt(board_width, board_height, 5, 5)
draw_board_state(ax1, rbt, q0, board_width, board_height)
q_sol_0, info_0, dqf_dq0_0 = \
projectToFeasibilityWithIK(rbt, q0, board_width, board_height)
error_pairs = []
for j in range(50):
info = -1
while info != 1:
noise = np.random.normal(loc=0.0, scale=0.1/(j+1),
size=q0.shape)
q_sol, info, dqf_dq0 = \
projectToFeasibilityWithIK(
rbt, q0+noise, board_width, board_height)
# Did our linearization predict this solution very well?
expected_q_sol_new = np.dot(dqf_dq0_0, noise) + q_sol_0
est_error = np.linalg.norm(expected_q_sol_new - q_sol)
ini_error = np.linalg.norm(noise)
print("\nError in estimate: ", est_error)
print("Error in initial: ", ini_error)
error_pairs.append([ini_error, est_error])
draw_board_state(ax2, rbt, q_sol, board_height, board_width)
plt.draw()
plt.pause(1e-6)
fig.savefig('figs/plot_run_%d_ik.png' % i)
all_error_pairs = np.vstack(error_pairs).T
scatter_ax.clear()
scatter_ax.scatter(all_error_pairs[0, :], all_error_pairs[1, :])
scatter_ax.plot([-10.0, 10.0], [-10.0, 10.0], '--')
scatter_ax.set_xlim([0., 1.1*np.max(all_error_pairs[0, :])])
        scatter_ax.set_ylim([0., 1.1*np.max(all_error_pairs[1, :])])
# -*- coding: utf-8 -*_
"""
fbe.py
=========================================================================
FBE module provides several utilities and signal parametrization methods.
"""
__author__ = '<NAME>'
import spectrum
import numpy as np
from typing import Tuple
from scipy.io import wavfile
import scipy.signal
import os
import soundfile as sf
import sys
class sin2cos2:
"""
Class for computing signal windowing function with sin(x)^2 and cos(x)^2 tails.
:param frame: the frame length in samples
:type frame: int
:param overlap: the size of the overlaping part of the window (the length of the tails on both sides)
:type overlap: int
:return: nothing
"""
def __init__(self, frame : int = 512, overlap : int = 50):
self._win = np.zeros((frame,))
self._frame = frame
self._overlap = overlap
self._compute_window()
def _compute_window(self):
for i in range(self._overlap):
self._win[i] = np.sin(2*np.pi/(4*(self._overlap+2))*(i+1))**2
for i in range(self._overlap,self._frame-self._overlap):
self._win[i] = 1
for i in range(self._frame-self._overlap,self._frame):
self._win[i] = np.cos(2*np.pi/(4*(self._overlap+2))*(i-self._frame+self._overlap+1))**2
def window(self):
"""
Method returning the vector of window's values.
:return: the window
:rtype: numpy array of length frame
"""
return self._win
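# Example (illustrative sketch): build a 512-sample analysis window whose first and
# last 50 samples taper with sin^2 / cos^2 shapes.
#
#   win = sin2cos2(frame=512, overlap=50).window()   # numpy array of length 512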
class fbe:
"""
Versatile class computing various speech signal representations, mostly based on AR modelling and Mel Frequency
Filterbanks.
:param frame_zero_adding: required length of the sequence after zero adding operation, defaults to None, which indicates no zero adding
:type frame_zero_adding: int
:param frame: frame length in samples
:type frame: int
:param sr: sampling frequency in Hz
:type sr: float
:param preem_alfa: the preemphasis coefficient
:type preem_alfa: float
:param freq_range: frequency range in which the mel frequency filterbanks should be computed
    :type freq_range: np.ndarray, two-element vector of floats
:param filts_num: number of mel frequency triangular filters in the filterbank
:type filts_num: int
:param window: the windowing function
:type window: np.ndarray, numpy vector of floats, defaults to None, which causes using of rectangular window
:param ar_order: the AR model order
:type ar_order: int
:param cepstral_lifter: the cepstral lifter in MFCC computation
:type cepstral_lifter: int
:param num_ceps: number of cepstra
:type num_ceps: int
:returns: nothing
.. note:: PSD is abbreviation for power spectral density in the whole documentation. AR is abbreviation for
autoregressive in the whole documentation.
"""
def __init__(self, frame_zero_adding=None, frame=512, sr=16000, preem_alfa=0.95, overlap=0,
freq_range=[20., 8000.], filts_num=23, num_gfs=70, spl_of_max_amplitude=88,
window=None, ar_order=16, cepstral_lifter=22, num_ceps=13):
if overlap==0 or overlap > frame/2:
overlap = frame/2
if window is None:
window = np.ones((frame,))
if frame != len(window):
print("ERROR in fbe, frame and window lengths do not match, program exits ...")
sys.exit(1)
self.sr = sr # sampling frequency in Hz
self.frame = frame # number of samples in the frame
self.num_ceps = num_ceps
if not frame_zero_adding is None:
            self._nfft = frame_zero_adding # fft length, sets the self._nfft attribute
else:
self._nfft = frame
self.preem_alfa = preem_alfa # preemphasis coefficient
self.freq_range = freq_range # frequency range in Hz
self.filts_num = filts_num # number of triangular filterbank channels
self.K = int(self._nfft / 2.) + 1 # length of the unique part of the FFT
self.f_min = 0
self.f_max = float(sr) / 2.
self.f_low = self.freq_range[0]
self.f_high = self.freq_range[1]
# matrices
        self._tfb = self._tfb() # compute the mel-frequency triangular filterbank, sets the _tfb attribute
self._pinv_tfb = self._pinv_tfb()
self._wgh_mat = self._wgh_mat()
self._inv_wgh_mat = self._inv_wgh_mat()
# window
self._window = window
self._ar_order = ar_order
# compute cepstral lifter
L = cepstral_lifter
N = num_ceps
self.cepstral_lifter = 1+0.5*L*np.sin(np.pi*np.asarray(range(N))/float(L))
# dct matrix
self.dctmat = np.zeros((self.num_ceps,self.filts_num))
for i in range(self.num_ceps):
for j in range(self.filts_num):
self.dctmat[i,j] = np.sqrt(2./self.filts_num) * np.cos(np.pi*i/self.filts_num*(j+.5))
self.lst_elm = ['fr','frwin','fft','mag','ang','psd','senmatpsd','senpsd','lpc','var_lpc','armag','arpsd','fbe',\
'fbekaldi','arfbe','wgh','arwgh','sfbe','sarfbe','smag','spsd','sarmag','sarpsd','sfbewgh',\
'smagwgh','spsdwgh','senmatspsdwgh','senspsdwgh','sarfbewgh','sarpsdwgh','psdspl']
self.results = {}
self._reset()
def _reset(self):
"""
Resets the cache.
"""
for e in self.lst_elm:
self.results[e] = None
def get_frame_len(self) -> int:
"""Returns the frame length in samples
:return: the frame length
:rtype: int
"""
return self.frame
def get_tfb(self) -> np.ndarray:
"""Gets the triangular mel frequency filterbank.
:return: the filter matrix containing in each row a single filter
:rtype: np.ndarray, numpy array with filts_num rows
"""
return self._tfb
def get_wgh(self) -> np.ndarray:
"""Gets the weighting matrix, which is a square of the product of pseudo inverses of the Jacobian of the linear
magnitude spectrum filter banks transform.
:return: the weighting matrix
:rtype: numpy array with dimension filts_num x filts_num
"""
return self._wgh_mat
def get_inv_wgh(self) -> np.ndarray:
"""
Gets pseudo inverse of the weighting matrix.
:returns: the pseudo inverse of the weighting matrix
:rtype: np.ndarray, numpy array with dimension filts_num x filts_num
"""
return self._inv_wgh_mat
def get_pinv_tfb(self) -> np.ndarray:
"""
Gets the pseudoinverse of the filterbanks matrix.
:returns: the pseudo inverse of the weighting matrix
:rtype: np.ndarray, numpy array with dimension filts_num x filts_num
"""
return self._pinv_tfb
def window(self) -> np.ndarray:
"""
Gets the signal windowing function.
:returns: the windowing function
:rtype: np.ndarray, numpy array with dimension 1 x frame
"""
return self._window
def _tfb(self):
"""
Computes the mel frequency triangular filterbank.
"""
# filter cutoff frequencies (Hz) for all filters, size 1x(M+2)
aux = np.linspace(0, self.filts_num + 1, self.filts_num + 2)
c = self._mel2hz(
(self._hz2mel(self.f_low) + aux * (self._hz2mel(self.f_high) - self._hz2mel(self.f_low)) / float(self.filts_num + 1)))
f = np.linspace(self.f_min, self.f_max, self.K)
H = np.zeros((self.filts_num, self.K))
for m in range(self.filts_num):
a = list(f >= c[m])
b = list(f <= c[m + 1])
k = np.array([a[i] and b[i] for i in range(len(f))])
H[m, k] = (f[k] - c[m]) / (c[m + 1] - c[m])
a = list(f >= c[m + 1])
b = list(f <= c[m + 2])
k = np.array([a[i] and b[i] for i in range(len(f))])
H[m, k] = (c[m + 2] - f[k]) / (c[m + 2] - c[m + 1])
return H
def _proj_symmat_pd(self,A : np.ndarray,rcond : float) -> np.ndarray:
"""Projecting matrix A onto space of positive definite matrices.
:param A: matrix to be projected
:type A: np.ndarray
:param rcond: reciprocal condition number of the resulting matrix
:type rcond: float
:return: the projected matrix
:rtype: np.ndarray
"""
A = .5*(A.T+A)
w, v = np.linalg.eigh(A)
w[w<rcond*np.max(w)] = rcond*np.max(w)
f = (np.sqrt(w) * v).T
f = f.T.dot(f)
return f
def _wgh_mat(self):
"""
The weighting matrix.
"""
W = np.dot(self._pinv_tfb.T, self._pinv_tfb)
W = self._proj_symmat_pd(W,10.e-3)
f = np.linalg.cholesky(W).T
return f
def _inv_wgh_mat(self):
"""
The inverse of the weighting matrix
"""
return np.linalg.pinv(self._wgh_mat)
def _pinv_tfb(self):
"""
The pseudoinverse of the mel frequency triangular filter banks matrix
"""
return np.linalg.pinv(self._tfb)
def _nextpow2(self, i):
"""
The next nearest power of 2.
"""
n = 1
while n < i: n *= 2
self._nfft = n
def _hz2mel(self, hz):
"""
Hertz to mel frequency scale
"""
mel = 1127. * np.log(1 + hz / 700.)
return mel
def _mel2hz(self, mel):
"""
Mel to frequency scale
"""
hz = 700. * np.exp(mel / 1127.) - 700.
return hz
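    # Sanity check for the two conversions above (illustrative): with the
    # 1127*ln(1 + f/700) scale, 1000 Hz maps to roughly 1000 mel, and
    # _mel2hz(_hz2mel(f)) recovers f up to floating-point error.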
def idx2freq(self, i : int) -> float:
"""Converts frequency index to frequency in Hz
:param i: frequency index
:type i: int
:return: frequency in Hz
:rtype: float
"""
f = float(i)/self._nfft*self.sr
return f
def freq2idx(self,f : float) -> int:
"""Converts frequency in Hz to the frequency index
:param f: frequency in Herz
:type f: float
:return: frequency index
:rtype: int
"""
idx = int(np.round(f*float(self._nfft)/self.sr))
return idx
def set_frm(self,fr : np.ndarray):
"""
Sets the frame of the signal - this is then used to compute the all signal representations
:param fr: signal frame
:type fr: np.ndarray, numpy vector of floats
:return: nothing
"""
self._reset()
self.results['fr'] = fr
def set_wgh(self,wgh : np.ndarray):
"""
Set compact spectrum
        :param wgh: the compact spectrum with filt_num elements
:type wgh: numpy vector of floats
:returns: nothing
"""
self._reset()
self.results['wgh'] = wgh
def set_arwgh(self,arwgh : np.ndarray):
"""
Set AR compact spectrum
        :param arwgh: the compact autoregressive spectrum with filt_num elements
:type arwgh: np.ndarray, numpy vector of floats
:returns: nothing
"""
self._reset()
self.results['arwgh'] = arwgh
def set_fbe(self,fbe : np.ndarray):
"""
Set filterbank energies
:param fbe: the filter bank energies (vector with filt_num elements)
:type fbe: np.ndarray, numpy vector of floats
:returns: nothing
"""
self._reset()
self.results['fbe'] = fbe
def set_mag(self,mag : np.ndarray):
"""Set magnitude spectrum
:param mag: the magnitude spectrum
:type mag: np.ndarray, numpy vector of floats
:returns: nothing
"""
self._reset()
self.results['mag'] = mag
def set_psd(self,psd : np.ndarray):
"""
Set power density spectrum
:param psd: the power density spectrum
:type psd: np.ndarray, numpy vector of floats
:returns: nothing
"""
self._reset()
self.results['psd'] = psd
def fr(self) -> np.ndarray:
"""
Gets frame
:returns: the frame
:rtype: np.ndarray, numpy vector of floats
"""
if self.results['fr'] is None:
print("Frame not given (emtpy vector), program exits ...")
sys.exit(1)
else:
return self.results['fr']
def fr_win(self) -> np.ndarray:
"""
Gets windowed frame
:returns: the windowed frame
:rtype: np.ndarray, numpy vector of floats
"""
if self.results['frwin'] is None:
self.results['frwin'] = np.zeros((self._nfft,))
self.results['frwin'][:self.frame] = self.fr() * self.window()
else:
pass
return self.results['frwin']
def fft(self) -> np.ndarray:
"""
Gets FFT
:returns: the fft of the, possibly zero added, signal frame
:rtype: np.ndarray, numpy vector of complex floats
"""
if self.results['fft'] is None:
self.results['fft'] = np.fft.fft(self.fr_win())
else:
pass
return self.results['fft']
def mag(self) -> np.ndarray:
"""
Gets magnitude spectrum
:returns: the magnitude of the, possibly zero added, signal frame
:rtype: np.ndarray, numpy vector of floats
"""
if self.results['mag'] is None:
self.results['mag'] = np.abs(self.fft())[ : self.K]
else:
pass
return self.results['mag']
def ang(self) -> np.ndarray:
"""
Gets angular spectrum.
:returns: the angular spectrum of the, possibly zero added, signal frame
:rtype: np.ndarray, numpy vector of floats
"""
if self.results['ang'] is None:
self.results['ang'] = np.angle( self.fft() )
else:
pass
return self.results['ang']
def psd(self) -> np.ndarray:
"""
Gets power density spectrum
:returns: the PSD of the, possibly zero added, signal frame
:rtype: np.ndarray, numpy vector of floats
"""
if self.results['psd'] is None:
self.results['psd'] = self.mag()**2.
else:
pass
return self.results['psd']
def lpc(self) -> np.ndarray:
"""
Gets LPC coefficients aka STP coefficients or AR coefficients
:return: LPC with the leading 1
:rtype: np.ndarray
"""
if self.results['lpc'] is None:
_lpc, self.results['var_lpc'], k = spectrum.aryule(self.fr_win(), self._ar_order)
self.results['lpc'] = np.concatenate((np.array([1]),_lpc))
else:
pass
return self.results['lpc']
def var_lpc(self) -> np.ndarray:
"""
Gets variance of the short term residual spectrum
:return: short term residual variance
:rtype: np.ndarray
"""
if self.results['var_lpc'] is None:
self.results['lpc'], self.results['var_lpc'], k = spectrum.aryule(self.fr_win(), self._ar_order)
else:
pass
return self.results['var_lpc']
def set_ar(self,a,var):
"""Setting AR coefficients and STP residual variance
:param a: AR coefficients with leading one
:param var: variance of the short term residual
"""
"""Sets the AR coefficients"""
self._reset()
self.results['var_lpc'] = var
self.results['lpc'] = a
def armag(self) -> np.ndarray:
"""
Gets AR magnitude spectrum
:return: AR magnitude spectrum
:rtype: np.ndarray, numpy vector of floats of length _nfft/2+1
"""
if self.results['armag'] is None:
p = len(self.lpc())-1
aux = np.concatenate([self.lpc(),np.zeros((self._nfft-p-1,))],axis=0)
fftaux = np.abs(np.fft.fft(aux))
std = np.sqrt(self.var_lpc()*self._nfft)
self.results['armag'] = np.real(std/fftaux[ : self.K])
else:
pass
return self.results['armag']
def arpsd(self) -> np.ndarray:
"""
Gets AR power density spectrum
:return: the AR PSD
:rtype: np.ndarray
"""
if self.results['arpsd'] is None:
self.results['arpsd'] = self.armag() ** 2.
else:
pass
return self.results['arpsd']
def fbe(self) -> np.ndarray:
"""
Gets filter banks outputs based on magnitude spectrum
:return: filter bank filtered magnitude spectrum
:rtype: np.ndarray, numpy vector of floats of length filt_num
"""
if self.results['fbe'] is None:
self.results['fbe'] = np.dot(self.get_tfb(),self.mag())
else:
pass
return self.results['fbe']
def sfbe2mfcc(self) -> np.ndarray:
"""
Converts smoothed filter banks energies to MFCC coefficients
:return: MFCC coefficients
:rtype: np.ndarray, numpy vector of floats (size num_cep)
"""
fbe = self.sfbe()
logfbe = np.log(fbe)
mfcc = np.dot(self.dctmat,logfbe) #scipy.fftpack.dct(logfbe,n=self.num_ceps,norm='ortho')
# liftering
cmfcc = self.cepstral_lifter*mfcc
return cmfcc
def fbe2mfcc(self) -> np.ndarray:
"""
Converts filter banks energies to MFCC coefficients
:return: MFCC coefficients
:rtype: np.ndarray, numpy vector of floats (size num_cep)
"""
fbe = self.fbe()
logfbe = np.log(fbe)
mfcc = np.dot(self.dctmat,logfbe) #scipy.fftpack.dct(logfbe,n=self.num_ceps,norm='ortho')
# here comes liftering
cmfcc = self.cepstral_lifter*mfcc
return cmfcc
def arfbe(self) -> np.ndarray:
"""
AR magnitude spectrum to filter banks energies
:return: filter bank filtered AR magnitude spectrum
:rtype: np.ndarray, numpy vector of floats (size num_filt)
"""
if self.results['arfbe'] is None:
self.results['arfbe'] = np.dot(self.get_tfb(),self.armag())
else:
pass
return self.results['arfbe']
def wgh(self) -> np.ndarray:
"""
Weighted filter bank energies
:return: the magnitude compact spectrum
:rtype: np.ndarray, numpy vector of floats (size num_filt)
"""
if self.results['wgh'] is None:
self.results['wgh'] = np.dot(self.get_wgh(),self.fbe())
else:
pass
return self.results['wgh']
def arwgh(self) -> np.ndarray:
"""
AR weighted filter bank energies
:return: the AR magnitude compact spectrum
:rtype: np.ndarray, numpy vector of floats (size num_filt)
"""
if self.results['arwgh'] is None:
self.results['arwgh'] = np.real(np.dot(self.get_wgh(),self.arfbe()))
else:
pass
return self.results['arwgh']
def smag(self) -> np.ndarray:
"""
Smoothed magnitude spectrum
:return: magnitude spectrum computed from filter bank energies
:rtype: np.ndarray, numpy vector of floats (size _nfft/2+1)
"""
if self.results['smag'] is None:
self.results['smag'] = np.dot(self.get_pinv_tfb(), self.fbe())
else:
pass
return self.results['smag']
def spsd(self) -> np.ndarray:
"""
Smoothed power density spectrum
:return: PSD computed from filter bank energies
:rtype: np.ndarray, numpy vector of floats(size _nfft/2+1)
"""
if self.results['spsd'] is None:
self.results['spsd'] = self.smag()**2.
else:
pass
return self.results['spsd']
def sarmag(self)->np.ndarray:
"""
Smoothed AR magnitude spectrum
:return: smoothed (from arfbe) AR magnitude spectrum (size _nfft/2+1)
:rtype: np.ndarray, numpy vector of floats
"""
if self.results['sarmag'] is None:
self.results['sarmag'] = np.dot(self.get_pinv_tfb(), self.arfbe())
else:
pass
return self.results['sarmag']
def sarpsd(self) -> np.ndarray:
"""
Smoothed AR PSD
:return: smoothed (from arfbe) AR PSD (size _nfft/2+1)
:rtype: np.ndarray, numpy vector of floats
"""
if self.results['sarpsd'] is None:
self.results['sarpsd'] = self.sarmag() ** 2.
else:
pass
return self.results['sarpsd']
def preemphasis(self, signal : np.ndarray) -> np.ndarray:
"""Perform preemphasis on the input signal.
:param signal: The signal to filter.
:type signal: np.ndarray, numpy vector of floats
:param coeff: The preemphasis coefficient. 0 is no filter, default is 0.95.
:type coeff: float
:returns: the filtered signal.
:rtype: numpy vector of floats
"""
return np.asarray(np.append(signal[0], signal[1:] - self.preem_alfa * signal[:-1]))
def sfbe(self) -> np.ndarray:
"""
Smoothed filter bank energies
:return: smoothed, from compact spectrum, filter bank energies
:rtype: np.ndarray, numpy vector of floats (size num_filt)
"""
if self.results['sfbe'] is None:
self.results['sfbe'] = np.dot(self.get_inv_wgh(), self.wgh())
else:
pass
return self.results['sfbe']
def sarfbe(self) -> np.ndarray:
"""
Smoothed AR filter bank energies
        :return: smoothed, from compact AR spectrum, filter bank energies
:rtype: np.ndarray, numpy vector of floats (size num_filt)
"""
if self.results['sarfbe'] is None:
self.results['sarfbe'] = np.dot(self.get_inv_wgh(), self.arwgh())
else:
pass
return self.results['sarfbe']
def smagwgh(self) -> np.ndarray:
"""
Smoothed magnitude spectrum
:return: computed from compact spectum magnitude spectrum
:rtype: np.ndarray, numpy vector of floats (size _nfft/2+1)
"""
if self.results['smagwgh'] is None:
self.results['smagwgh'] = np.dot(self.get_pinv_tfb(), self.sfbe())
else:
pass
return self.results['smagwgh']
def sarmagwgh(self) -> np.ndarray:
"""
Smoothed AR magnitude spectrum
:return: computed from AR compact spectrum magnitude spectrum
:rtype: np.ndarray, numpy vector of floats (size _nfft/2+1)
"""
if self.results['sarmagwgh'] is None:
self.results['sarmagwgh'] = np.dot(self.get_pinv_tfb(), self.sarfbe())
else:
pass
return self.results['sarmagwgh']
def spsdwgh(self) -> np.ndarray:
"""
Smoothed PSD
:return: computed from compact spectrum PSD
:rtype: np.ndarray, numpy vector of floats (size _nfft/2+1)
"""
if self.results['spsdwgh'] is None:
self.results['spsdwgh'] = self.smagwgh() ** 2.
else:
pass
return self.results['spsdwgh']
def sarpsdwgh(self) -> np.ndarray:
"""
Smoothed AR PSD
:return: PSD computed from AR compact spectra
:rtype: np.ndarray, numpy vector of floats (size _nfft/2+1)
"""
if self.results['sarpsdwgh'] is None:
self.results['sarpsdwgh'] = self.sarmagwgh() ** 2.
else:
pass
return self.results['sarpsdwgh']
def psd2wgh(self,psd : np.ndarray) -> np.ndarray:
"""
PSD -> weighted compact spectrum
:param psd: the PSD
:type psd: np.ndarray, numpy vector of floats (size _nfft/2+1)
:return: compact spectrum based on PSD
:rtype: np.ndarray, numpy vector of floats (size num_filt)
"""
mag = np.sqrt(psd)
fbe = np.dot(self.get_tfb(),mag)
return np.dot(self.get_wgh(),fbe)
def psd2ar(self,psd : np.ndarray,LpcOrder : int) -> Tuple[np.ndarray, float]:
"""
Converting PSD into LPC coefficients and excitation variance
:param psd: left half of the PSD
:type psd: numpy vector of floats (size _nfft/2+1)
:param LpcOrder: AR model order
:type LpcOrder: int
:return: * (`vector of floats`) direct form AR coeff. with leading 1
* (`float`) the variance of the short term residual
"""
D = len(psd)
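        # Rebuild the full (symmetric) PSD so that the inverse FFT below yields the
        # real autocorrelation sequence consumed by Levinson-Durbin.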
B = np.concatenate([psd,psd[D-2:0:-1]])
xc = np.real(np.fft.ifft(B))
xc = xc[:LpcOrder+1]
a, var, k = spectrum.LEVINSON(xc)
a = np.concatenate([[1],a])
var = var/(2*D-2)
return a, var
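# Illustrative per-frame analysis sketch (not from the original file; `frame_of_samples`
# stands for an assumed 512-sample numpy vector):
#
#   w = sin2cos2(frame=512, overlap=50).window()
#   fb = fbe(frame=512, sr=16000, window=w)
#   fb.set_frm(frame_of_samples)
#   mfcc = fb.fbe2mfcc()     # 13 liftered MFCCs (num_ceps defaults to 13)
#   arwgh = fb.arwgh()       # AR-based compact spectrum (filts_num values)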
def synth(mag_enh : np.ndarray, angular_r : np.ndarray, an_win : np.ndarray):
"""
Signal synthesis based on magnitude and angular spectra
:param mag_enh: enhanced speech magnitude spectrum
:type psd_enh: np.ndarray, numpy vector of floats
:param angular_r: angular noisy signal frame spectrum
:type angular_r: np.ndarray, numpy vector of floats
:param an_win: windowing function
:type an_win: np.ndarray numpy vector of floats
:return: time domain enhanced signal frame (windowed)
:rtype: numpy vector of floats
"""
# X = np.sqrt( psd_enh )
X = mag_enh
X[-1] = 0
enhDft = np.concatenate( (X, X[-2:0:-1]) ) * np.exp( 1j * angular_r )
an_win = np.sqrt( an_win )
enh_fs = an_win*np.real( np.fft.ifft( enhDft ) )
return enh_fs
def enhance_mag( mag_r, psd_n, psd_s):
"""
The Wiener filter in frequency domain
:param mag_r: noisy, unprocessed signal frame magnitude spectrum
:type psd_r: numpy vector of floats
:param psd_n: noise, estimated noise frame PSD
:type psd_n: numpy vector of floats
:param psd_s: speech, estimated speech frame PSD
:type psd_s: numpy vector of floats
:return: enhanced speech PSD
:rtype: numpy vector of floats
"""
psd_r_smag = psd_s + psd_n
mag_enh = np.maximum(psd_s, 1.e-6) / np.maximum(psd_r_smag, 1.e-4) * mag_r
mag_enh = np.maximum(mag_enh,0.001*mag_r)
mag_enh[-1] = 0 # filter out the most high frequency peak
return mag_enh
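# Note (illustrative): the gain applied above is the Wiener filter
# psd_s / (psd_s + psd_n); e.g. equal speech and noise PSDs halve the noisy
# magnitude, subject to the 0.001*mag_r floor.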
def enhance( mag_r, psd_n, psd_s, angular_r, an_win ):
"""
The Wiener filter returning the time frequency signal frame
:param mag_r: noisy, unprocessed signal frame magnitude spectrum
:type mag_r: numpy vector of floats
:param psd_n: noise, estimated noise frame PSD
:type psd_n: numpy vector of floats
:param psd_s: speech, estimated speech frame PSD
:type psd_s: numpy vector of floats
:param angular_r: angular noisy signal frame spectrum
:type angular_r: numpy vector of floats
:param an_win: windowing function
:type an_win: numpy vector of floats
:return: time domain enhanced signal frame (windowed)
:rtype: numpy vector of floats
"""
mag_enh = enhance_mag(mag_r, psd_n, psd_s)
enh_fs = synth(mag_enh,angular_r,an_win)
return enh_fs #, psd_enh
def enhance1 ( mag_r, psd_n, psd_s, angular_r, an_win):
"""
Perceptual enhancement (SETAP p. 245)
    :param mag_r: noisy, unprocessed signal frame magnitude spectrum
    :type mag_r: numpy vector of floats
:param psd_n: noise, estimated noise frame PSD
:type psd_n: numpy vector of floats
:param psd_s: speech, estimated speech frame PSD
:type psd_s: numpy vector of floats
:param angular_r: angular noisy signal frame spectrum
:type angular_r: numpy vector of floats
:param an_win: windowing function
:type an_win: numpy vector of floats
:return: time domain enhanced signal frame (windowed)
:rtype: numpy vector of floats
"""
e = psd_s/np.maximum(psd_n,1.e-4)+1.e-6
g = mag_r**2/np.maximum(psd_n,1.e-4)+1.e-6
v = e*g/(1+e)
    aux = np.maximum(.5*v, 1.e-2)
""" this uses cnn_trainer.py and Dijkstra algorithm to identify a melody in
scores in """
import time
import math
import numpy as np
import sklearn.metrics
from scipy.sparse import csgraph
from sklearn.model_selection import train_test_split
import misc_tools
import settings
try:
import cPickle as pickle
except Exception:
import pickle
FINAL_VALUE = -0.5
def compute_prob(note, prob_map, THRESHOLD):
"""
compute the probability of *note* referred to *prob_map*.
If the probability is higher than *THRESHOLD*, than the cost will
be > 0, otherwise it will be 0.
"""
pitch, onset, offset, ismelody = note
m = prob_map[pitch, onset: offset]
if m.shape[0] > 2 and settings.OUTLIERS_ON_PROB:
m = m[modified_z_score(m)]
if settings.AVERAGE:
p = m.mean()
else:
p = np.median(m)
if p < THRESHOLD:
return -0.5
else:
return p
def build_graph_matrix(notelist, prob_map, THRESHOLD):
"""
Returns a 2D array containing the matrix relative to the branch costs
in the graph. For each note A in *notelist*, it creates branches to the
notes N_i so that:
1) onset(N_k) == onset(N_l) for each k, l
2) onset(N_i) == min{onset(Y)} where Y: onset(Y) > offset(A), cost(Y) =
1 - probability(Y) <= *THRESHOLD*} for each i
This also adds two new virtual notes representing the first and the last
note.
"""
out = np.full((len(notelist) + 2, len(notelist) + 2),
np.inf,
dtype=settings.floatX)
last_onset = notelist[0][0]
# initialize the starting virtual note
FOUND_NEXT_NOTE = False
for i, note_i in enumerate(notelist, start=1):
pitch_i, onset_i, offset_i, melody_i = note_i
if onset_i > last_onset and FOUND_NEXT_NOTE:
# we have found a note in the previous onset
break
cost_i = -compute_prob(note_i, prob_map, THRESHOLD)
if cost_i > 0:
continue
else:
FOUND_NEXT_NOTE = True
out[0, i] = cost_i
last_onset = onset_i
for i, note_i in enumerate(notelist):
pitch_i, onset_i, offset_i, melody_i = note_i
FOUND_NEXT_NOTE = False
for j, note_j in enumerate(notelist[i + 1:], start=1):
pitch_j, onset_j, offset_j, melody_j = note_j
if onset_j < offset_i:
continue
elif FOUND_NEXT_NOTE and notelist[i + j - 1][0] < onset_j:
break
cost_j = -compute_prob(note_j, prob_map, THRESHOLD)
if cost_j > 0:
continue
else:
FOUND_NEXT_NOTE = True
# i + 1 because we have added a virtual note
out[(i + 1), (i + 1) + j] = cost_j
last_onset = onset_j # this is the last note reachable
if not FOUND_NEXT_NOTE:
# let's jump to the last virtual state
out[(i + 1), -1] = FINAL_VALUE
# making the last notes pointing to the ending virtual note
# is this required??
for i, note_i in enumerate(reversed(notelist), start=2):
if note_i[1] < last_onset:
break
elif note_i[1] > last_onset:
continue
else:
out[-i, -1] = FINAL_VALUE
return out
def _check(notelist, pianoroll_prob):
"""
Just for debugging
"""
WIN_WIDTH = int(settings.ARG_DEFAULT['win_w'])
EPS = misc_tools.EPS(0)
for j, (onset, offset, pitch) in enumerate(notelist):
flag = False
if pianoroll_prob[pitch, onset] < EPS:
for i in range(WIN_WIDTH):
if pianoroll_prob[pitch, onset - i] >= EPS:
print("you're wrong of -" + str(i) +
" for onset of note " + str(j))
flag = True
break
if pianoroll_prob[pitch, onset + i] >= EPS:
print("you're wrong of +" + str(i) +
" for onset of note " + str(j))
flag = True
break
elif pianoroll_prob[pitch, offset - 1] < EPS:
for i in range(WIN_WIDTH):
if pianoroll_prob[pitch, offset - 1 - i] >= EPS:
print("you're wrong of -" + str(i) +
" for offset of note " + str(j))
flag = True
break
if pianoroll_prob[pitch, offset - 1 + i] >= EPS:
print("you're wrong of +" + str(i) +
" for offset of note " + str(j))
flag = True
break
else:
for i in range(onset, offset):
if pianoroll_prob[pitch, i] < EPS:
print("note " + str(j) + " has some internal values set to 0")
flag = True
break
if flag:
return 1
# if not flag:
# print("note " + str(j) + " is correct")
return 0
def modified_z_score(ys):
"""
PARAMETERS :
------------
list-like object, usually 1D np.array
RETURN :
--------
    a new 1D np.array containing the indices of elements that are not outliers
stolen from http://colingorrie.github.io/outlier-detection.html
"""
threshold = 3.5
median_y = np.median(ys)
median_absolute_deviation_y = np.median([np.abs(y - median_y) for y in ys])
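    # 0.6745 is approximately the 0.75 quantile of the standard normal distribution,
    # which makes MAD-based scores comparable to ordinary z-scores for normal data.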
modified_z_scores = [0.6745 * (y - median_y) / median_absolute_deviation_y
for y in ys]
return np.where(np.abs(modified_z_scores) < threshold)[0]
def iqr(ys):
"""
PARAMETERS :
------------
list-like object, usually 1D np.array
RETURN :
--------
    a new 1D np.array containing the indices of elements that are not outliers
stolen from http://colingorrie.github.io/outlier-detection.html
"""
quartile_1, quartile_3 = np.percentile(ys, [25, 75])
iqr = quartile_3 - quartile_1
lower_bound = quartile_1 - (iqr * 1.5)
upper_bound = quartile_3 + (iqr * 1.5)
    return np.where((ys < upper_bound) & (ys > lower_bound))[0]  # keep points inside the IQR fences
def set_threshold(arr, CLUSTERING='single'):
print("starting clustering")
arr = arr.reshape(-1)
arr = arr[arr > settings.MIN_TH]
N_CLUSTER = 2
target_cluster = 1
print("max, min: ", arr.max(), arr.min())
arr = arr[iqr(arr)]
if CLUSTERING == 'kmeans':
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=N_CLUSTER,
init=np.array([settings.MIN_TH, arr.max()]).reshape(-1, 1))
labels = kmeans.fit_predict(arr.reshape(-1, 1))
else:
import fastcluster
from scipy.cluster.hierarchy import fcluster
from scipy.spatial.distance import pdist
Z = pdist(arr.reshape(-1, 1))
if CLUSTERING == 'single':
X = fastcluster.single(Z)
elif CLUSTERING == 'average':
X = fastcluster.average(Z)
elif CLUSTERING == 'centroid':
X = fastcluster.centroid(Z)
else:
return settings.THRESHOLD
labels = N_CLUSTER - fcluster(X, N_CLUSTER, 'maxclust')
# setting 0 for the minimum cluster
# np.ma.masked_array returns only values where the mask is 0
index = {}
for i, l in enumerate(labels):
index[l] = arr[i]
if len(index.keys()) == N_CLUSTER:
break
index = sorted(index.items(), key=lambda kv: kv[1]) # list of tuples sorted by values
target_label = index[target_cluster - 1][0] # the label of the desired cluster
th = np.max(arr[np.flatnonzero(labels == target_label)]) # max of the down cluster
print("found threshold: " + str(th))
# print(str(np.ma.masked_array(arr, 1 - labels).min()))
return th
def polyphonic_part(notelist, pianoroll_prob, THRESHOLD):
"""
Returns a list of int: 1 if the note at that index in *notelist* has a
probability > *THRESHOLD*, 0 otherwise.
"""
predicted_labels = []
for note in notelist:
c = compute_prob(note, pianoroll_prob, THRESHOLD)
if np.isnan(c):
# don't know why this happens, we had already discarded nans...
predicted_labels.append(0)
else:
predicted_labels.append(int(math.ceil(c)))
return predicted_labels
def monophonic_part(notelist, pianoroll_prob, THRESHOLD):
"""
Compute a strictly monophonic part by using the shortest path algorithm
specified in `settings`
RETURNS :
a tuple containing :
list(int) : the predicted labels
list(int) : the melody indices
"""
# compute the graph matrix
graph = build_graph_matrix(notelist, pianoroll_prob, THRESHOLD)
# compute the minimum paths
dist_matrix, predecessors = csgraph.shortest_path(graph, method=settings.PATH_METHOD,
directed=True,
indices=[0],
return_predecessors=True)
# building the predicted array label
last = predecessors[0, -1]
predicted_labels = [0 for j in range(len(notelist) + 2)]
melody_indices = []
while last != -9999:
predicted_labels[last] = 1
melody_indices.append(last)
last = predecessors[0, last]
predicted_labels = predicted_labels[1:-1]
return predicted_labels, melody_indices
def predict_labels(pianoroll_prob, in_notelist):
"""
Compute notes in the solo part according to the input notelist
and a pianoroll probability distribution.
PARAMETERS :
pianoroll_prob : 2d np.array
the pianoroll distribution
in_notelist : 2d np.array
the input list of notes as returned by
`utils.pianoroll_utils.make_pianorolls`
RETURNS :
a tuple of 1D arrays :
true labels according to `in_notelist` (1 where there is a 'solo
part', 0 where there isn't)
predicted labels according to `in_notelist`
"""
# ordering notelist by onset:
notelist = in_notelist[in_notelist[:, 1].argsort()]
# changing all nan to 2 * EPS(0)
for i in np.nditer(pianoroll_prob, op_flags=['readwrite']):
if np.isnan(i):
i[...] = 2 * misc_tools.EPS(0)
# np.nan_to_num(pianoroll_prob, copy=False)
# looking for the first non empty column
s = pianoroll_prob.sum(axis=0).nonzero()[0]
# first column with non zero values minus first onset
pad_length = s[0] - in_notelist[0][1]
notelist = [(pitch, onset + pad_length, offset + pad_length, ismelody)
for pitch, onset, offset, ismelody in in_notelist]
# notelist has no more the ground-truth, so we are using in_notelist
    true_labels = list(zip(*in_notelist))[-1]
THRESHOLD = settings.THRESHOLD
if settings.CLUSTERING != 'None':
THRESHOLD = set_threshold(
pianoroll_prob, CLUSTERING=settings.CLUSTERING)
if settings.MONOPHONIC:
# compute the graph matrix
predicted_labels = monophonic_part(
notelist, pianoroll_prob, THRESHOLD)[0]
else:
predicted_labels = polyphonic_part(
notelist, pianoroll_prob, THRESHOLD)
    return np.array(true_labels), np.array(predicted_labels)
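def _example_predict_labels():
    """Illustrative sketch only (not part of the original module): the expected
    call pattern for `predict_labels`. The toy pianoroll and notelist below are
    hypothetical; real inputs come from `utils.pianoroll_utils.make_pianorolls`,
    and the call relies on the module-level `settings`, `compute_prob` and
    `build_graph_matrix` helpers defined elsewhere in this file."""
    pianoroll_prob = np.random.rand(128, 64)      # pitch x frame probabilities
    in_notelist = np.array([[60, 0, 10, 1],       # columns: pitch, onset, offset, ismelody
                            [64, 0, 10, 0],
                            [67, 12, 20, 1]])
    true_labels, predicted_labels = predict_labels(pianoroll_prob, in_notelist)
    return true_labels, predicted_labels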
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
import numpy as np
import cost_estimation
from tabulate import tabulate
trucks_amount = 16
truck_weight = 124
truck_max_weight = 330
load_time = (33.8 * 5 + 30 + 30 + 75) / 60 * 4 / 3
paths = []
scenario = 'scenario1.xlsx'
df = pd.read_excel(scenario,'Excavator01').convert_objects(convert_numeric=True).fillna(0).round(2)
paths.append(df.iloc[2:,0:5].values.astype(float))
df = pd.read_excel(scenario,'Excavator02').convert_objects(convert_numeric=True).fillna(0).round(2)
paths.append(df.iloc[2:,0:5].values.astype(float))
df = pd.read_excel(scenario,'Excavator03').convert_objects(convert_numeric=True).fillna(0).round(2)
paths.append(df.iloc[2:,0:5].values.astype(float))
#%%
N = 100
G = 100
T = 20
sigma = 10
fric_coef = 0.02
weight = [1,1,1]
hist_cost = np.zeros((N,3))
def fitness(n,paths,loads,assignments):
productivity = 0.0
for i in range(0,3):
cycle_cost = max(cost_estimation.query(paths[i],loads[i],fric_coef) / assignments[i],load_time)
if assignments[i] == 1 :
cycle_cost += load_time
hist_cost[n,i] = cycle_cost
productivity += (loads[i]-124) * (60 / cycle_cost) * weight[i]
return productivity
def rand_pop(N, mu, sigma):
pops = np.zeros((N,6),dtype = int)
pops[:,:3] = np.random.normal(mu, sigma, (N,3)).astype(int)
assignments = np.zeros((N,3), dtype = int)
for i in range(0,N):
for j in range(0,trucks_amount):
assignments[i,np.random.randint(0,3)] += 1
pops[:,3:] = assignments
return pops
def mutate(pop, sigma):
mutated = pop.copy()
mutated[:3] += np.random.normal(0, sigma, 3).astype(int)
np.clip(mutated[:3],124,330,mutated[:3])
i = np.random.randint(3,6)
j = np.random.randint(3,6)
if (mutated[i] != 0):
mutated[i] -= 1
mutated[j] += 1
return mutated
population = rand_pop(N, 180, 40)
for i in range(0,G):
print(i)
fvalues = np.zeros(N)
for j in range(0,N):
fvalues[j] = fitness(j,paths,population[j,:3],population[j,3:])
index = np.flip(fvalues.argsort())
hist_cost = hist_cost[index]
population = population[index,:]
new_pops = np.zeros((N,6),dtype = int)
new_pops[0] = population[0]
for j in range(1,N):
new_pops[j] = mutate(population[np.random.randint(0,T)],sigma)
population = new_pops
print(tabulate([population[0]], headers=['Payload at E1', 'Payload at E2', 'Payload at E3', 'Trucks to E1', 'Trucks to E2', 'Trucks to E3'], tablefmt='orgtbl'))
#%%
import matplotlib.pyplot as plt
max_gross = 710
truck_weight = 124
cycle_time = np.zeros(max_gross-truck_weight)
prod = np.zeros(max_gross-truck_weight)
payload = range(0,max_gross-truck_weight)
for i in range(0,max_gross-truck_weight):
cycle_time[i] = cost_estimation.query(paths[2],i + 124,0.02)
prod[i] = i * 60 / cycle_time[i]
plt.figure()
plt.plot(payload,cycle_time)
plt.savefig('pvc.png')
plt.figure()
plt.plot(payload,prod)
plt.savefig('pvp.png')
#%%
prod = np.zeros(100)
fric_coef = np.zeros(100)
for i in range(0,100):
fric_coef[i] = (i + 1) / 1000
prod[i] = 200 * 60 / cost_estimation.query(paths[0],324,fric_coef[i])
plt.figure()
plt.plot(fric_coef,prod)
plt.savefig('fvp.png')
#%%
fric_coef = np.zeros(100)
cost = np.zeros(100)
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from ...util import transforms
from ...geometry import Rect
from ._util import arg_to_vec4, as_vec4
from .base_transform import BaseTransform
class NullTransform(BaseTransform):
""" Transform having no effect on coordinates (identity transform).
"""
glsl_map = "vec4 null_transform_map(vec4 pos) {return pos;}"
glsl_imap = "vec4 null_transform_imap(vec4 pos) {return pos;}"
Linear = True
Orthogonal = True
NonScaling = True
Isometric = True
@arg_to_vec4
def map(self, obj):
return obj
def imap(self, obj):
return obj
def __mul__(self, tr):
return tr
def __rmul__(self, tr):
return tr
class STTransform(BaseTransform):
""" Transform performing only scale and translate, in that order.
Parameters
----------
scale : array-like
Scale factors for X, Y, Z axes.
translate : array-like
        Translation values for X, Y, Z axes.
"""
glsl_map = """
vec4 st_transform_map(vec4 pos) {
return (pos * $scale) + $translate;
}
"""
glsl_imap = """
vec4 st_transform_imap(vec4 pos) {
return (pos - $translate) / $scale;
}
"""
Linear = True
Orthogonal = True
NonScaling = False
Isometric = False
def __init__(self, scale=None, translate=None):
super(STTransform, self).__init__()
self._update_map = True
self._update_imap = True
self._scale = np.ones(4, dtype=np.float32)
        self._translate = np.zeros(4, dtype=np.float32)
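# Illustrative note (an assumption drawn from the GLSL snippets above, not part
# of the original class): the forward map computes `pos * scale + translate`
# and the inverse computes `(pos - translate) / scale`. In plain NumPy terms:
#   scale = np.array([2., 2., 1., 1.]); translate = np.array([5., -1., 0., 0.])
#   mapped = pos * scale + translate         # st_transform_map
#   restored = (mapped - translate) / scale  # st_transform_imap recovers pos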
from __future__ import print_function
import pytest
import numpy as np
import random
from numpy.testing import assert_equal, assert_almost_equal
from choreo.interlock import compute_feasible_region_from_block_dir
from pybullet_planning import Euler, Pose, multiply, tform_point
from scipy.optimize import linear_sum_assignment
from scipy.linalg import solve_triangular, norm
def assert_approx_equal_vectors(vec1, vec2, unitize=False, tol_digit=6, exact_eq=False):
assert len(vec1) == len(vec2)
assert tol_digit > 0
for i in range(len(vec1)):
e1 = vec1[i] / np.linalg.norm(vec1) if unitize else vec1[i]
e2 = vec2[i] / np.linalg.norm(vec2) if unitize else vec2[i]
assert_almost_equal(e1, e2, tol_digit)
if exact_eq:
assert_equal(e1, e2)
def is_approx_equal_vectors(vec1, vec2, unitize=False, tol_digit=6, exact_eq=False):
assert len(vec1) == len(vec2)
assert tol_digit > 0
for i in range(len(vec1)):
        e1 = vec1[i] / np.linalg.norm(vec1) if unitize else vec1[i]
        e2 = vec2[i] / np.linalg.norm(vec2) if unitize else vec2[i]
        if round(abs(e1 - e2), tol_digit) != 0:
            return False
        if exact_eq and e1 != e2:
            return False
    return True
#
#-*- coding: utf-8 -*-
#
# -------------------------------------------------------------------------
#
# -------------------------------------------------------------------------
import math
import numpy as np
# import matplotlib
# matplotlib.use('agg')
# import matplotlib.pylab as plt
def solveForComponents(fc, pm, kphi, kvco, N, gamma, loop_type='passive2'):
"""
:Parameters:
loop_type (str) -
* passive2 - 2nd order passive
* passive3 - 3rd order passive
* passive4 - 4th order passive
* active2 - 2nd order active
* active3 - 3rd order active
* active4 - 4th order active
fc (float) - 0dB crossover frequency in Hz
pm (float) - phase margin in degrees
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
gamma (float) - optimization factor (1.024 default)
"""
if loop_type == 'passive2':
pll = PllSecondOrderPassive( fc,
pm,
kphi,
kvco,
N,
gamma=gamma )
d = pll.calc_components()
elif loop_type == 'passive3':
pll = PllThirdOrderPassive( fc,
pm,
kphi,
kvco,
N,
gamma=gamma )
d = pll.calc_components()
elif loop_type == 'passive4':
pll = PllFourthOrderPassive( fc,
pm,
kphi,
kvco,
N,
gamma=gamma )
d = pll.calc_components()
    else:
        raise ValueError("loop_type '%s' is not implemented" % loop_type)
    return d
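def _example_solve_third_order():
    """Illustrative usage only; the numeric targets are made up (loosely based
    on the 4th-order test further down): size a 3rd-order passive loop filter
    for a 900 MHz output synthesized from a 200 kHz comparison frequency."""
    vals = solveForComponents(fc=10e3, pm=47.8, kphi=4e-3, kvco=20e6,
                              N=900e6 / 200e3, gamma=1.136,
                              loop_type='passive3')
    # vals carries the loop-filter time constants (t1, t2, t3) and component
    # values (c1, c2, c3, r2, r3) in SI units.
    return vals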
class PllSecondOrderPassive( object ):
""" The 2nd order passive phase locked loop object
"""
def __init__(self,
fc,
pm,
kphi,
kvco,
N,
gamma=1.024):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
gamma (float) - optimization factor (default=1.024)
"""
self.fc = fc
self.pm = pm
self.kphi = kphi
self.kvco = kvco
self.N = N
self.gamma = gamma
def calc_components(self):
""" return a dict with the component values """
d = {}
d['t1'] = self.calc_t1(self.fc,
self.pm,
self.gamma)
d['t2'] = self.calc_t2(self.fc,
d['t1'],
self.gamma)
d['a0'] = self.calc_a0(self.kphi,
self.kvco,
self.N,
self.fc,
d['t1'],
d['t2'])
d['c1'] = self.calc_c1(d['a0'],
d['t1'],
d['t2'])
d['c2'] = self.calc_c2(d['a0'],
d['c1'])
d['r2'] = self.calc_r2(d['c2'],
d['t2'])
d['a1'] = self.calc_a1(d['c1'],
d['c2'],
d['r2'])
d['a2'] = 0
d['a3'] = 0
d['r3'] = 0
d['r4'] = 0
d['c3'] = 0
d['c4'] = 0
d['t3'] = 0
d['t4'] = 0
return d
def calc_t1(self, fc, pm, gamma=1.024):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
gamma (float) - optimization factor (default=1.024)
"""
omega_c = 2*np.pi*fc
phi = np.pi*pm/180
t1 = (np.sqrt(((1+gamma)**2)*(np.tan(phi))**2 + 4*gamma) - (1+gamma)*np.tan(phi)) / (2*omega_c)
return t1
def calc_t2(self, fc, t1, gamma=1.024):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
t1 (float) - time constant t1 in seconds
gamma (float) - optimization factor (default=1.024)
"""
omega_c = 2*np.pi*fc
return gamma/((omega_c**2)*t1)
def calc_a0(self, kphi, kvco, N, fc, t1, t2):
"""
:Parameters:
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
fc (float) - 0dB crossover frequency in Hz
t1 (float) - time constant t1 in seconds
t2 (float) - time constant t2 in seconds
"""
omega_c = 2*np.pi*fc
x = (kphi*kvco)/(N*omega_c**2)
y_num = np.sqrt(1+(omega_c**2)*(t2**2))
y_den = np.sqrt(1+(omega_c**2)*(t1**2))
a0 = x*y_num/y_den
return a0
def calc_c1(self, a0, t1, t2):
"""
:Parameters:
a0 (float) - loop filter coefficient
t1 (float) - time constant t1 in seconds
        t2 (float) - time constant t2 in seconds
"""
return a0*t1/t2
def calc_c2(self, a0, c1):
"""
:Parameters:
a0 (float) - loop filter coefficient
c1 (float) - capacitor in Farads
"""
return a0-c1
def calc_r2(self, c2, t2):
"""
:Parameters:
c2 (float) - capacitor in Farads
t2 (float) - time constant t2 in seconds
"""
return t2/c2
def calc_a1(self, c1, c2, r2):
"""
:Parameters:
c1 (float) - capacitor in Farads
c2 (float) - capacitor in Farads
r2 (float) - resistor in Ohms
"""
return c1*c2*r2
class PllThirdOrderPassive(PllSecondOrderPassive):
def __init__(self,
fc,
pm,
kphi,
kvco,
N,
gamma=1.136,
t31=0.6):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
gamma (float) - optimization factor (default=1.136)
t31 (float) - ratio of T3 to T1 (default=0.6)
"""
self.fc = fc
self.pm = pm
self.kphi = kphi
self.kvco = kvco
self.N = N
self.gamma = gamma
self.t31 = t31
def calc_components(self):
""" return a dict with the component values and coefficients """
d = {}
omega_c = 2*np.pi*self.fc
# solve for time constants
d['t1'] = self.calc_t1(self.fc,
self.pm,
self.gamma)
d['t3'] = d['t1']*self.t31
d['t2'] = self.gamma/( (omega_c**2)*(d['t1'] + d['t3'] ) )
# solve for coefficients
d['a0'] = self.calc_a0(self.kphi,
self.kvco,
self.N,
self.fc,
d['t1'],
d['t2'],
d['t3'])
d['a1'] = d['a0']*(d['t1'] + d['t3'])
d['a2'] = d['a0']*d['t1']*d['t3']
# solve for components
d['c1'] = self.calc_c1(d['a0'],
d['a1'],
d['a2'],
d['t2'])
d['c3'] = self.calc_c3( d['a0'],
d['a1'],
d['a2'],
d['t2'],
d['c1'] )
d['c2'] = d['a0'] - d['c1'] - d['c3']
d['r2'] = d['t2']/d['c2']
d['r3'] = d['a2']/(d['c1']*d['c3']*d['t2'])
d['t4'] = 0
d['a3'] = 0
d['r4'] = 0
d['c4'] = 0
return d
def calc_c3( self,
a0,
a1,
a2,
t2,
c1 ):
return ( -(t2**2)*(c1**2) + t2*a1*c1 - a2*a0 )/( (t2**2)*c1 - a2 )
def calc_c1( self,
a0,
a1,
a2,
t2 ):
return (a2/(t2**2))*(1 + np.sqrt(1 + (t2/a2)*(t2*a0 - a1) ) )
def calc_a0( self,
kphi,
kvco,
N,
fc,
t1,
t2,
t3 ):
omega_c = 2*np.pi*fc
k1 = kphi*kvco/((omega_c**2)*(N))
k2 = np.sqrt( (1+(omega_c*t2)**2)/((1+(omega_c*t1)**2)*(1+(omega_c*t3)**2) ) )
return k1*k2
def calc_t1(self,
fc,
pm,
gamma,
t31=0.6,
num_iters=100):
""" numerically solve for t1 using the bisection method
see: https://en.wikibooks.org/wiki/Numerical_Methods/Equation_Solving
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
gamma (float) - optimization factor (1.136)
num_iters (int) - number of times to loop
"""
a = 1e-15 # initial guess for a
b = 1.0 # initial guess for b
fa = self.func_t1(a,fc,pm,t31=t31,gamma=gamma)
fb = self.func_t1(b,fc,pm,t31=t31,gamma=gamma)
for i in range(num_iters):
guess = (a+b)/2
if (self.func_t1(guess,fc,pm,t31=t31,gamma=gamma) < 0):
b = guess
else:
a = guess
return guess
def func_t1(self,
x,
fc,
pm,
t31=0.6,
gamma=1.136):
""" simulate t1. This function is used to
numerically solve for T1.
Equation 22.31 in Dean Banerjee's Book
:Parameters:
x (float) - guess at t1
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
t31 (float) - ratio of t3 to t1
gamma (float) - optimization factor (1.136)
:Returns:
updated value for t1 based on guess (float)
"""
omega_c = 2*np.pi*fc
phi = pm*np.pi/180
val = np.arctan( gamma/(omega_c*x*(1+t31)) ) - \
np.arctan( omega_c*x ) - \
np.arctan( omega_c*x*t31 ) - phi
return val
def test4thOrderPassive( t31=0.4, t43=0.4 ):
fc = 10e3
pm = 47.8
kphi = 4e-3
kvco = 20e6
fout = 900e6
fpfd = 200e3
N = float(fout)/fpfd
fstart = 10
fstop = 100e6
ptsPerDec = 100
fref = 10e6
R = int(fref/fpfd)
# R = 1
pll = PllFourthOrderPassive( fc,
pm,
kphi,
kvco,
N,
gamma=1.115,
t31=t31,
t43=t43)
d = pll.calc_components()
# return d
flt = {
'c1':d['c1'],
'c2':d['c2'],
'c3':d['c3'],
'c4':d['c4'],
'r2':d['r2'],
'r3':d['r3'],
'r4':d['r4'],
'flt_type':"passive"
}
f,g,p,fz,pz,ref_cl,vco_cl = simulatePll( fstart,
fstop,
ptsPerDec,
kphi,
kvco,
N,
R,
filt=flt)
return d, fz, pz
class PllFourthOrderPassive( PllSecondOrderPassive ):
def __init__(self,
fc,
pm,
kphi,
kvco,
N,
gamma=1.115,
t31=0.107,
t43=0.107):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
gamma (float) - optimization factor (default=1.115)
t31 (float) - ratio of T3 to T1 (default=0.4)
t43 (float) - ratio of T4 to T3 (default=0.4)
note: for a realizable solution, t31 + t43 <= 1
"""
self.fc = fc
self.pm = pm
self.kphi = kphi
self.kvco = kvco
self.N = N
self.gamma = gamma
self.t31 = t31
self.t43 = t43
def calc_components(self):
""" return a dict with the component values """
d = {}
omega_c = 2*np.pi*self.fc
# solve for time constants
d['t1'] = self.calc_t1(self.fc,
self.pm,
self.gamma,
t31=self.t31,
t43=self.t43)
# d['t1'] = 4.0685e-6
# print( 't1 = ' + str(d['t1']) )
d['t3'] = d['t1']*self.t31
d['t4'] = d['t1']*self.t31*self.t43
d['t2'] = self.gamma/( (omega_c**2)*(d['t1'] + d['t3'] + d['t4'] ) )
# solve for coefficients
d['a0'] = self.calc_a0(self.kphi,
self.kvco,
self.N,
self.fc,
d['t1'],
d['t2'],
d['t3'],
d['t4'])
d['a1'] = d['a0']*(d['t1'] + d['t3'] + d['t4'])
d['a2'] = d['a0']*(d['t1']*d['t3'] + d['t1']*d['t4'] + d['t3']*d['t4'])
d['a3'] = d['a0']*d['t1']*d['t3']*d['t4']
c1_t3, r3_t3 = self.calc_c1_r3(d['a0'],d['t1'],d['t2'],d['t3'])
c1_t4, r3_t4 = self.calc_c1_r3(d['a0'],d['t1'],d['t2'],d['t4'])
d['c1'] = (c1_t3 + c1_t4)/2
d['r3'] = (r3_t3 + r3_t4)/2
d['c2'], d['c3'] = self.calc_c2_c3( d['a0'],
d['a1'],
d['a2'],
d['a3'],
d['t2'],
d['r3'],
d['c1'] )
d['c4'] = d['a0']- d['c1']- d['c2'] - d['c3']
d['r2'] = d['t2']/d['c2']
d['r4'] = d['a3']/(d['t2']*d['r3']*d['c1']*d['c3']*d['c4'])
return d
def calc_c2_c3( self,
a0,
a1,
a2,
a3,
t2,
r3,
c1 ):
k0 = (a2/a3) - 1.0/t2 - 1.0/(c1*r3) - (a0 - c1)*t2*r3*c1/a3
k1 = a1 - t2*a0 - a3/(t2*r3*c1) - (a0 - c1)*r3*c1
a = a3/((t2*c1)**2)
b = t2 + r3*(c1 - a0) + (a3/(t2*c1))*((1.0/t2) - k0)
c = k1 - (k0*a3)/t2
c2 = (-b + np.sqrt(b**2 - 4*a*c))/(2*a)
c3 = (t2*a3*c1)/(r3*(k0*t2*a3*c1 - c2*(a3 - r3*((t2*c1)**2))))
return c2, c3
def calc_c1_r3( self,
a0,
t1,
t2,
tpole):
a1_t = a0*(t1+tpole)
a2_t = a0*t1*tpole
c1_t = (a2_t/(t2**2))*(1 + np.sqrt(1 + (t2/a2_t)*(t2*a0 - a1_t)) )
c3_t = (-1*(t2**2)*(c1_t**2) + t2*a1_t*c1_t - a2_t*a0)/((t2**2)*c1_t - a2_t)
r3_t = a2_t/(c1_t*c3_t*t2)
return c1_t, r3_t
def calc_a0( self,
kphi,
kvco,
N,
fc,
t1,
t2,
t3,
t4):
omega_c = 2*np.pi*fc
k1 = kphi*kvco/((omega_c**2)*(N))
k2 = np.sqrt(
(1+(omega_c*t2)**2)/((1+(omega_c*t1)**2)*(1+(omega_c*t3)**2)*(1+(omega_c*t4)**2) )
)
return k1*k2
def calc_t1(self,
fc,
pm,
gamma,
t31=0.4,
t43=0.4,
num_iters=100):
""" numerically solve for t1 using the bisection method
see: https://en.wikibooks.org/wiki/Numerical_Methods/Equation_Solving
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
gamma (float) - optimization factor (1.136)
num_iters (int) - number of times to loop
"""
a = 1e-15 # initial guess for a
b = 1.0 # initial guess for b
fa = self.func_t1(a,fc,pm,t31=t31,t43=t43,gamma=gamma)
fb = self.func_t1(b,fc,pm,t31=t31,t43=t43,gamma=gamma)
for i in range(num_iters):
guess = (a+b)/2
if (self.func_t1(guess,fc,pm,t31=t31,t43=t43,gamma=gamma) < 0):
b = guess
else:
a = guess
return guess
def func_t1(self,
x,
fc,
pm,
t31=0.4,
t43=0.4,
gamma=1.136):
""" simulate t1. This function is used to
numerically solve for T1.
Equation 22.31 in Dean Banerjee's Book
:Parameters:
x (float) - guess at t1
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
t31 (float) - ratio of t3 to t1
gamma (float) - optimization factor (1.136)
:Returns:
updated value for t1 based on guess (float)
"""
omega_c = 2*np.pi*fc
phi = pm*np.pi/180
        # omega_c*T2 = gamma/(omega_c*(T1+T3+T4)) with T3 = t31*T1 and
        # T4 = t31*t43*T1, matching calc_components above
        val = np.arctan( gamma/(omega_c*x*(1 + t31 + t31*t43)) ) - \
              np.arctan( omega_c*x ) - \
              np.arctan( omega_c*x*t31 ) - \
              np.arctan( omega_c*x*t31*t43 ) - phi
return val
class PllFourthOrderPassive2(PllSecondOrderPassive):
def __init__(self,
fc,
pm,
kphi,
kvco,
N,
gamma=1.115):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
gamma (float) - optimization factor (default=1.115)
        (the third and fourth filter poles are fixed internally at 30*fc and 10*fc)
"""
self.fc = fc
self.pm = pm
self.kphi = kphi
self.kvco = kvco
self.N = N
self.gamma = gamma
self.pole3 = fc*30
self.pole4 = fc*10
def calc_t1(self,
fc,
pm,
gamma,
num_iters=100,
plotme=False):
""" numerically solve for t1 using the bisection method
see: https://en.wikibooks.org/wiki/Numerical_Methods/Equation_Solving
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
gamma (float) - optimization factor (1.136)
num_iters (int) - number of times to loop
"""
a = 1e-15 # initial guess for a
b = 1.0 # initial guess for b
fa = self.func_t1(a,fc,pm,gamma=gamma)
fb = self.func_t1(b,fc,pm,gamma=gamma)
for i in range(num_iters):
guess = (a+b)/2
if (self.func_t1(guess,fc,pm,gamma=gamma) < 0):
b = guess
else:
a = guess
if plotme:
x = np.linspace(a,b,1000)
y = []
for xx in x:
y.append(self.func_t1(xx,fc,pm,gamma=gamma) )
fig, ax = plt.subplots()
ax.plot(x,y,'r',label='func_t1')
plt.grid(True)
plt.show()
return guess
def func_t1(self,
t1,
fc,
pm,
gamma=1.115):
""" simulate t1. This function is used to
numerically solve for T1.
"""
omega_c = 2*np.pi*fc
phi = pm*np.pi/180
t3 = 1.0/self.pole3
t4 = 1.0/self.pole4
# val = np.arctan2( 1.0, ( (omega_c)*(t1*t3*t4) )/gamma ) - \
# np.arctan2( 1.0, 1.0/omega_c*t1 ) - \
# np.arctan2( 1.0, 1.0/omega_c*t3 ) - \
        #       np.arctan2( 1.0, 1.0/omega_c*t1*t4 ) - phi
        val = np.arctan( gamma/( (omega_c)*(t1*t3*t4) ) ) - \
              np.arctan( omega_c*t1 ) - \
              np.arctan( omega_c*t3 ) - \
              np.arctan( omega_c*t4 ) - phi
        return val
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test graph fallback """
import pytest
import numpy as np
from mindspore import ms_function, context, Tensor
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_linspace():
"""
Feature: JIT Fallback
Description: Test numpy with linspace in graph mode.
Expectation: No exception.
"""
@ms_function
def np_linspace():
a = Tensor(np.linspace(1, 10, 10))
b = Tensor(np.linspace(1, 1, 10))
c = Tensor(np.linspace(10, 20, 5, endpoint=False))
d = Tensor(np.linspace(10, 20, 5, endpoint=True))
e = Tensor(np.linspace(1, 10, 10).reshape([10, 1]))
return a, b, c, d, e
a, b, c, d, e = np_linspace()
print("a:", a)
print("b:", b)
print("c:", c)
print("d:", d)
print("e:", e)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_arange_slice_1():
"""
Feature: JIT Fallback
Description: Test numpy with arange slice in graph mode.
Expectation: No exception.
"""
@ms_function
def np_arange_slice_1():
x = np.arange(10)
index = slice(2, 7, 2)
a = Tensor(x[index])
b = Tensor(x[2:7:2])
c = Tensor(x[5])
d = Tensor(x[2:])
e = Tensor(x[2:5])
return a, b, c, d, e
a, b, c, d, e = np_arange_slice_1()
assert np.all(a.asnumpy() == np.array([2, 4, 6]))
assert np.all(b.asnumpy() == np.array([2, 4, 6]))
assert np.all(c.asnumpy() == np.array([5]))
assert np.all(d.asnumpy() == np.array([2, 3, 4, 5, 6, 7, 8, 9]))
assert np.all(e.asnumpy() == np.array([2, 3, 4]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_arange_slice_2():
"""
Feature: JIT Fallback
Description: Test numpy with arange slice in graph mode.
Expectation: No exception.
"""
@ms_function
def np_arange_slice_2():
x = np.array([[1, 2, 3], [3, 4, 5], [4, 5, 6]])
a = Tensor(x[1:])
b = Tensor(x[..., 1])
c = Tensor(x[1, ...])
d = Tensor(x[..., 1:])
return a, b, c, d
a, b, c, d = np_arange_slice_2()
assert np.all(a.asnumpy() == np.array([[3, 4, 5], [4, 5, 6]]))
assert np.all(b.asnumpy() == np.array([2, 4, 5]))
assert np.all(c.asnumpy() == np.array([3, 4, 5]))
assert np.all(d.asnumpy() == np.array([[2, 3], [4, 5], [5, 6]]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_array_advanced_index_1():
"""
Feature: JIT Fallback
Description: Test numpy with array advanced index in graph mode.
Expectation: No exception.
"""
@ms_function
def np_array_advanced_index_1():
x = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
a = Tensor(x[[0, 1, 2], [0, 1, 0]])
rows = np.array([[0, 0], [3, 3]])
cols = np.array([[0, 2], [0, 2]])
b = Tensor(x[rows, cols])
c = Tensor(x[1:3, 1:3])
d = Tensor(x[1:3, [1, 2]])
e = Tensor(x[..., 1:])
return a, b, c, d, e
a, b, c, d, e = np_array_advanced_index_1()
assert np.all(a.asnumpy() == np.array([0, 4, 6]))
assert np.all(b.asnumpy() == np.array([[0, 2], [9, 11]]))
assert np.all(c.asnumpy() == np.array([[4, 5], [7, 8]]))
assert np.all(d.asnumpy() == np.array([[4, 5], [7, 8]]))
assert np.all(e.asnumpy() == np.array([[1, 2], [4, 5], [7, 8], [10, 11]]))
# <class 'complex'> is not supported yet.
@pytest.mark.skip(reason='Graph fallback feature is not supported yet')
def test_np_array_advanced_index_2():
"""
Feature: JIT Fallback
Description: Test numpy with array advanced index in graph mode.
Expectation: No exception.
"""
@ms_function
def np_array_advanced_index_2():
x = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
y = np.array([np.nan, 1, 2, np.nan, 3, 4, 5])
z = np.array([1, 2 + 6j, 5, 3.5 + 5j])
a = Tensor(x[x > 5])
b = Tensor(y[~np.isnan(y)])
c = Tensor(z[np.iscomplex(z)])
return a, b, c
a, b, c = np_array_advanced_index_2()
assert np.all(a.asnumpy() == np.array([6, 7, 8, 9, 10, 11]))
assert np.all(b.asnumpy() == np.array([1., 2., 3., 4., 5.]))
assert np.all(c.asnumpy() == np.array([2. + 6.j, 3.5 + 5.j]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_array_advanced_index_3():
"""
Feature: JIT Fallback
Description: Test numpy with array advanced index in graph mode.
Expectation: No exception.
"""
@ms_function
def np_array_advanced_index_3():
x = np.arange(32).reshape((8, 4))
a = Tensor(x[[4, 2, 1, 7]])
y = np.arange(32).reshape((8, 4))
b = Tensor(y[[-4, -2, -1, -7]])
z = np.arange(32).reshape((8, 4))
c = Tensor(z[np.ix_([1, 5, 7, 2], [0, 3, 1, 2])])
return a, b, c
a, b, c = np_array_advanced_index_3()
print("a:", a)
print("b:", b)
print("c:", c)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_reshape():
"""
Feature: JIT Fallback
Description: Test numpy.reshape() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_reshape():
x = np.arange(8)
y = x.reshape(2, 4)
return Tensor(y)
assert np.all(np_reshape().asnumpy() == np.array([[0, 1, 2, 3], [4, 5, 6, 7]]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_ndarray_flatten():
"""
Feature: JIT Fallback
Description: Test numpy.flatten() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_ndarray_flatten():
x = np.arange(8).reshape(2, 4)
y = x.flatten()
return Tensor(y)
assert np.all(np_ndarray_flatten().asnumpy() == np.array([0, 1, 2, 3, 4, 5, 6, 7]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_ravel():
"""
Feature: JIT Fallback
Description: Test numpy.ravel() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_ravel():
x = np.arange(8).reshape(2, 4)
y = x.ravel(order='F')
return Tensor(y)
assert np.all(np_ravel().asnumpy() == np.array([0, 4, 1, 5, 2, 6, 3, 7]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_transpose():
"""
Feature: JIT Fallback
Description: Test numpy.transpose() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_transpose():
x = np.arange(4).reshape(4, 1)
y = np.transpose(x)
return Tensor(y)
assert np.all(np_transpose().asnumpy() == np.array([0, 1, 2, 3]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_rollaxis():
"""
Feature: JIT Fallback
Description: Test numpy.rollaxis() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_rollaxis():
x = np.arange(8).reshape(2, 2, 2)
tensor_x = Tensor(x)
y = np.rollaxis(x, 2, 0)
tensor_y = Tensor(y)
return tensor_x[1, 1, 0], tensor_y[1, 1, 0]
x, y = np_rollaxis()
assert x == 6 and y == 5
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_swapaxes():
"""
Feature: JIT Fallback
Description: Test numpy.swapaxes() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_swapaxes():
x = np.arange(8).reshape(2, 2, 2)
tensor_x = Tensor(x)
y = np.swapaxes(x, 2, 0)
tensor_y = Tensor(y)
return tensor_x[1, 1, 0], tensor_y[1, 1, 0]
x, y = np_swapaxes()
assert x == 6 and y == 3
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_broadcast():
"""
Feature: JIT Fallback
Description: Test numpy.broadcast() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_broadcast():
x = np.array([[1], [2], [3]])
y = np.array([4, 5, 6])
z = np.broadcast(x, y)
return Tensor(z.shape)
assert np.all(np_broadcast().asnumpy() == np.array([3, 3]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_broadcast_to():
"""
Feature: JIT Fallback
Description: Test numpy.broadcast_to() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_broadcast_to():
x = np.arange(4).reshape(1, 4)
y = np.broadcast_to(x, (2, 4))
return Tensor(y)
assert np.all(np_broadcast_to().asnumpy() == np.array([[0, 1, 2, 3], [0, 1, 2, 3]]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_expand_dims():
"""
Feature: JIT Fallback
Description: Test numpy.expand_dims() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_expand_dims():
x = np.array(([1, 2], [3, 4]))
y = np.expand_dims(x, axis=0)
return Tensor(y)
assert np.all(np_expand_dims().asnumpy() == np.array([[[1, 2], [3, 4]]]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_squeeze():
"""
Feature: JIT Fallback
Description: Test numpy.squeeze() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_squeeze():
x = np.arange(4).reshape(1, 2, 2)
y = np.squeeze(x)
return Tensor(y)
assert np.all(np_squeeze().asnumpy() == np.array([[0, 1], [2, 3]]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_concat():
"""
Feature: JIT Fallback
Description: Test numpy method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_concat():
x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
concatenate = np.concatenate((x, y))
stack = np.stack((x, y), 0)
hstack = np.hstack((x, y))
vstack = np.vstack((x, y))
return Tensor(concatenate), Tensor(stack), Tensor(hstack), Tensor(vstack)
out_concatenate, out_stack, out_hstack, out_vstack = np_concat()
assert np.all(out_concatenate.asnumpy() == np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
assert np.all(out_stack.asnumpy() == np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
assert np.all(out_hstack.asnumpy() == np.array([[1, 2, 5, 6], [3, 4, 7, 8]]))
assert np.all(out_vstack.asnumpy() == np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_split():
"""
Feature: JIT Fallback
Description: Test numpy split method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_split():
x = np.arange(4).reshape(2, 2)
split = np.split(x, 2)
hsplit = np.hsplit(x, 2)
vsplit = np.vsplit(x, 2)
return Tensor(split), Tensor(hsplit), Tensor(vsplit)
out_split, out_hsplit, out_vsplit = np_split()
assert np.all(out_split.asnumpy() == np.array([[[0, 1]], [[2, 3]]]))
assert np.all(out_hsplit.asnumpy() == np.array([[[0], [2]], [[1], [3]]]))
assert np.all(out_vsplit.asnumpy() == np.array([[[0, 1]], [[2, 3]]]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_element():
"""
Feature: JIT Fallback
Description: Test numpy method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_element():
resize = np.resize(np.array([[1, 2, 3], [4, 5, 6]]), (3, 2))
append = np.append(np.array([[1, 2, 3], [4, 5, 6]]), [[7, 8, 9]], axis=0)
insert = np.insert(np.array([[1, 2], [3, 4], [5, 6]]), 3, [7, 8], axis=0)
delete = np.delete(np.arange(6).reshape(2, 3), 0, axis=0)
unique = np.unique(np.array([5, 2, 6, 2, 7, 5, 6, 8, 2, 9]))
return Tensor(resize), Tensor(append), Tensor(insert), Tensor(delete), Tensor(unique)
out_resize, out_append, out_insert, out_delete, out_unique = np_element()
    assert np.all(out_resize.asnumpy() == np.array([[1, 2], [3, 4], [5, 6]]))
# Code from Chapter 18 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by <NAME> (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# <NAME>, 2014
import pylab as pl
import numpy as np
import scipy.optimize as so
def kernel4(data1, data2, theta, wantderiv=True, measnoise=1.):
theta = np.squeeze(theta)
# Periodic
    if np.ndim(data1) == 1:
        d1 = np.shape(data1)[0]
        n = 1
    else:
        (d1, n) = np.shape(data1)
d2 = np.shape(data2)[0]
sumxy = np.zeros((d1, d2))
for d in range(n):
D1 = np.transpose([data1[:, d]]) * np.ones((d1, d2))
        D2 = [data2[:, d]] * np.ones((d1, d2))
import numpy as np
from .dimension import embed
from .rotate import rotate_2D
__all__ = ["torus", "dsphere", "sphere", "swiss_roll", "infty_sign", "eyeglasses"]
# TODO: Make a base class that controls `ambient` and `noise`.
class Shape:
def __init__(self):
pass
def dsphere(n=100, d=2, r=1, noise=None, ambient=None, seed=None):
"""
Sample `n` data points on a d-sphere.
Parameters
-----------
n : int
Number of data points in shape.
r : float
Radius of sphere.
ambient : int, default=None
Embed the sphere into a space with ambient dimension equal to `ambient`. The sphere is randomly rotated in this high dimensional space.
seed : int, default=None
Seed for random state.
"""
np.random.seed(seed)
data = np.random.randn(n, d + 1)
# Normalize points to the sphere
data = r * data / np.sqrt(np.sum(data ** 2, 1)[:, None])
if noise:
data += noise * np.random.randn(*data.shape)
if ambient:
assert ambient > d, "Must embed in higher dimensions"
data = embed(data, ambient)
return data
def sphere(n=100, r=1, noise=None, ambient=None, seed=None):
"""
Sample `n` data points on a sphere.
Parameters
-----------
n : int
Number of data points in shape.
r : float
Radius of sphere.
ambient : int, default=None
Embed the sphere into a space with ambient dimension equal to `ambient`. The sphere is randomly rotated in this high dimensional space.
seed : int, default=None
Seed for random state.
"""
np.random.seed(seed)
theta = np.random.random((n,)) * 2.0 * np.pi
phi = np.random.random((n,)) * np.pi
rad = np.ones((n,)) * r
data = np.zeros((n, 3))
data[:, 0] = rad * np.cos(theta) * np.cos(phi)
data[:, 1] = rad * np.cos(theta) * np.sin(phi)
data[:, 2] = rad * np.sin(theta)
if noise:
data += noise * np.random.randn(*data.shape)
if ambient:
data = embed(data, ambient)
return data
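def _example_spheres(seed=0):
    """Illustrative usage, not part of the original API: sample a couple of
    noisy spheres (sizes, radii and noise levels below are arbitrary)."""
    s2 = sphere(n=500, r=2.0, noise=0.05, seed=seed)   # (500, 3) points near a radius-2 sphere
    s4 = dsphere(n=1000, d=4, r=1.0, seed=seed)        # (1000, 5) points on a 4-sphere in R^5
    # passing ambient=10 to either call would embed the cloud in a random
    # 10-dimensional subspace via `embed`
    return s2, s4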
def torus(n=100, c=2, a=1, noise=None, ambient=None, seed=None):
"""
Sample `n` data points on a torus.
Parameters
-----------
n : int
Number of data points in shape.
c : float
Distance from center to center of tube.
a : float
Radius of tube.
ambient : int, default=None
Embed the torus into a space with ambient dimension equal to `ambient`. The torus is randomly rotated in this high dimensional space.
seed : int, default=None
Seed for random state.
"""
assert a <= c, "That's not a torus"
np.random.seed(seed)
theta = np.random.random((n,)) * 2.0 * np.pi
phi = np.random.random((n,)) * 2.0 * np.pi
data = np.zeros((n, 3))
data[:, 0] = (c + a * np.cos(theta)) * np.cos(phi)
data[:, 1] = (c + a * np.cos(theta)) * np.sin(phi)
data[:, 2] = a * np.sin(theta)
if noise:
data += noise * np.random.randn(*data.shape)
if ambient:
data = embed(data, ambient)
return data
def swiss_roll(n=100, r=10, noise=None, ambient=None, seed=None):
"""Swiss roll implementation
Parameters
----------
n : int
Number of data points in shape.
r : float
Length of roll
ambient : int, default=None
Embed the swiss roll into a space with ambient dimension equal to `ambient`. The swiss roll is randomly rotated in this high dimensional space.
seed : int, default=None
Seed for random state.
References
----------
Equations mimic [Swiss Roll and SNE by jlmelville](https://jlmelville.github.io/smallvis/swisssne.html)
"""
np.random.seed(seed)
phi = (np.random.random((n,)) * 3 + 1.5) * np.pi
psi = np.random.random((n,)) * r
data = np.zeros((n, 3))
data[:, 0] = phi * np.cos(phi)
data[:, 1] = phi * np.sin(phi)
data[:, 2] = psi
if noise:
data += noise * np.random.randn(*data.shape)
if ambient:
data = embed(data, ambient)
return data
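def _example_rolled_shapes(seed=0):
    """Illustrative usage, not part of the original API: a torus and a swiss
    roll with arbitrary example parameters."""
    donut = torus(n=800, c=3.0, a=1.0, noise=0.02, seed=seed)   # (800, 3) points
    roll = swiss_roll(n=600, r=10.0, seed=seed)                 # (600, 3) points
    return donut, roll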
def infty_sign(n=100, noise=None, angle=None, seed=None):
"""Construct a figure 8 or infinity sign with :code:`n` points and noise level with :code:`noise` standard deviation.
Parameters
============
n: int
number of points in returned data set.
noise: float
standard deviation of normally distributed noise added to data.
angle: float
angle in radians to rotate the infinity sign.
seed : int
Seed for random state.
"""
    np.random.seed(seed)
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
import time
import ctypes
import tempfile
import numpy
import h5py
from pyscf import lib
from functools import reduce
from pyscf.lib import logger
from pyscf import gto
from pyscf import ao2mo
from pyscf.cc import ccsd
from pyscf.cc import _ccsd
from pyscf.cc import ccsd_rdm
from pyscf.scf import rhf_grad
from pyscf.scf import cphf
BLKSIZE = 192
def IX_intermediates(mycc, t1, t2, l1, l2, eris=None, d1=None, d2=None):
if eris is None:
# Note eris are in Chemist's notation
eris = ccsd._ERIS(mycc)
if d1 is None:
d1 = ccsd_rdm.gamma1_intermediates(mycc, t1, t2, l1, l2)
doo, dov, dvo, dvv = d1
if d2 is None:
_d2tmpfile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
fd2intermediate = h5py.File(_d2tmpfile.name, 'w')
ccsd_rdm.gamma2_outcore(mycc, t1, t2, l1, l2, fd2intermediate)
dovov = fd2intermediate['dovov']
dvvvv = fd2intermediate['dvvvv']
doooo = fd2intermediate['doooo']
doovv = fd2intermediate['doovv']
dovvo = fd2intermediate['dovvo']
dovvv = fd2intermediate['dovvv']
dooov = fd2intermediate['dooov']
else:
dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = d2
log = logger.Logger(mycc.stdout, mycc.verbose)
nocc, nvir = t1.shape
nov = nocc * nvir
nvir_pair = nvir * (nvir+1) //2
_tmpfile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
fswap = h5py.File(_tmpfile.name, 'w')
fswap.create_group('e_vvov')
fswap.create_group('c_vvov')
# Note Ioo, Ivv are not hermitian
Ioo = numpy.zeros((nocc,nocc))
Ivv = numpy.zeros((nvir,nvir))
Ivo = numpy.zeros((nvir,nocc))
Xvo = numpy.zeros((nvir,nocc))
eris_oooo = _cp(eris.oooo)
eris_ooov = _cp(eris.ooov)
d_oooo = _cp(doooo)
d_oooo = _cp(d_oooo + d_oooo.transpose(1,0,2,3))
#:Ioo += numpy.einsum('jmlk,imlk->ij', d_oooo, eris_oooo) * 2
Ioo += lib.dot(eris_oooo.reshape(nocc,-1), d_oooo.reshape(nocc,-1).T, 2)
d_oooo = _cp(d_oooo.transpose(0,2,3,1))
#:Xvo += numpy.einsum('iljk,ljka->ai', d_oooo, eris_ooov) * 2
Xvo += lib.dot(eris_ooov.reshape(-1,nvir).T, d_oooo.reshape(nocc,-1).T, 2)
Xvo +=(numpy.einsum('kj,kjia->ai', doo, eris_ooov) * 4
- numpy.einsum('kj,ikja->ai', doo+doo.T, eris_ooov))
eris_oooo = eris_ooov = d_oooo = None
d_ovov = numpy.empty((nocc,nvir,nocc,nvir))
blksize = 8
for p0, p1 in prange(0, nocc, blksize):
d_ovov[p0:p1] = _cp(dovov[p0:p1])
d_ovvo = _cp(dovvo[p0:p1])
for i in range(p0,p1):
d_ovov[i] += d_ovvo[i-p0].transpose(0,2,1)
d_ovvo = None
d_ovov = lib.transpose_sum(d_ovov.reshape(nov,nov)).reshape(nocc,nvir,nocc,nvir)
#:Ivo += numpy.einsum('jbka,jbki->ai', d_ovov, eris.ovoo)
Ivo += lib.dot(d_ovov.reshape(-1,nvir).T, _cp(eris.ovoo).reshape(-1,nocc))
eris_ovov = _cp(eris.ovov)
#:Ioo += numpy.einsum('jakb,iakb->ij', d_ovov, eris.ovov)
#:Ivv += numpy.einsum('jcib,jcia->ab', d_ovov, eris.ovov)
Ioo += lib.dot(eris_ovov.reshape(nocc,-1), d_ovov.reshape(nocc,-1).T)
Ivv += lib.dot(eris_ovov.reshape(-1,nvir).T, d_ovov.reshape(-1,nvir))
eris_ovov = None
fswap['dovvo'] = d_ovov.transpose(0,1,3,2)
d_ovov = None
max_memory = mycc.max_memory - lib.current_memory()[0]
unit = max(nvir**3*2.5, nvir**3*2+nocc*nvir**2)
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/unit))
iobuflen = int(256e6/8/(blksize*nvir))
log.debug1('IX_intermediates pass 1: block size = %d, nocc = %d in %d blocks',
blksize, nocc, int((nocc+blksize-1)/blksize))
for istep, (p0, p1) in enumerate(prange(0, nocc, blksize)):
d_ooov = _cp(dooov[p0:p1])
eris_oooo = _cp(eris.oooo[p0:p1])
eris_ooov = _cp(eris.ooov[p0:p1])
#:Ivv += numpy.einsum('ijkb,ijka->ab', d_ooov, eris_ooov)
#:Ivo += numpy.einsum('jlka,jlki->ai', d_ooov, eris_oooo)
Ivv += lib.dot(eris_ooov.reshape(-1,nvir).T, d_ooov.reshape(-1,nvir))
Ivo += lib.dot(d_ooov.reshape(-1,nvir).T, eris_oooo.reshape(-1,nocc))
#:Ioo += numpy.einsum('klja,klia->ij', d_ooov, eris_ooov)
#:Xvo += numpy.einsum('kjib,kjba->ai', d_ooov, eris.oovv)
eris_oovv = _cp(eris.oovv[p0:p1])
tmp = _cp(d_ooov.transpose(0,1,3,2).reshape(-1,nocc))
Ioo += lib.dot(_cp(eris_ooov.transpose(0,1,3,2).reshape(-1,nocc)).T, tmp)
Xvo += lib.dot(eris_oovv.reshape(-1,nvir).T, tmp)
eris_oooo = tmp = None
d_ooov = d_ooov + dooov[:,p0:p1].transpose(1,0,2,3)
eris_ovov = _cp(eris.ovov[p0:p1])
#:Ioo += numpy.einsum('ljka,lika->ij', d_ooov, eris_ooov)
#:Xvo += numpy.einsum('jikb,jakb->ai', d_ooov, eris_ovov)
for i in range(p1-p0):
lib.dot(eris_ooov[i].reshape(nocc,-1),
d_ooov[i].reshape(nocc,-1).T, 1, Ioo, 1)
lib.dot(eris_ovov[i].reshape(nvir,-1),
d_ooov[i].reshape(nocc,-1).T, 1, Xvo, 1)
d_ooov = None
#:Ioo += numpy.einsum('kjba,kiba->ij', d_oovv, eris.oovv)
#:Ivv += numpy.einsum('ijcb,ijca->ab', d_oovv, eris.oovv)
#:Ivo += numpy.einsum('kjba,kjib->ai', d_oovv, eris.ooov)
d_oovv = _cp(doovv[p0:p1]) + doovv[:,p0:p1].transpose(1,0,3,2)
for i in range(p1-p0):
Ioo += lib.dot(eris_oovv[i].reshape(nocc, -1), d_oovv[i].reshape(nocc,-1).T)
Ivv += lib.dot(eris_oovv.reshape(-1,nvir).T, d_oovv.reshape(-1,nvir))
Ivo += lib.dot(d_oovv.reshape(-1,nvir).T,
_cp(eris_ooov.transpose(0,1,3,2).reshape(-1,nocc)))
eris_ooov = None
d_oovv = _ccsd.precontract(d_oovv.reshape(-1,nvir,nvir)).reshape(p1-p0,nocc,-1)
d_ovvv = numpy.empty((p1-p0,nvir,nvir,nvir))
ao2mo.outcore._load_from_h5g(dovvv, p0*nvir, p1*nvir,
d_ovvv.reshape(-1,nvir**2))
#:Ivo += numpy.einsum('jadc,jidc->ai', d_ovvv, eris_oovv)
for i in range(p1-p0):
Ivo += lib.dot(d_ovvv[i].reshape(nvir,-1), eris_oovv[i].reshape(nocc,-1).T)
eris_oovv = None
# tril part of (d_ovvv + d_ovvv.transpose(0,1,3,2))
c_ovvv = _ccsd.precontract(d_ovvv.reshape(-1,nvir,nvir))
ao2mo.outcore._transpose_to_h5g(fswap, 'c_vvov/%d'%istep, c_ovvv, iobuflen)
c_ovvv = c_ovvv.reshape(-1,nvir,nvir_pair)
eris_ovx = _cp(eris.ovvv[p0:p1])
ao2mo.outcore._transpose_to_h5g(fswap, 'e_vvov/%d'%istep,
eris_ovx.reshape(-1,nvir_pair), iobuflen)
#:Xvo += numpy.einsum('jibc,jabc->ai', d_oovv, eris_ovvv)
#:Ivv += numpy.einsum('ibdc,iadc->ab', d_ovvv, eris_ovvv)
for i in range(p1-p0):
lib.dot(eris_ovx[i].reshape(nvir,-1),
d_oovv[i].reshape(nocc,-1).T, 1, Xvo, 1)
lib.dot(eris_ovx[i].reshape(nvir,-1),
c_ovvv[i].reshape(nvir,-1).T, 1, Ivv, 1)
c_ovvv = d_oovv = None
eris_ovvo = numpy.empty((p1-p0,nvir,nvir,nocc))
for i in range(p1-p0):
d_ovvv[i] = _ccsd.sum021(d_ovvv[i])
eris_ovvo[i] = eris_ovov[i].transpose(0,2,1)
#:Ivo += numpy.einsum('abjc,ibjc->ai', d_ovvv, eris_ovov)
Ivo += lib.dot(d_ovvv.reshape(-1,nvir).T, eris_ovvo.reshape(-1,nocc))
eris_ovvo = eris_ovov = None
eris_ovvv = lib.unpack_tril(eris_ovx.reshape(-1,nvir_pair))
eris_ovx = None
eris_ovvv = eris_ovvv.reshape(p1-p0,nvir,nvir,nvir)
#:Ivv += numpy.einsum('icdb,icda->ab', d_ovvv, eris_ovvv)
#:Xvo += numpy.einsum('jibc,jabc->ai', d_oovv, eris_ovvv)
Ivv += lib.dot(eris_ovvv.reshape(-1,nvir).T, d_ovvv.reshape(-1,nvir))
Xvo[:,p0:p1] +=(numpy.einsum('cb,iacb->ai', dvv, eris_ovvv) * 4
- numpy.einsum('cb,icba->ai', dvv+dvv.T, eris_ovvv))
d_ovvo = _cp(fswap['dovvo'][p0:p1])
#:Xvo += numpy.einsum('jbic,jbca->ai', d_ovov, eris_ovvv)
lib.dot(eris_ovvv.reshape(-1,nvir).T, d_ovvo.reshape(-1,nocc), 1, Xvo, 1)
d_ovvv = d_ovvo = eris_ovvv = None
max_memory = mycc.max_memory - lib.current_memory()[0]
unit = nocc*nvir**2 + nvir**3*2.5
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/unit))
log.debug1('IX_intermediates pass 2: block size = %d, nocc = %d in %d blocks',
blksize, nocc, int((nocc+blksize-1)/blksize))
for p0, p1 in prange(0, nvir, blksize):
off0 = p0*(p0+1)//2
off1 = p1*(p1+1)//2
d_vvvv = _cp(dvvvv[off0:off1]) * 4
for i in range(p0, p1):
d_vvvv[i*(i+1)//2+i-off0] *= .5
d_vvvv = lib.unpack_tril(d_vvvv)
eris_vvvv = lib.unpack_tril(_cp(eris.vvvv[off0:off1]))
#:Ivv += numpy.einsum('decb,deca->ab', d_vvvv, eris_vvvv) * 2
#:Xvo += numpy.einsum('dbic,dbca->ai', d_vvov, eris_vvvv)
lib.dot(eris_vvvv.reshape(-1,nvir).T, d_vvvv.reshape(-1,nvir), 2, Ivv, 1)
#:d_vvvv = _cp(d_vvvv + d_vvvv.transpose(0,1,3,2))
d_vvov = numpy.empty((off1-off0,nocc,nvir))
ao2mo.outcore._load_from_h5g(fswap['c_vvov'], off0, off1, d_vvov.reshape(-1,nov))
d_vvvo = _cp(d_vvov.transpose(0,2,1))
lib.dot(eris_vvvv.reshape(-1,nvir).T, d_vvvo.reshape(-1,nocc), 1, Xvo, 1)
d_vvov = eris_vvvv = None
eris_vvov = numpy.empty((off1-off0,nocc,nvir))
ao2mo.outcore._load_from_h5g(fswap['e_vvov'], off0, off1,
eris_vvov.reshape(-1,nov))
eris_vvvo = _cp(eris_vvov.transpose(0,2,1))
#:Ioo += numpy.einsum('abjc,abci->ij', d_vvov, eris_vvvo)
#:Ivo += numpy.einsum('dbca,dbci->ai', d_vvvv, eris_vvvo) * 2
lib.dot(d_vvvv.reshape(-1,nvir).T, eris_vvvo.reshape(-1,nocc), 2, Ivo, 1)
lib.dot(eris_vvvo.reshape(-1,nocc).T, d_vvvo.reshape(-1,nocc), 1, Ioo, 1)
eris_vvov = eris_vovv = d_vvvv = None
del(fswap['e_vvov'])
del(fswap['c_vvov'])
del(fswap['dovvo'])
fswap.close()
_tmpfile = None
if d2 is None:
for key in fd2intermediate.keys():
del(fd2intermediate[key])
fd2intermediate.close()
_d2tmpfile = None
Ioo *= -1
Ivv *= -1
Ivo *= -1
Xvo += Ivo
return Ioo, Ivv, Ivo, Xvo
def response_dm1(mycc, t1, t2, l1, l2, eris=None, IX=None):
if eris is None:
# Note eris are in Chemist's notation
eris = ccsd._ERIS(mycc)
if IX is None:
Ioo, Ivv, Ivo, Xvo = IX_intermediates(mycc, t1, t2, l1, l2, eris)
else:
Ioo, Ivv, Ivo, Xvo = IX
nocc, nvir = t1.shape
nmo = nocc + nvir
max_memory = mycc.max_memory - lib.current_memory()[0]
blksize = max(ccsd.BLKMIN, int(max_memory*1e6/8/(nocc*nvir**2)))
def fvind(x):
x = x.reshape(Xvo.shape)
if eris is None:
mo_coeff = mycc.mo_coeff
dm = reduce(numpy.dot, (mo_coeff[:,nocc:], x, mo_coeff[:,:nocc].T))
dm = (dm + dm.T) * 2
v = reduce(numpy.dot, (mo_coeff[:,nocc:].T, mycc._scf.get_veff(mol, dm),
mo_coeff[:,:nocc]))
else:
v = numpy.zeros((nocc,nvir))
for p0, p1 in prange(0, nocc, blksize):
eris_ovov = _cp(eris.ovov[p0:p1])
v[p0:p1] += numpy.einsum('iajb,bj->ia', eris_ovov, x) * 4
v[p0:p1] -= numpy.einsum('ibja,bj->ia', eris_ovov, x)
eris_ovov = None
v[p0:p1] -= numpy.einsum('ijba,bj->ia', _cp(eris.oovv[p0:p1]), x[:,p0:p1])
return v.T
mo_energy = eris.fock.diagonal()
mo_occ = numpy.zeros_like(mo_energy)
mo_occ[:nocc] = 2
dvo = cphf.solve(fvind, mo_energy, mo_occ, Xvo, max_cycle=30)[0]
dm1 = numpy.zeros((nmo,nmo))
dm1[nocc:,:nocc] = dvo
dm1[:nocc,nocc:] = dvo.T
return dm1
#
# Note: only works with canonical orbitals
# Non-canonical formula refers to JCP, 95, 2639
#
def kernel(mycc, t1=None, t2=None, l1=None, l2=None, eris=None, atmlst=None,
mf_grad=None, verbose=logger.INFO):
if t1 is None: t1 = mycc.t1
if t2 is None: t2 = mycc.t2
if l1 is None: l1 = mycc.l1
if l2 is None: l2 = mycc.l2
if eris is None: eris = ccsd._ERIS(mycc)
if mf_grad is None:
mf_grad = rhf_grad.Gradients(mycc._scf)
log = logger.Logger(mycc.stdout, mycc.verbose)
time0 = time.clock(), time.time()
mol = mycc.mol
moidx = numpy.ones(mycc.mo_coeff.shape[1], dtype=numpy.bool)
if isinstance(mycc.frozen, (int, numpy.integer)):
raise NotImplementedError('frozen orbital ccsd_grad')
moidx[:mycc.frozen] = False
else:
moidx[mycc.frozen] = False
mo_coeff = mycc.mo_coeff[:,moidx] #FIXME: ensure mycc.mo_coeff is canonical orbital
mo_energy = eris.fock.diagonal()
nocc, nvir = t1.shape
nao, nmo = mo_coeff.shape
nao_pair = nao * (nao+1) // 2
log.debug('Build ccsd rdm1 intermediates')
d1 = ccsd_rdm.gamma1_intermediates(mycc, t1, t2, l1, l2)
doo, dov, dvo, dvv = d1
time1 = log.timer('rdm1 intermediates', *time0)
log.debug('Build ccsd rdm2 intermediates')
_d2tmpfile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
fd2intermediate = h5py.File(_d2tmpfile.name, 'w')
d2 = ccsd_rdm.gamma2_outcore(mycc, t1, t2, l1, l2, fd2intermediate)
time1 = log.timer('rdm2 intermediates', *time1)
log.debug('Build ccsd response_rdm1')
Ioo, Ivv, Ivo, Xvo = IX_intermediates(mycc, t1, t2, l1, l2, eris, d1, d2)
time1 = log.timer('response_rdm1 intermediates', *time1)
dm1mo = response_dm1(mycc, t1, t2, l1, l2, eris, (Ioo, Ivv, Ivo, Xvo))
dm1mo[:nocc,:nocc] = doo + doo.T
dm1mo[nocc:,nocc:] = dvv + dvv.T
dm1ao = reduce(numpy.dot, (mo_coeff, dm1mo, mo_coeff.T))
im1 = numpy.zeros_like(dm1mo)
im1[:nocc,:nocc] = Ioo
im1[nocc:,nocc:] = Ivv
im1[nocc:,:nocc] = Ivo
im1[:nocc,nocc:] = Ivo.T
im1 = reduce(numpy.dot, (mo_coeff, im1, mo_coeff.T))
time1 = log.timer('response_rdm1', *time1)
log.debug('symmetrized rdm2 and MO->AO transformation')
_dm2file = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
# Basically, 4 times of dm2 is computed. *2 in _rdm2_mo2ao, *2 in _load_block_tril
fdm2 = h5py.File(_dm2file.name, 'w')
dm1_with_hf = dm1mo.copy()
for i in range(nocc): # HF 2pdm ~ 4(ij)(kl)-2(il)(jk), diagonal+1 because of 4*dm2
dm1_with_hf[i,i] += 1
_rdm2_mo2ao(mycc, d2, dm1_with_hf, mo_coeff, fdm2)
time1 = log.timer('MO->AO transformation', *time1)
for key in fd2intermediate.keys():
del(fd2intermediate[key])
fd2intermediate.close()
#TODO: pass hf_grad object to compute h1 and s1
log.debug('h1 and JK1')
h1 = mf_grad.get_hcore(mol)
s1 = mf_grad.get_ovlp(mol)
zeta = lib.direct_sum('i+j->ij', mo_energy, mo_energy) * .5
zeta[nocc:,:nocc] = mo_energy[:nocc]
zeta[:nocc,nocc:] = mo_energy[:nocc].reshape(-1,1)
zeta = reduce(numpy.dot, (mo_coeff, zeta*dm1mo, mo_coeff.T))
p1 = numpy.dot(mo_coeff[:,:nocc], mo_coeff[:,:nocc].T)
vhf4sij = reduce(numpy.dot, (p1, mycc._scf.get_veff(mol, dm1ao+dm1ao.T), p1))
time1 = log.timer('h1 and JK1', *time1)
# Hartree-Fock part contribution
hf_dm1 = mycc._scf.make_rdm1(mycc._scf.mo_coeff, mycc._scf.mo_occ)
dm1ao += hf_dm1
zeta += mf_grad.make_rdm1e(mycc._scf.mo_energy, mycc._scf.mo_coeff,
mycc._scf.mo_occ)
if atmlst is None:
atmlst = range(mol.natm)
offsetdic = mol.offset_nr_by_atom()
max_memory = mycc.max_memory - lib.current_memory()[0]
blksize = max(1, int(max_memory*1e6/8/(nao**3*2.5)))
ioblksize = fdm2['dm2/0'].shape[-1]
de = numpy.zeros((len(atmlst),3))
for k, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = offsetdic[ia]
# s[1] dot I, note matrix im1 is not hermitian
de[k] =(numpy.einsum('xij,ij->x', s1[:,p0:p1], im1[p0:p1])
+ numpy.einsum('xji,ij->x', s1[:,p0:p1], im1[:,p0:p1]))
# h[1] \dot DM, *2 for +c.c., contribute to f1
h1ao = mf_grad._grad_rinv(mol, ia)
h1ao[:,p0:p1] += h1[:,p0:p1]
de[k] +=(numpy.einsum('xij,ij->x', h1ao, dm1ao)
+ numpy.einsum('xji,ij->x', h1ao, dm1ao))
# -s[1]*e \dot DM, contribute to f1
de[k] -=(numpy.einsum('xij,ij->x', s1[:,p0:p1], zeta[p0:p1] )
+ numpy.einsum('xji,ij->x', s1[:,p0:p1], zeta[:,p0:p1]))
# -vhf[s_ij[1]], contribute to f1, *2 for s1+s1.T
de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], vhf4sij[p0:p1]) * 2
# 2e AO integrals dot 2pdm
ip0 = p0
for b0, b1, nf in shell_prange(mol, shl0, shl1, blksize):
eri1 = mol.intor('cint2e_ip1_sph', comp=3, aosym='s2kl',
shls_slice=(b0,b1,0,mol.nbas,0,mol.nbas,0,mol.nbas))
eri1 = eri1.reshape(3,nf,nao,-1)
dm2buf = numpy.empty((nf,nao,nao_pair))
for ic, (i0, i1) in enumerate(prange(0, nao_pair, ioblksize)):
_load_block_tril(fdm2['dm2/%d'%ic], ip0, ip0+nf, dm2buf[:,:,i0:i1])
de[k] -= numpy.einsum('xijk,ijk->x', eri1, dm2buf) * 2
eri1 = dm2buf = None
ip0 += nf
log.debug('grad of atom %d %s = %s', ia, mol.atom_symbol(ia), de[k])
time1 = log.timer('grad of atom %d'%ia, *time1)
    log.note('CCSD gradients')
log.note('==============')
log.note(' x y z')
for k, ia in enumerate(atmlst):
log.note('%d %s %15.9f %15.9f %15.9f', ia, mol.atom_symbol(ia),
de[k,0], de[k,1], de[k,2])
log.timer('CCSD gradients', *time0)
for key in fdm2.keys():
del(fdm2[key])
fdm2.close()
_d2tmpfile = _dm2file = None
return de
def shell_prange(mol, start, stop, blksize):
nao = 0
ib0 = start
for ib in range(start, stop):
now = (mol.bas_angular(ib)*2+1) * mol.bas_nctr(ib)
nao += now
if nao > blksize and nao > now:
yield (ib0, ib, nao-now)
ib0 = ib
nao = now
yield (ib0, stop, nao)
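# Illustrative use of shell_prange (the molecule below is a hypothetical
# example): iterate over the shells of a Mole object in chunks of roughly
# 8 atomic orbitals.
#   mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='cc-pvdz')
#   for shl0, shl1, nao_blk in shell_prange(mol, 0, mol.nbas, 8):
#       pass  # shells [shl0, shl1) together span nao_blk AOs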
def _rdm2_mo2ao(mycc, d2, dm1, mo_coeff, fsave=None):
log = logger.Logger(mycc.stdout, mycc.verbose)
if fsave is None:
_dm2file = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
fsave = h5py.File(_dm2file.name, 'w')
else:
_dm2file = None
time1 = time.clock(), time.time()
dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = d2
nocc, nvir = dovov.shape[:2]
nov = nocc * nvir
nao, nmo = mo_coeff.shape
nao_pair = nao * (nao+1) // 2
nvir_pair = nvir * (nvir+1) //2
mo_coeff = numpy.asarray(mo_coeff, order='F')
def _trans(vin, orbs_slice, out=None):
nrow = vin.shape[0]
if out is None:
out = numpy.empty((nrow,nao_pair))
fdrv = getattr(_ccsd.libcc, 'AO2MOnr_e2_drv')
pao_loc = ctypes.POINTER(ctypes.c_void_p)()
fdrv(_ccsd.libcc.AO2MOtranse2_nr_s1, _ccsd.libcc.CCmmm_transpose_sum,
out.ctypes.data_as(ctypes.c_void_p),
vin.ctypes.data_as(ctypes.c_void_p),
mo_coeff.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nrow), ctypes.c_int(nao),
(ctypes.c_int*4)(*orbs_slice), pao_loc, ctypes.c_int(0))
return out
# transform dm2_ij to get lower triangular (dm2+dm2.transpose(0,1,3,2))
_tmpfile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
fswap = h5py.File(_tmpfile.name)
max_memory = mycc.max_memory - lib.current_memory()[0]
blksize = max(1, int(max_memory*1e6/8/(nmo*nao_pair+nmo**3+nvir**3)))
iobuflen = int(256e6/8/(blksize*nmo))
log.debug1('_rdm2_mo2ao pass 1: blksize = %d, iobuflen = %d', blksize, iobuflen)
fswap.create_group('o') # for h5py old version
pool1 = numpy.empty((blksize,nmo,nmo,nmo))
pool2 = numpy.empty((blksize,nmo,nao_pair))
bufd_ovvv = numpy.empty((blksize,nvir,nvir,nvir))
for istep, (p0, p1) in enumerate(prange(0, nocc, blksize)):
buf1 = pool1[:p1-p0]
buf1[:,:nocc,:nocc,:nocc] = doooo[p0:p1]
buf1[:,:nocc,:nocc,nocc:] = dooov[p0:p1]
buf1[:,:nocc,nocc:,:nocc] = 0
buf1[:,:nocc,nocc:,nocc:] = doovv[p0:p1]
buf1[:,nocc:,:nocc,:nocc] = 0
buf1[:,nocc:,:nocc,nocc:] = dovov[p0:p1]
buf1[:,nocc:,nocc:,:nocc] = dovvo[p0:p1]
d_ovvv = bufd_ovvv[:p1-p0]
ao2mo.outcore._load_from_h5g(dovvv, p0*nvir, p1*nvir,
d_ovvv.reshape(-1,nvir**2))
buf1[:,nocc:,nocc:,nocc:] = d_ovvv
for i in range(p0, p1):
buf1[i-p0,i,:,:] += dm1
buf1[i-p0,:,:,i] -= dm1 * .5
buf2 = pool2[:p1-p0].reshape(-1,nao_pair)
_trans(buf1.reshape(-1,nmo**2), (0,nmo,0,nmo), buf2)
ao2mo.outcore._transpose_to_h5g(fswap, 'o/%d'%istep, buf2, iobuflen)
pool1 = pool2 = bufd_ovvv = None
time1 = log.timer_debug1('_rdm2_mo2ao pass 1', *time1)
fswap.create_group('v') # for h5py old version
pool1 = numpy.empty((blksize*nvir,nao_pair))
pool2 = numpy.empty((blksize*nvir,nvir,nvir))
for istep, (p0, p1) in enumerate(prange(0, nvir_pair, blksize*nvir)):
buf1 = _cp(dvvvv[p0:p1])
buf2 = lib.unpack_tril(buf1, out=pool2[:p1-p0])
buf1 = _trans(buf2, (nocc,nmo,nocc,nmo), out=pool1[:p1-p0])
ao2mo.outcore._transpose_to_h5g(fswap, 'v/%d'%istep, buf1, iobuflen)
pool1 = pool2 = None
time1 = log.timer_debug1('_rdm2_mo2ao pass 2', *time1)
# transform dm2_kl then dm2 + dm2.transpose(2,3,0,1)
max_memory = mycc.max_memory - lib.current_memory()[0]
blksize = max(nao, int(max_memory*1e6/8/(nao_pair+nmo**2)))
iobuflen = int(256e6/8/blksize)
log.debug1('_rdm2_mo2ao pass 3: blksize = %d, iobuflen = %d', blksize, iobuflen)
gsave = fsave.create_group('dm2')
for istep, (p0, p1) in enumerate(prange(0, nao_pair, blksize)):
gsave.create_dataset(str(istep), (nao_pair,p1-p0), 'f8')
diagidx = numpy.arange(nao)
diagidx = diagidx*(diagidx+1)//2 + diagidx
pool1 = numpy.empty((blksize,nmo,nmo))
pool2 = numpy.empty((blksize,nvir_pair))
pool3 = numpy.empty((blksize,nvir,nvir))
    pool4 = numpy.empty((blksize,nao_pair))
"""
Unit tests for crystal class
"""
__author__ = '<NAME>'
import unittest
import numpy as np
import yaml
import onsager.crystal as crystal
class UnitCellTests(unittest.TestCase):
"""Tests to make sure incell and halfcell work as expected."""
def testincell(self):
"""In cell testing"""
a = np.array([4. / 3., -2. / 3., 19. / 9.])
b = np.array([1. / 3., 1. / 3., 1. / 9.])
self.assertTrue(np.allclose(crystal.incell(a), b))
def testhalfcell(self):
"""Half cell testing"""
a = np.array([4. / 3., -2. / 3., 17. / 9.])
b = np.array([1. / 3., 1. / 3., -1. / 9.])
self.assertTrue(np.allclose(crystal.inhalf(a), b))
class GroupOperationTests(unittest.TestCase):
"""Tests for our group operations."""
def setUp(self):
self.rot = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
self.trans = np.zeros(3)
self.cartrot = np.array([[0., 1., 0.],
[1., 0., 0.],
[0., 0., 1.]])
self.indexmap = ((0,),)
self.mirrorop = crystal.GroupOp(self.rot, self.trans, self.cartrot, self.indexmap)
self.ident = crystal.GroupOp(np.eye(3, dtype=int), np.zeros(3), np.eye(3), ((0,),))
def testEquality(self):
"""Can we check if two group operations are equal?"""
self.assertNotEqual(self.mirrorop, self.rot)
self.assertEqual(self.mirrorop.incell(), self.mirrorop)
# self.assertEqual(self.mirrorop.__hash__(), (self.mirrorop + np.array([1,0,0])).__hash__())
def testAddition(self):
"""Can we add a vector to our group operation and get a new one?"""
with self.assertRaises(TypeError):
self.mirrorop + 0
v1 = np.array([1, 0, 0])
newop = self.mirrorop + v1
mirroroptrans = crystal.GroupOp(self.rot, self.trans + v1, self.cartrot, self.indexmap)
self.assertEqual(newop, mirroroptrans)
self.assertTrue(np.allclose((self.ident - v1).trans, -v1))
def testMultiplication(self):
"""Does group operation multiplication work correctly?"""
self.assertEqual(self.mirrorop * self.mirrorop, self.ident)
v1 = np.array([1, 0, 0])
trans = self.ident + v1
self.assertEqual(trans * trans, self.ident + 2 * v1)
rot3 = crystal.GroupOp(np.eye(3, dtype=int)
import inspect
from abc import ABCMeta, abstractmethod
from copy import deepcopy as _deepcopy, copy as _copy
import sympy as sp
import wrapt
import itertools
from utils.func_utils import get_cached_func_spec, make_function
from structdict import StructDict, OrderedStructDict
import numpy as np
from numpy.lib.stride_tricks import as_strided as _as_strided
import scipy.linalg as scl
import scipy.sparse as scs
from collections import namedtuple as NamedTuple
from utils.decorator_utils import cache_hashable_args
import functools
def is_scalar_like(val):
shape = getattr(val, 'shape', (1,))
return all([d==1 for d in shape])
def matmul(self, other):
if any(map(is_scalar_like, (self, other))):
return self * other
else:
return self @ other
def atleast_2d_col(arr, dtype=None, order=None) -> np.ndarray:
arr = np.asanyarray(arr, dtype=dtype, order=order)
if arr.ndim == 0:
result = arr.reshape(1, 1)
elif arr.ndim == 1:
result = arr[:, np.newaxis]
else:
result = arr
return result
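# A minimal shape check for atleast_2d_col (illustrative sketch only; this helper
# is not part of the original module and is never called automatically):
def _atleast_2d_col_examples():
    assert atleast_2d_col(5.0).shape == (1, 1)               # scalar -> 1x1
    assert atleast_2d_col([1, 2, 3]).shape == (3, 1)         # 1-D -> column vector
    assert atleast_2d_col(np.ones((2, 3))).shape == (2, 3)   # 2-D left unchanged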
def _atleast_3d_col(arr, dtype=None, order=None):
arr = np.asanyarray(arr, dtype=dtype, order=order)
if arr.ndim == 0:
result = arr.reshape(1, 1, 1)
elif arr.ndim == 1:
result = arr[:, np.newaxis, np.newaxis]
elif arr.ndim == 2:
result = arr[np.newaxis, :]
else:
result = arr
return result
def block_diag_dense_same_shape(mats, format=None, dtype=None):
arrs = _atleast_3d_col(mats, dtype=dtype)
k, n, m = arrs.shape
arrs = arrs.reshape(k * n, m)
vals = np.zeros(shape=(k * n, k * m), dtype=arrs.dtype)
vals[:, :m] = arrs
item_size = arrs.itemsize
shape = (k, n, k * m)
strides = ((k * n - 1) * m * item_size, k * m * item_size, item_size)
strided = np.ascontiguousarray(_as_strided(vals, shape=shape, strides=strides))
block_diag = strided.reshape(n * k, m * k)
return block_diag
def block_diag_dense(mats, format=None, dtype=None):
# scl.block_diag is faster for large matrices or a large number of matrices.
a_mats = _atleast_3d_col(mats)
if a_mats.dtype != np.object_ and np.prod(a_mats.shape) < 720:
block_diag = block_diag_dense_same_shape(a_mats, format=format, dtype=dtype)
else:
block_diag = scl.block_diag(*a_mats)
if dtype is not None:
block_diag = block_diag.astype(dtype)
return block_diag
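# Quick illustrative check of block_diag_dense against scipy (the two 2x2 matrices
# below are assumed toy inputs, not part of the original code path):
def _block_diag_dense_example():
    a = np.array([[1, 2], [3, 4]])
    b = np.array([[5, 6], [7, 8]])
    out = block_diag_dense([a, b])
    # Both routes should produce the same 4x4 block-diagonal matrix.
    assert np.array_equal(out, scl.block_diag(a, b))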
import timeit
def block_diag_test(a, number=1000):
def t1():
return block_diag_dense(a)
def t2():
return scl.block_diag(*a)
tt1 = timeit.timeit("t1()", globals=locals(), number=number)
print("block_diag_dense", tt1)
tt2 = timeit.timeit("t2()", globals=locals(), number=number)
print("scl.block_diag", tt2)
t1 = t1()
t2 = t2()
print("t1", t1.dtype)
print("t2", t2.dtype)
return np.array_equal(t1, t2)
def create_object_array(tup):
try:
obj_arr = np.empty(len(tup), dtype=np.object_)
except TypeError:
raise TypeError("tup must be array like.")
for ind, item in enumerate(tup):
obj_arr[ind] = item
return obj_arr
def block_toeplitz(c_tup, r_tup=None, sparse=False):
"""
Based on scipy.linalg.toeplitz method but applied in a block fashion.
"""
try:
c = np.array(c_tup)
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 24 13:06:12 2016
@author: mariapanteli
"""
import numpy as np
import matplotlib.pyplot as plt
from bokeh.models import HoverTool, TapTool, CustomJS
from bokeh.plotting import figure, show, save, output_file, ColumnDataSource
from mpl_toolkits.basemap import Basemap
from shapely.geometry import Point, Polygon
import random
from bokeh.models.widgets import Panel, Tabs
import os
SHAPEFILE = os.path.join(os.path.dirname(__file__), 'util_data', 'shapefiles', 'ne_110m_admin_0_countries')
def get_random_point_in_polygon(poly):
'''Select at random a point within given polygon boundaries.
Parameters
----------
poly : Polygon
The polygon boundaries.
Returns
-------
p : Point
A random point (x, y coords) inside the given polygon.
'''
(minx, miny, maxx, maxy) = poly.bounds
while True:
p = Point(random.uniform(minx, maxx), random.uniform(miny, maxy))
if poly.contains(p):
return p
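# Illustrative usage of the rejection sampler above (the unit square is an assumed
# toy polygon, not one of the country shapes used elsewhere in this module):
def _random_point_example():
    square = Polygon([(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)])
    p = get_random_point_in_polygon(square)
    assert square.contains(p)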
def get_random_point_in_country_poly(countries_data):
'''Load country polygons and selects a point at random within each polygon.
Parameters
----------
countries_data : np.array, 1D
Names of countries to select random points.
Returns
-------
data_x : list of float
The x-coordinates of random points within each country in countries_data.
data_y : list of float
The y-coordinates of random points within each country in countries_data.
'''
pp_x, pp_y, coords_poly, countries_poly = get_countries_lonlat_poly(SHAPEFILE)
data_x = []
data_y = []
for country in countries_data:
#print country
poly_inds = np.where(countries_poly==country)[0]
if len(poly_inds)<1:
data_x.append(np.nan)
data_y.append(np.nan)
continue
poly = coords_poly[poly_inds[0]]
if len(poly_inds)>1:
# if many polys for country choose the largest one (ie most points)
len_list = [len(pp_x[poly_ind]) for poly_ind in poly_inds]
poly = coords_poly[poly_inds[np.argmax(len_list)]]
p = Polygon(poly)
point_in_poly = get_random_point_in_polygon(p)
data_x.append(point_in_poly.x)
data_y.append(point_in_poly.y)
return data_x, data_y
def get_countries_lonlat_poly(shapefile):
'''Load spatial information for each country from shapefiles.
Parameters
----------
shapefile : str
Path to shapefile.
Returns
-------
pp_x : list of float
The x-coordinates of country polygons.
pp_y : list of float
The y-coordinates of country polygons.
mm.units : list of float tuples.
Polygon coordinates for each country.
countries_poly : np.array, 1D
Country names for each polygon.
'''
mm=Basemap()
mm.readshapefile(shapefile, 'units', color='#444444', linewidth=.2)
pp_x = []
pp_y = []
for shape in mm.units:
pp_x.append([ss[0] for ss in shape])
pp_y.append([ss[1] for ss in shape])
countries_poly = []
for mm_info in mm.units_info:
countries_poly.append(mm_info['admin'])
countries_poly = np.array(countries_poly, dtype=str)
#(-52.55642473001839, 2.504705308437053) for French Guiana
countries_poly[102] = 'French Guiana' # manual correction
return pp_x, pp_y, mm.units, countries_poly
def add_bokeh_interactivity(p, r, hover_outlier=False):
'''Add plot interactivity.
'''
callback = CustomJS(args=dict(r=r), code="""
var inds = cb_obj.get('selected')['1d'].indices;
var d1 = cb_obj.get('data');
url = d1['url'][inds[0]];
if (url){
window.open(url);}""")
hover_tooltips = """
<div>
<div><span style="font-size: 17px; font-weight: bold;">@name</span></div>
<div><span style="font-size: 12px;">@info</span></div>
</div>"""
hover_tooltips_outlier = """
<div>
<div><span style="font-size: 17px; font-weight: bold;">@name</span></div>
<div><span style="font-size: 12px;">@info</span></div>
<div><span style="font-size: 10px; color: #500;">@outlierMD</span></div>
<div><span style="font-size: 12px;">@collection</span></div>
</div>"""
if hover_outlier:
p.add_tools(HoverTool(renderers=[r], tooltips=hover_tooltips_outlier))
else:
p.add_tools(HoverTool(renderers=[r], tooltips=hover_tooltips))
p.add_tools(TapTool(renderers=[r], callback = callback))
return p
def beautify_bokeh_background(p):
'''Remove unnecessary background in plot.
'''
p.outline_line_color = None
p.grid.grid_line_color=None
p.axis.axis_line_color=None
p.axis.major_label_text_font_size='0pt'
p.axis.major_tick_line_color=None
p.axis.minor_tick_line_color=None
return p
def plot_outliers_world_figure(MD, y_pred, df, out_file=None):
'''Visualise outliers on an interactive world map figure.
Parameters
----------
MD : np.array, float, 1D
Mahalanobis distances for each data point.
y_pred : np.array, boolean, 1D
Whether data point was detected as an outlier or not.
df : pd.DataFrame
Additional metadata (country, culture, language, genre, collection) for each data point.
out_file : str
Path to export html file.
Returns
-------
p : bokeh
The interactive map.
'''
pp_x, pp_y, coords_poly, countries_poly = get_countries_lonlat_poly(SHAPEFILE)
data_x, data_y = get_random_point_in_country_poly(df['Country'].get_values())
alpha_color = (MD - np.min(MD)
import copy
import math
import os
import sys
import time
import gfootball.env as football_env
import numpy as np
import torch
from a2c_ppo_acktr.envs import EpisodeRewardScoreWrapper
from a2c_ppo_acktr.envs import GFNoopResetEnv
from a2c_ppo_acktr.storage_ma import RolloutStorageMA
from gym.spaces.discrete import Discrete
from torch.distributions import Categorical
from utils import dict2csv
def env_step(rank, args, action_logits, values, observations,
rollout_storages, wait, done_list, step_dones, please_load_model, please_load_model_actor,
shared_cpu_actor_critics, shared_cpu_actor_critics_env_actor, all_episode_scores, vgl_display):
"""
The environment process grabs an action logit from the action buffer and samples an action according to it.
It then executes the action and sends the next observation to the observation buffer. The transition tuples are
stored in the rollout storage.
Args:
rank: environment process id.
args: command line argument.
action_logits: A shared PyTorch tensor served as an action buffer.
values: A shared PyTorch tensor served as a value buffer.
observations: A shared PyTorch tensor served as an observation buffer.
rollout_storages: A list of two rollout storage.
wait: A shared list that indicates if environment processes are waiting for updated model.
done_list: A shared list that indicates if environment processes finish all steps.
step_dones: A shared list to indicate environment processes finish one environment step.
please_load_model: A shared integer. Set to zero once the updated model from the learner has been loaded.
please_load_model_actor: A shared array between the actors and environment process 0. It is set to one when an
updated model is available.
shared_cpu_actor_critics: A list of shared models. It contains the updated parameters.
shared_cpu_actor_critics_env_actor: Shared models between actor and environment processes. Actor processes will
load models from environment process 0.
all_episode_scores: A shared list that collects episode scores from all environment processes.
Returns:
None
"""
os.environ['VGL_DISPLAY'] = vgl_display
torch.manual_seed(args.seed + rank)
env = football_env.create_environment(
representation=args.representation,
env_name=args.env_name,
stacked=('stacked' in args.state),
rewards=args.reward_experiment,
logdir=args.log_dir,
render=args.render and (args.seed == 0),
dump_frequency=50 if args.render and args.seed == 0 else 0,
other_config_options={'game_engine_random_seed': args.seed + rank})
env = EpisodeRewardScoreWrapper(env,
number_of_left_players_agent_controls=1,
number_of_right_players_agent_controls=0)
env.seed(args.seed + rank)
if args.noop > 0:
env = GFNoopResetEnv(env, noop_max=args.noop, seed=args.seed + rank)
if args.num_agents == 1:
from a2c_ppo_acktr.envs import ObsUnsqueezeWrapper
env = ObsUnsqueezeWrapper(env)
env = EpisodeRewardScoreWrapper(env,
number_of_left_players_agent_controls=args.num_left_agents,
number_of_right_players_agent_controls=args.num_right_agents)
step_dones_np = np.frombuffer(step_dones.get_obj(), dtype=np.int32)
step_dones_np = step_dones_np.reshape(args.num_processes)
obs = env.reset()
aug_feat_dim = 0
# store the rollout by this process. After args.sync_every steps, batch copy to rollouts
local_rollouts = RolloutStorageMA(args.sync_every, 1, env.observation_space.shape[1:],
env.action_space if args.num_agents == 1 else Discrete(
env.action_space.nvec[0]),
recurrent_hidden_state_size=1, num_agents=args.num_agents,
aug_size=aug_feat_dim)
observations[rank] = torch.from_numpy(obs)
step_dones_np[rank] = 1
local_rollouts.obs[0].copy_(torch.from_numpy(obs).float().unsqueeze(0))
num_steps = int(math.ceil(args.num_env_steps / args.num_processes))
recurrent_hidden_states = torch.ones(1)
print('Num of steps per environment', num_steps)
sync_count = 0
target_eval_step = 0
if rank == 0:
plot = {'steps': [], 'avg_scores': [], 'time_elapsed': [], 'fps': [], 'avg_rewards': [],
'final_scores': [], 'final_rewards': [], 'fps_one_sync': []}
scores = []
episode_rewards = []
start_sync = time.time()
start_rollout = time.time()
env_step_timer_start = time.time()
if args.dump_traj_flag:
prev_obs = copy.deepcopy(obs)
dump_traj = {'action': [], 'obs': [], 'action_logit': [], 'v': []}
for step in range(num_steps):
# Observe reward and next observation
while True:
if step_dones_np[rank] == 0:
break
value_pred = values[rank].clone()
dist = Categorical(logits=copy.deepcopy(action_logits[rank]))
action = dist.sample()
action_log_prob = dist.log_probs(action)
obs, reward, done, infos = env.step(action.numpy().reshape(-1))
if args.dump_traj_flag:
dump_traj['action'].append(action)
dump_traj['obs'].append(prev_obs)
dump_traj['action_logit'].append(
copy.deepcopy(action_logits[rank]))
dump_traj['v'].append(value_pred)
if done:
if rank == 0:
scores.append(infos['episode_score'])
sys.stdout.flush()
obs = env.reset()
episode_rewards.append(np.sum(infos['episode_reward'][:args.num_left_agents]))
import gym
from gym import spaces
import numpy as np
import pandas as pd
import math
import tensorflow as tf
N_DAYS = 100
MAX_OCCUPANCY = 300
MIN_OCCUPANCY = 125
def cost_function(prediction, penalties_array, family_size, days, choice_array_num):
prediction = np.around(prediction * 100).astype(int)
penalty = 0
# We'll use this to count the number of people scheduled each day
daily_occupancy = np.zeros((len(days)+1))
N = family_size.shape[0]
# Looping over each family; d is the day, n is size of that family,
# and choice is their top choices
for i in range(N):
# add the family member count to the daily occupancy
n = family_size[i]
d = prediction[i]
choice = choice_array_num[i]
daily_occupancy[d] += n
# Calculate the penalty for not getting top preference
penalty += penalties_array[n, choice[d]]
# for each date, check total occupancy
# (using soft constraints instead of hard constraints)
relevant_occupancy = daily_occupancy[1:]
incorrect_occupancy = np.any(
(relevant_occupancy > MAX_OCCUPANCY) |
(relevant_occupancy < MIN_OCCUPANCY)
)
if incorrect_occupancy:
penalty += 100000000
# Calculate the accounting cost
# The first day (day 100) is treated special
init_occupancy = daily_occupancy[days[0]]
accounting_cost = (init_occupancy - 125.0) / 400.0 * init_occupancy**(0.5)
# using the max function because the soft constraints might allow occupancy to dip below 125
accounting_cost = max(0, accounting_cost)
# Loop over the rest of the days, keeping track of previous count
yesterday_count = init_occupancy
for day in days[1:]:
today_count = daily_occupancy[day]
diff = np.abs(today_count - yesterday_count)
accounting_cost += max(0, (today_count - 125.0) / 400.0 * today_count**(0.5 + diff / 50.0))
yesterday_count = today_count
penalty += accounting_cost
return penalty
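# Worked example of the accounting-cost term above (hand-computed, assumed numbers):
# with 130 people on the first counted day the initial cost is
# (130 - 125) / 400 * 130**0.5 ~= 0.14, and a following day with 150 people and a
# 20-person swing from the previous day contributes
# (150 - 125) / 400 * 150**(0.5 + 20/50) ~= 5.68.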
class Santa_env(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self):
super(Santa_env, self).__init__()
self.state = None
self.counter = 0
self.done = 0
self.add = [0, 0]
self.reward = 0
self.cnt = 0
# Action: a pair of continuous values in [0, 1]
self.action_space = spaces.Box(
low=0, high=1, shape=(2,), dtype=np.float16)
# Observation: the normalised assigned day (day / 100) for each of the 5000 families
self.observation_space = spaces.Box(
low=0, high=1, shape=(5000,), dtype=np.float16)
self.load_dataset()
self.lastScore = cost_function(self.state, self.penalties_array, self.family_size, self.days_array, self.choice_array_num)
def load_dataset(self):
fpath = './Data/family_data.csv'
self.data = pd.read_csv(fpath, index_col='family_id')
fpath = './Data/sample_submission.csv'
self.sample_submission = pd.read_csv(fpath, index_col='family_id')["assigned_day"].values
self.state = self.sample_submission.copy() / 100
self.family_size = self.data.n_people.values
self.days_array = np.arange(N_DAYS, 0, -1)
import os
import numpy as np
import matplotlib.pyplot as plt
from robotics.estimation import KalmanFilter
class Vehicle2DEstimation:
def __init__(self) -> None:
delta_t = 1
newton_acc = 0.5*delta_t*delta_t
# no control inputs
# fmt: off
F = np.array([
[1 , delta_t , newton_acc , 0 , 0 , 0] ,
[0 , 1 , delta_t , 0 , 0 , 0] ,
[0 , 0 , 1 , 0 , 0 , 0] ,
[0 , 0 , 0 , 1 , delta_t , newton_acc] ,
[0 , 0 , 0 , 0 , 1 , delta_t] ,
[0 , 0 , 0 , 0 , 0 , 1]
])
print(F)
q_directions = np.array(
[
[delta_t**4/4 , delta_t**3/2 , delta_t**2/2] ,
[delta_t**3/2 , delta_t**2 , delta_t] ,
[delta_t**2/2 , delta_t , 1]
]
)
# fmt: on
Q = np.zeros((6, 6))
Q[0:3, 0:3] = q_directions
Q[3:, 3:] = q_directions
# sigma for the acceleration
sigma_a = 0.15
print(Q)
Q *= sigma_a*sigma_a
init_state = np.zeros(6)
# high estimate uncertainty gives a high weight to the measurement
init_cov = np.eye(6)*500
# observation matrix
self.H = np.array(
[
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0]
]
)
self.R = np.eye(2)*9
self.kalman = KalmanFilter(init_state, init_cov, F, Q)
self.kalman.state, self.kalman.covariance = self.kalman.predict()
print("Initialized filter: \n", self.kalman.state, "\n", self.kalman.covariance)
cur_path = os.path.dirname(os.path.abspath(__file__))
data = np.load(cur_path + "/data/linear_kalman_vehicle_data.npz")
self.xs = data['x']
self.ys = data['y']
if __name__ == "__main__":
vehicle = Vehicle2DEstimation()
vehicle.kalman.update(
np.array([-393.66, 300.4]).reshape(-1, 1),
vehicle.H,
vehicle.R
)
states = [vehicle.kalman.state.flatten()]
vehicle.kalman.predict()
# perform simulation over the rest of the measurements
for x in range(1, len(vehicle.xs)):
point = np.array([vehicle.xs[x], vehicle.ys[x]]).reshape(-1, 1)
vehicle.kalman.update(point, vehicle.H, vehicle.R)
# update the state
cur_state = vehicle.kalman.state
states.append(cur_state.flatten())
# prediction step
vehicle.kalman.predict()
states = np.array(states)
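# Illustrative only: a quick visual check of the filtered track against the raw
# measurements (assumes the state layout [x, vx, ax, y, vy, ay] implied by F above).
plt.plot(vehicle.xs, vehicle.ys, 'r.', label='measurements')
plt.plot(states[:, 0], states[:, 3], 'b-', label='Kalman estimate')
plt.legend()
plt.show()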
# <NAME>
# 3/18/2019
# General object to run empirical sr actflow process
# For group-level/cross-subject analyses
import numpy as np
import os
import multiprocessing as mp
import scipy.stats as stats
import nibabel as nib
import os
os.environ['OMP_NUM_THREADS'] = str(1)
import sklearn
from scipy import signal
import h5py
import sys
sys.path.append('glmScripts/')
import glmScripts.taskGLMPipeline_v2 as tgp
import sys
import pandas as pd
import pathlib
import calculateFC as fc
import tools
# Using final partition
networkdef = np.loadtxt('/home/ti61/f_mc1689_1/NetworkDiversity/data/network_partition.txt')
networkorder = np.asarray(sorted(range(len(networkdef)), key=lambda k: networkdef[k]))
networkorder.shape = (len(networkorder),1)
# network mappings for final partition set
networkmappings = {'fpn':7, 'vis1':1, 'vis2':2, 'smn':3, 'aud':8, 'lan':6, 'dan':5, 'con':4, 'dmn':9,
'pmulti':10, 'none1':11, 'none2':12}
networks = networkmappings.keys()
## General parameters/variables
nParcels = 360
class Model():
"""
Class to perform empirical actflow for a given subject (stimulus-to-response)
"""
def __init__(self,projectdir='/home/ti61/f_mc1689_1/SRActFlow/',ruletype='12',n_hiddenregions=10,randomize=False,scratchfcdir=None):
"""
instantiate:
indices for condition types
indices for specific condition instances
betas
"""
#### Set up basic model parameters
self.projectdir = projectdir
# Excluding 084
self.subjNums = ['013','014','016','017','018','021','023','024','026','027','028','030','031','032','033',
'034','035','037','038','039','040','041','042','043','045','046','047','048','049','050',
'053','055','056','057','058','062','063','066','067','068','069','070','072','074','075',
'076','077','081','085','086','087','088','090','092','093','094','095','097','098','099',
'101','102','103','104','105','106','108','109','110','111','112','114','115','117','119',
'120','121','122','123','124','125','126','127','128','129','130','131','132','134','135',
'136','137','138','139','140','141']
self.inputtypes = ['RED','VERTICAL','CONSTANT','HIGH']
self.ruletype = ruletype
#### Load in atlas
glasserfile2 = projectdir + 'data/Q1-Q6_RelatedParcellation210.LR.CorticalAreas_dil_Colors.32k_fs_RL.dlabel.nii'
glasser2 = nib.load(glasserfile2).get_data()
glasser2 = np.squeeze(glasser2)
self.glasser2 = glasser2
####
# Define hidden units
if n_hiddenregions!=None:
#######################################
#### Select hidden layer regions
hiddendir = projectdir + 'data/results/MAIN/RSA/'
hiddenregions = np.loadtxt(hiddendir + 'RSA_Similarity_SortedRegions2.txt',delimiter=',')
#######################################
#### Output directory
if randomize:
print("Constructing model with", n_hiddenregions, "randomly selected hidden regions")
fcdir = scratchfcdir
#### Necessary to optimize amarel
pathlib.Path(fcdir).mkdir(parents=True, exist_ok=True) # Make sure directory exists
hiddenregions = np.random.choice(hiddenregions,size=n_hiddenregions,replace=False)
else:
print("Constructing model with", n_hiddenregions, "hidden regions")
fcdir = projectdir + 'data/results/MAIN/fc/LayerToLayerFC_' + str(n_hiddenregions) + 'Hidden/'
pathlib.Path(fcdir).mkdir(parents=True, exist_ok=True) # Make sure directory exists
# Select hidden layer
if n_hiddenregions < 0:
hiddenregions = hiddenregions[n_hiddenregions:]
else:
hiddenregions = hiddenregions[:n_hiddenregions]
## Set object attributes
self.n_hiddenregions = n_hiddenregions
self.hiddenregions = np.squeeze(hiddenregions)
self.fcdir = fcdir
self.hidden = True # Set this variable to true - indicates to run sr simulations with a hidden layer
#### identify hidden region vertex indices
hidden_ind = []
for roi in hiddenregions:
hidden_ind.extend(np.where(self.glasser2==roi+1)[0])
self.hidden_ind = hidden_ind
else:
print("Constructing model with NO hidden layers")
fcdir = projectdir + 'data/results/MAIN/fc/LayerToLayerFC_NoHidden/'
pathlib.Path(fcdir).mkdir(parents=True, exist_ok=True) # Make sure directory exists
self.hidden = False # Set this variable to true - indicates to run sr simulations with a hidden layer
self.fcdir = fcdir
self.hiddenregions = None
self.n_hiddenregions = n_hiddenregions
####
# Define task rule (input) layer
ruledir = self.projectdir + 'data/results/MAIN/RuleDecoding/'
if ruletype=='12':
rule_regions = np.loadtxt(ruledir + self.ruletype + 'Rule_Regions.csv',delimiter=',')
elif ruletype=='fpn':
rule_regions = []
rule_regions.extend(np.where(networkdef==networkmappings['fpn'])[0])
rule_regions = np.asarray(rule_regions)
elif ruletype=='nounimodal':
allrule_regions = np.loadtxt(ruledir + '12Rule_Regions.csv',delimiter=',')
unimodal_nets = ['vis1','aud']
unimodal_regions = []
for net in unimodal_nets:
unimodal_regions.extend(np.where(networkdef==networkmappings[net])[0])
# only include regions that are in allrule_regions but also NOT in unimodal_regions
rule_regions = []
for roi in allrule_regions:
if roi in unimodal_regions:
continue
else:
rule_regions.append(roi)
rule_regions = np.asarray(rule_regions)
rule_ind = []
for roi in rule_regions:
rule_ind.extend(np.where(self.glasser2==roi+1)[0])
self.rule_ind = rule_ind
####
# Define motor regions
# Set indices for layer-by-layer vertices
targetdir = projectdir + 'data/results/MAIN/MotorResponseDecoding/'
motor_resp_regions_LH = np.loadtxt(targetdir + 'MotorResponseRegions_LH.csv',delimiter=',')
motor_resp_regions_RH = np.loadtxt(targetdir + 'MotorResponseRegions_RH.csv',delimiter=',')
targetROIs = np.hstack((motor_resp_regions_LH,motor_resp_regions_RH))
# Define all motor_ind
motor_ind = []
for roi in targetROIs:
roi_ind = np.where(glasser2==roi+1)[0]
motor_ind.extend(roi_ind)
motor_ind = np.asarray(motor_ind).copy()
self.motor_ind = motor_ind
#### override -- only pick the motor parcel with the greatest response decoding
motor_ind_lh = []
for roi in motor_resp_regions_LH:
# only include left hand responses in the right hemisphere
if roi>=180:
roi_ind = np.where(glasser2==roi+1)[0]
motor_ind_lh.extend(roi_ind)
motor_ind_rh = []
for roi in motor_resp_regions_RH:
# only include left hand responses in the right hemisphere
if roi<180:
roi_ind = np.where(glasser2==roi+1)[0]
motor_ind_rh.extend(roi_ind)
#
motor_ind_rh = np.asarray(motor_ind_rh).copy()
motor_ind_lh = np.asarray(motor_ind_lh).copy()
self.motor_ind_rh = motor_ind_rh
self.motor_ind_lh = motor_ind_lh
#### Load model task set
filename= projectdir + 'data/results/MAIN/EmpiricalSRActFlow_AllTrialKeys_15stims_v3.csv' # Great
self.trial_metadata = pd.read_csv(filename)
def computeGroupFC(self,n_components=500,nproc='max'):
"""
Function that wraps _computeSubjFC() to compute FC for all subjs, and computes averaged groupFC
"""
if nproc=='max':
nproc=mp.cpu_count()
inputs = []
for subj in self.subjNums:
inputs.append((subj,n_components))
pool = mp.Pool(processes=nproc)
if self.hidden:
pool.starmap_async(self._computeSubjFC,inputs)
else:
pool.starmap_async(self._computeSubjFC_NoHidden,inputs)
pool.close()
pool.join()
#### Compute group FC
for inputtype in self.inputtypes:
if self.hidden:
fc.computeGroupFC(inputtype,self.fcdir)
else:
fc.computeGroupFC_NoHidden(inputtype,self.fcdir)
if self.hidden:
fc.computeGroupFC(self.ruletype,self.fcdir)
else:
fc.computeGroupFC_NoHidden(self.ruletype,self.fcdir)
def loadRealMotorResponseActivations(self,vertexmasks=True):
#### Load motor response activations localized in output vertices only (for faster loading)
if vertexmasks:
print('Load real motor responses in output vertices')
self.data_task_rh, self.data_task_lh = tools.loadMotorResponsesOutputMask()
else:
print('Load real motor responses in output parcels -- inefficient since need to load all vertices first')
data_task_rh = []
data_task_lh = []
for subj in self.subjNums:
tmp_rh = tools.loadMotorResponses(subj,hand='Right')
tmp_lh = tools.loadMotorResponses(subj,hand='Left')
data_task_rh.append(tmp_rh[self.motor_ind_rh,:].copy().T)
data_task_lh.append(tmp_lh[self.motor_ind_lh,:].copy().T)
self.data_task_rh = np.asarray(data_task_rh).T
self.data_task_lh = np.asarray(data_task_lh).T
def loadModelFC(self):
if self.hidden:
print('Load Model FC weights')
fcdir = self.fcdir
self.fc_input2hidden = {}
self.eig_input2hidden = {}
for inputtype in ['VERTICAL','RED','HIGH','CONSTANT']:
self.fc_input2hidden[inputtype], self.eig_input2hidden[inputtype] = tools.loadGroupActFlowFC(inputtype,fcdir)
# Load rule to hidden
self.fc_12rule2hidden, self.eig_12rule2hidden = tools.loadGroupActFlowFC(self.ruletype,fcdir)
# Load hidden to motor resp mappings
self.fc_hidden2motorresp, self.eig_hidden2motorresp = tools.loadGroupActFlowFC('hidden2out',fcdir)
else:
print('Load Model FC weights -- No hidden layer')
fcdir = self.fcdir
self.fc_input2output = {}
self.eig_input2output = {}
for inputtype in ['VERTICAL','RED','HIGH','CONSTANT']:
self.fc_input2output[inputtype], self.eig_input2output[inputtype] = tools.loadGroupActFlowFC_NoHidden(inputtype,fcdir)
# Load rule to hidden
self.fc_12rule2output, self.eig_12rule2output = tools.loadGroupActFlowFC_NoHidden('12',fcdir)
def simulateGroupActFlow(self,thresh=0,nproc='max',vertexmasks=True):
"""
Simulate group level actflow (all subject simulations)
"""
if nproc=='max':
nproc=mp.cpu_count()
inputs = []
for subj in self.subjNums:
inputs.append((subj,thresh))
if nproc == 1:
results = []
for input1 in inputs:
results.append(self._simulateSubjActFlow(input1[0],input1[1]))
else:
pool = mp.Pool(processes=nproc)
results = pool.starmap_async(self._simulateSubjActFlow,inputs).get()
pool.close()
pool.join()
actflow_predictions = np.zeros((len(self.subjNums),len(self.motor_ind),4))
#actflow_predictions_noReLU = np.zeros((len(self.subjNums),len(self.motor_ind),4))
scount = 0
for result in results:
# actflow_predictions[scount,:,:] = result[0]
# actflow_predictions_noReLU[scount,:,:] = result[1]
actflow_predictions[scount,:,:] = result
scount += 1
## Reformat to fit shape of actual data array
actflow_rh = np.zeros((len(self.glasser2),2,len(self.subjNums)))
actflow_lh = np.zeros((len(self.glasser2),2,len(self.subjNums)))
for scount in range(len(self.subjNums)):
# RMID
actflow_rh[self.motor_ind,0,scount] = actflow_predictions[scount,:,2]
# RIND
actflow_rh[self.motor_ind,1,scount] = actflow_predictions[scount,:,3]
# LMID
actflow_lh[self.motor_ind,0,scount] = actflow_predictions[scount,:,0]
# LIND
actflow_lh[self.motor_ind,1,scount] = actflow_predictions[scount,:,1]
#### Now save out only relevant output mask vertices
if vertexmasks:
tmp = np.squeeze(nib.load(self.projectdir + 'data/results/MAIN/MotorRegionsMasksPerSubj/sractflow_smn_outputRH_mask.dscalar.nii').get_data())
rh_ind = np.where(tmp==True)[0]
actflow_rh = actflow_rh[rh_ind,:,:]
tmp = np.squeeze(nib.load(self.projectdir + 'data/results/MAIN/MotorRegionsMasksPerSubj/sractflow_smn_outputLH_mask.dscalar.nii').get_data())
lh_ind = np.where(tmp==True)[0]
actflow_lh = actflow_lh[lh_ind,:,:].copy()
else:
actflow_rh = actflow_rh[self.motor_ind_rh,:,:].copy()
actflow_lh = actflow_lh[self.motor_ind_lh,:,:].copy()
return actflow_rh, actflow_lh
def actflowDecoding(self,trainset,testset,outputfile,
nbootstraps=1000,featsel=False,nproc='max',null=False,verbose=True):
if nproc=='max':
nproc=mp.cpu_count()
# Decoding
for i in range(nbootstraps):
distances_baseline = np.zeros((1,len(self.subjNums)*2)) # subjs * nlabels
distances_baseline[0,:],rmatch,rmismatch, confusion_mats = tools.actflowDecodings(testset,trainset,
effects=True, featsel=featsel,confusion=True,permutation=null,
ncvs=1, nproc=nproc)
##### Save out and append file
# Open/create file
filetxt = open(outputfile,"a+")
# Write out to file
print(np.mean(distances_baseline),file=filetxt)
# Close file
filetxt.close()
if i%100==0 and verbose==True:
print('Permutation', i)
print('\tDecoding accuracy:', np.mean(distances_baseline), '| R-match:', np.mean(rmatch), '| R-mismatch:', np.mean(rmismatch))
def extractSubjActivations(self, subj, df_trials):
"""
extract activations for a sample subject, including motor response
"""
## Set up data parameters
X = tgp.loadTaskTiming(subj,'ALL')
self.stimIndex = np.asarray(X['stimIndex'])
self.stimCond = np.asarray(X['stimCond'])
datadir = self.projectdir + 'data/postProcessing/hcpPostProcCiric/'
h5f = h5py.File(datadir + subj + '_glmOutput_data.h5','r')
self.betas = h5f['taskRegression/ALL_24pXaCompCorXVolterra_taskReg_betas_canonical'][:].copy()
h5f.close()
## Set up task parameters
self.logicRules = ['BOTH', 'NOTBOTH', 'EITHER', 'NEITHER']
self.sensoryRules = ['RED', 'VERTICAL', 'HIGH', 'CONSTANT']
self.motorRules = ['LMID', 'LIND', 'RMID', 'RIND']
self.colorStim = ['RED', 'BLUE']
self.oriStim = ['VERTICAL', 'HORIZONTAL']
self.pitchStim = ['HIGH', 'LOW']
self.constantStim = ['CONSTANT','ALARM']
# Begin extraction for specific trials
n_trials = len(df_trials)
stimData = np.zeros((n_trials,self.betas.shape[0]))
logicRuleData = np.zeros((n_trials,self.betas.shape[0]))
sensoryRuleData = np.zeros((n_trials,self.betas.shape[0]))
motorRuleData = np.zeros((n_trials,self.betas.shape[0]))
respData = np.zeros((n_trials,self.betas.shape[0]))
sensoryRuleIndices = []
motorRespAll = []
for trial in range(n_trials):
logicRule = df_trials.iloc[trial].logicRule
sensoryRule = df_trials.iloc[trial].sensoryRule
motorRule = df_trials.iloc[trial].motorRule
motorResp = df_trials.iloc[trial].motorResp
stim1 = df_trials.iloc[trial].stim1
stim2 = df_trials.iloc[trial].stim2
# if verbose:
# print 'Running actflow predictions for:', logicRule, sensoryRule, motorRule, 'task'
logicKey = 'RuleLogic_' + logicRule
sensoryKey = 'RuleSensory_' + sensoryRule
motorKey = 'RuleMotor_' + motorRule
stimKey = 'Stim_' + stim1 + stim2
motorResp = solveInputs(logicRule, sensoryRule, motorRule, stim1, stim2, printTask=False)
respKey = 'Response_' + motorResp
stimKey_ind = np.where(self.stimCond==stimKey)[0]
logicRule_ind = np.where(self.stimCond==logicKey)[0]
sensoryRule_ind = np.where(self.stimCond==sensoryKey)[0]
motorRule_ind = np.where(self.stimCond==motorKey)[0]
respKey_ind = np.where(self.stimCond==respKey)[0]
stimData[trial,:] = np.real(self.betas[:,stimKey_ind].copy()[:,0])
logicRuleData[trial,:] = np.real(self.betas[:,logicRule_ind].copy()[:,0])
sensoryRuleData[trial,:] = np.real(self.betas[:,sensoryRule_ind].copy()[:,0])
motorRuleData[trial,:] = np.real(self.betas[:,motorRule_ind].copy()[:,0])
respData[trial,:] = np.real(self.betas[:,respKey_ind].copy()[:,0])
motorRespAll.append(motorResp)
sensoryRuleIndices.append(sensoryRule)
self.motorRespAll = motorRespAll
self.stimData = stimData
self.logicRuleData = logicRuleData
self.sensoryRuleData = sensoryRuleData
self.motorRuleData = motorRuleData
self.respData = respData
self.sensoryRuleIndices = sensoryRuleIndices
def extractSubjHiddenRSMActivations(self, subj):
"""
extract activations for a sample subject, including motor response
"""
## Set up data parameters
X = tgp.loadTaskTiming(subj,'ALL')
self.stimIndex = np.asarray(X['stimIndex'])
self.stimCond = np.asarray(X['stimCond'])
datadir = self.projectdir + 'data/postProcessing/hcpPostProcCiric/'
h5f = h5py.File(datadir + subj + '_glmOutput_data.h5','r')
self.betas = h5f['taskRegression/ALL_24pXaCompCorXVolterra_taskReg_betas_canonical'][:].copy()
h5f.close()
## Set up task parameters
self.logicRules = ['BOTH', 'NOTBOTH', 'EITHER', 'NEITHER']
self.sensoryRules = ['RED', 'VERTICAL', 'HIGH', 'CONSTANT']
self.motorRules = ['LMID', 'LIND', 'RMID', 'RIND']
self.colorStim = ['RED', 'BLUE']
self.oriStim = ['VERTICAL', 'HORIZONTAL']
self.pitchStim = ['HIGH', 'LOW']
self.constantStim = ['CONSTANT','ALARM']
total_conds = 28 # 12 rules + 16 stimulus pairings
rsm_activations = np.zeros((28,self.betas.shape[0]))
labels = []
condcount = 0
##
# START
for cond in self.logicRules:
labels.append(cond)
key = 'RuleLogic_' + cond
ind = np.where(self.stimCond==key)[0]
rsm_activations[condcount,:] = np.real(self.betas[:,ind].copy()[:,0])
condcount += 1 # go to next condition
for cond in self.sensoryRules:
labels.append(cond)
key = 'RuleSensory_' + cond
ind = np.where(self.stimCond==key)
import csv
import json
from itertools import tee
import numpy as np
from Bio import SeqIO
import pysam
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def extract_lanl_genome(lanl_input, lanl_id, fasta_output):
records = SeqIO.to_dict(SeqIO.parse(lanl_input, 'fasta'))
record = records[lanl_id]
SeqIO.write(record, fasta_output, 'fasta')
def simulate_amplicon_dataset(dataset, gene, output_fastq, output_fasta):
simulated_reads = []
true_genes = []
with open('simulations.json') as json_file:
simulation_information = json.load(json_file)[dataset]
for lanl_information in simulation_information:
lanl_id = lanl_information['lanl_id']
lanl_reads_filename = "output/lanl/%s/%s/reads.fastq" % (lanl_id, gene)
lanl_reads = list(SeqIO.parse(lanl_reads_filename, 'fastq'))
current_frequency = lanl_information['frequency']
number_of_reads_to_extract = int(current_frequency * len(lanl_reads))
simulated_reads.extend(lanl_reads[:number_of_reads_to_extract])
lanl_gene_filename = "output/lanl/%s/%s/sequence.fasta" % (lanl_id, gene)
lanl_gene = SeqIO.read(lanl_gene_filename, 'fasta')
true_genes.append(lanl_gene)
SeqIO.write(simulated_reads, output_fastq, 'fastq')
SeqIO.write(true_genes, output_fasta, 'fasta')
def create_numeric_fasta(records):
for_numeric, for_headers = tee(records, 2)
mr = MappedReads()
np_arrays = [
mr.get_numeric_representation(record)
for record in for_numeric
]
numeric = np.vstack(np_arrays)
headers = [record.id for record in for_headers]
return headers, numeric
def evaluate(input_haplotypes, input_truth, output_json):
haplotypes = SeqIO.parse(input_haplotypes, 'fasta')
truth = SeqIO.parse(input_truth, 'fasta')
haplotype_index, numeric_haplotypes = create_numeric_fasta(haplotypes)
truth_index, numeric_truth = create_numeric_fasta(truth)
full_numeric = np.vstack([numeric_truth, numeric_haplotypes])
counts = np.array([
np.sum(full_numeric == 15, axis=0),
np.sum(full_numeric == 0, axis=0),
np.sum(full_numeric == 1, axis=0),
np.sum(full_numeric == 2, axis=0),
np.sum(full_numeric == 3, axis=0)
])
discordant = np.sum(counts == 0, axis=0) != 4
get_headers_as_strings = lambda index: [str(header) for header in index]
headers = get_headers_as_strings(truth_index)+get_headers_as_strings(haplotype_index)
n_headers = len(headers)
discordance_matrix = [n_headers*[0] for i in range(n_headers)]
for i in range(n_headers):
for j in range(n_headers):
discordance = np.sum(full_numeric[i,:] != full_numeric[j,:])
discordance_matrix[i][j] = int(discordance)
output = {
'number_of_discordant_sites': int(np.sum(discordant)),
'headers': headers,
'discordance_matrix': discordance_matrix
}
with open(output_json, 'w') as json_file:
json.dump(output, json_file, indent=2)
def covarying_sites(input_fasta, output_json):
fasta_np = np.array([
list(str(record.seq)) for record in SeqIO.parse(input_fasta, 'fasta')
], dtype='<U1')
A_sum = np.sum(fasta_np == 'A', axis=0)
C_sum = np.sum(fasta_np == 'C', axis=0)
G_sum = np.sum(fasta_np == 'G', axis=0)
T_sum = np.sum(fasta_np == 'T', axis=0)
max_count = np.max(np.vstack([A_sum, C_sum, G_sum, T_sum]), axis=0)
covarying_sites = np.arange(fasta_np.shape[1])[max_count < fasta_np.shape[0]]
with open(output_json, 'w') as json_file:
json.dump([int(cvs) for cvs in covarying_sites], json_file)
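# Worked example (assumed toy input): for an alignment containing only the sequences
# ACGT and ACCT, position 2 is the only column where the most common base does not
# account for every sequence (G vs C), so covarying_sites would write [2] to the JSON.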
def get_sam_info(sam):
sam_info = {}
for i, read in enumerate(sam):
location = (read.reference_start, read.reference_end)
if location in sam_info:
sam_info[location].append(i)
else:
sam_info[location] = [i]
return sam_info
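# get_sam_info groups reads by their (reference_start, reference_end) span, e.g. a
# result like {(100, 250): [0, 3], (101, 251): [1]} means reads 0 and 3 map to the
# same coordinates (values are indices into the sam list); the coordinates here are
# made up for illustration.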
def get_reference_to_alignment_map(lanl_id, aligned_genomes):
fasta_np = np.array(list(aligned_genomes[lanl_id].seq), dtype='<U1')
indices = np.arange(len(fasta_np))[fasta_np != '-']
return indices
def get_alignment_to_reference_map(lanl_id, aligned_genomes):
fasta_np = np.array(list(aligned_genomes[lanl_id].seq), dtype='<U1')
indices = np.cumsum(fasta_np != '-') - 1
return indices
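# Worked example (assumed toy alignment): if the aligned genome were "A-CG", then
# get_reference_to_alignment_map gives [0, 2, 3] (reference position -> alignment
# column) and get_alignment_to_reference_map gives [0, 0, 1, 2] (alignment column ->
# reference position, with gap columns mapping to the preceding reference position).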
def get_mate(
read, left_strain, right_strain, sams, sam_infos,
r2a_maps, a2r_maps, stop=25
):
sam = sams[right_strain]
sam_info = sam_infos[right_strain]
i = 0
left_alignment_start = r2a_maps[left_strain][read.reference_start]
left_alignment_end = r2a_maps[left_strain][read.reference_end-1]
while True:
for j in range(i+1):
all_shifts = [(j, i-j), (j, j-i), (-j, i-j), (-j, j-i)]
for left_shift, right_shift in all_shifts:
right_alignment_start = left_alignment_start + left_shift
right_alignment_end = left_alignment_end + right_shift
starts_before = right_alignment_start < 0
ends_after = right_alignment_end >= len(a2r_maps[right_strain])
if starts_before or ends_after:
continue
right_reference_start = a2r_maps[right_strain][right_alignment_start]
right_reference_end = a2r_maps[right_strain][right_alignment_end]
location = (right_reference_start, right_reference_end)
if location in sam_info:
n = len(sam_info[location])
i = np.random.randint(n)
return sam_info[location][i]
i += 1
if i == stop:
return None
def write_ar_dataset(
lanl_ids, frequencies, ar, aligned_genomes, output_fastq,
number_of_reads
):
sams = [
list(pysam.AlignmentFile('output/lanl/%s/wgs.sam' % lanl_id, "r"))
for lanl_id in lanl_ids
]
sam_infos = [get_sam_info(sam) for sam in sams]
number_of_ar_reads = np.ceil(ar*number_of_reads)
import numpy as np
from malib.spaces import Discrete, Box, MASpace, MAEnvSpec
from malib.environments.base_game import BaseGame
from malib.error import (
EnvironmentNotFound,
WrongNumberOfAgent,
WrongNumberOfAction,
WrongNumberOfState,
WrongActionInputLength,
)
class StochasticMatrixGame(BaseGame):
def __init__(
self, game_name, agent_num, action_num, state_num, payoff=None, transition=None
):
self.game_name = game_name
self.agent_num = agent_num
self.action_num = action_num
self.state_num = state_num
game_list = StochasticMatrixGame.get_game_list()
if not self.game_name in game_list:
raise EnvironmentNotFound(f"The game {self.game_name} doesn't exist")
expt_num_agent = game_list[self.game_name]["agent_num"]
if expt_num_agent != self.agent_num:
raise WrongNumberOfAgent(
f"The number of agent \
required for {self.game_name} is {expt_num_agent}"
)
expt_num_action = game_list[self.game_name]["action_num"]
if expt_num_action != self.action_num:
raise WrongNumberOfAction(
f"The number of action \
required for {self.game_name} is {expt_num_action}"
)
expt_num_state = game_list[self.game_name]["state_num"]
if expt_num_state != self.state_num:
raise WrongNumberOfState(
f"The number of state \
required for {self.game_name} is {expt_num_state}"
)
self.action_spaces = MASpace(
tuple(Box(low=-1.0, high=1.0, shape=(1,)) for _ in range(self.agent_num))
)
self.observation_spaces = MASpace(
tuple(Discrete(1) for _ in range(self.agent_num))
)
self.env_specs = MAEnvSpec(self.observation_spaces, self.action_spaces)
self.t = 0
if payoff is not None:
payoff = np.array(payoff)
assert payoff.shape == tuple(
[state_num, agent_num] + [action_num] * agent_num
)
self.payoff = payoff
if payoff is None:
self.payoff = np.zeros(
tuple([state_num, agent_num] + [action_num] * agent_num)
)
if transition is None:
self.transition = np.zeros(
tuple([state_num] + [action_num] * agent_num + [state_num])
)
if self.game_name == "PollutionTax":
self.payoff[0][0] = [[4.0, 3.0], [7.0, 6.0]]
self.payoff[0][1] = [[5.0, 8.0], [4.0, 7.0]]
self.payoff[1][0] = [[1.0, 0.0], [4.0, 3.0]]
self.payoff[1][1] = [[2.0, 5.0], [1.0, 4.0]]
self.transition[0] = [[[1.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]]
self.transition[1] = [[[1.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]]
elif self.game_name == "three_matrix_games":
self.g1 = [[0.0, 3.0], [2.0, -1.0]]
self.g2 = [[0.0, 1.0], [4.0, 3.0]]
self.g = [["g1", 4.0], [5.0, "g2"]]
self.rewards = np.zeros((self.agent_num,))
self.state = 0
def get_three_matrix_games(self, a_n):
assert len(a_n) == 2
info = {}
reward_n = np.zeros((self.agent_num,))
if self.state == 0:
if a_n[0] == a_n[1] == 0:
state_prime = 1
state_n = np.array([state_prime] * self.agent_num)
self.state = state_prime
done_n = np.array([False] * self.agent_num)
elif a_n[0] == a_n[1] == 1:
state_prime = 2
state_n = np.array([state_prime] * self.agent_num)
self.state = state_prime
done_n = np.array([False] * self.agent_num)
else:
reward_n[0] = self.g[a_n[0]][a_n[1]]
reward_n[1] = -self.g[a_n[0]][a_n[1]]
state_prime = 0
state_n = np.array([state_prime] * self.agent_num)
self.state = state_prime
done_n = np.array([True] * self.agent_num)
if self.state == 1:
reward_n[0] = self.g1[a_n[0]][a_n[1]]
reward_n[1] = -self.g1[a_n[0]][a_n[1]]
state_prime = 0
state_n = np.array([state_prime] * self.agent_num)
self.state = state_prime
done_n = np.array([True] * self.agent_num)
if self.state == 2:
reward_n[0] = self.g2[a_n[0]][a_n[1]]
reward_n[1] = -self.g2[a_n[0]][a_n[1]]
state_prime = 0
state_n = np.array([state_prime] * self.agent_num)
self.state = state_prime
done_n = np.array([True] * self.agent_num)
import numpy as np
from scipy import sparse
import numba
def _get_mean_var(X, *, axis=0):
if sparse.issparse(X):
mean, var = sparse_mean_variance_axis(X, axis=axis)
else:
mean = np.mean(X, axis=axis, dtype=np.float64)
mean_sq = np.multiply(X, X)
# USAGE
# python bank_check_ocr.py --image example_check.png --reference micr_e13b_reference.png
# import the necessary packages
from skimage.segmentation import clear_border
from imutils import contours
import imutils
import numpy as np
import argparse
import cv2
def extract_digits_and_symbols(image, charCnts, minW=5, minH=15):
# grab the internal Python iterator for the list of character
# contours, then initialize the character ROI and location
# lists, respectively
charIter = charCnts.__iter__()
rois = []
locs = []
# keep looping over the character contours until we reach the end
# of the list
while True:
try:
# grab the next character contour from the list, compute
# its bounding box, and initialize the ROI
c = next(charIter)
(cX, cY, cW, cH) = cv2.boundingRect(c)
roi = None
# check to see if the width and height are sufficiently
# large, indicating that we have found a digit
if cW >= minW and cH >= minH:
# extract the ROI
roi = image[cY:cY + cH, cX:cX + cW]
rois.append(roi)
locs.append((cX, cY, cX + cW, cY + cH))
# otherwise, we are examining one of the special symbols
else:
# MICR symbols include three separate parts, so we
# need to grab the next two parts from our iterator,
# followed by initializing the bounding box
# coordinates for the symbol
parts = [c, next(charIter), next(charIter)]
(sXA, sYA, sXB, sYB) = (np.inf, np.inf, -np.inf,
-np.inf)
# loop over the parts
for p in parts:
# compute the bounding box for the part, then
# update our bookkeeping variables
(pX, pY, pW, pH) = cv2.boundingRect(p)
sXA = min(sXA, pX)
sYA = min(sYA, pY)
sXB = max(sXB, pX + pW)
sYB = max(sYB, pY + pH)
# extract the ROI
roi = image[sYA:sYB, sXA:sXB]
rois.append(roi)
locs.append((sXA, sYA, sXB, sYB))
# we have reached the end of the iterator; gracefully break
# from the loop
except StopIteration:
break
# return a tuple of the ROIs and locations
return (rois, locs)
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
help="path to input image")
ap.add_argument("-r", "--reference", required=True,
help="path to reference MICR E-13B font")
args = vars(ap.parse_args())
# initialize the list of reference character names, in the same
# order as they appear in the reference image where the digits
# their names and:
# T = Transit (delimit bank branch routing transit #)
# U = On-us (delimit customer account number)
# A = Amount (delimit transaction amount)
# D = Dash (delimit parts of numbers, such as routing or account)
charNames = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0",
"T", "U", "A", "D"]
# load the reference MICR image from disk, convert it to grayscale,
# and threshold it, such that the digits appear as *white* on a
# *black* background
ref = cv2.imread(args["reference"])
ref = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)
ref = imutils.resize(ref, width=400)
ref = cv2.threshold(ref, 0, 255, cv2.THRESH_BINARY_INV |
cv2.THRESH_OTSU)[1]
# find contours in the MICR image (i.e,. the outlines of the
# characters) and sort them from left to right
refCnts = cv2.findContours(ref.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
refCnts = imutils.grab_contours(refCnts)
refCnts = contours.sort_contours(refCnts, method="left-to-right")[0]
# extract the digits and symbols from the list of contours, then
# initialize a dictionary to map the character name to the ROI
refROIs = extract_digits_and_symbols(ref, refCnts,
minW=10, minH=20)[0]
chars = {}
# loop over the reference ROIs
for (name, roi) in zip(charNames, refROIs):
# resize the ROI to a fixed size, then update the characters
# dictionary, mapping the character name to the ROI
roi = cv2.resize(roi, (36, 36))
chars[name] = roi
# initialize a rectangular kernel (wider than it is tall) along with
# an empty list to store the output of the check OCR
rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 7))
output = []
# load the input image, grab its dimensions, and apply array slicing
# to keep only the bottom 20% of the image (that's where the account
# information is)
image = cv2.imread(args["image"])
(h, w,) = image.shape[:2]
delta = int(h - (h * 0.2))
bottom = image[delta:h, 0:w]
# convert the bottom image to grayscale, then apply a blackhat
# morphological operator to find dark regions against a light
# background (i.e., the routing and account numbers)
gray = cv2.cvtColor(bottom, cv2.COLOR_BGR2GRAY)
blackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rectKernel)
# compute the Scharr gradient of the blackhat image, then scale
# the rest back into the range [0, 255]
gradX = cv2.Sobel(blackhat, ddepth=cv2.CV_32F, dx=1, dy=0,
ksize=-1)
gradX = np.absolute(gradX)
(minVal, maxVal) = (np.min(gradX), np.max(gradX))
from baselines.spaces.box import Box
from baselines.spaces.product import Product
import numpy as np
import re
import gym
from os.path import dirname, abspath
from baselines.special import probScale1D, categorical_sample, probScale
from scipy.spatial import distance
from gym.utils import seeding
import csv
class TemporalPolicePatrollingGame(gym.Env):
metadata = {'render.modes': ['human', 'ansi']}
def __init__(self, N, time_shift = 0):
d = dirname(dirname(abspath(__file__)))
self.incident_file_num = 31
self.stateNum = 24
self.h = 100
self.time_shift = time_shift
time_range = [[0,7], [8,19],[20,24]]
time_shift_h = np.array(time_range)*60
self.period = time_shift_h[self.time_shift]
allocation_file = (d + '/data/duty_frcs_F_1M2D_5thTeam.csv')
csv_data = np.genfromtxt(allocation_file, delimiter=',')
self.fixed_allocation = []# np.zeros(self.stateNum)
start_hour = time_range[self.time_shift][0]
end_hour = time_range[self.time_shift][1]
#F_sector_centroid_distance_with_distance_and_cost
for i in range(len(csv_data)):
row = csv_data[i]
if row[1]>=start_hour and row[2]<=end_hour:
self.fixed_allocation.append(int(row[0]))
#self.fixed_allocation[int(row[0])] += 1
#travel_time[urgent][zone1][zone2][time_mean][time_var]
self.travel_time = np.ones((3, self.stateNum, self.stateNum,2))#*100
travel_time_file = (d + '/data/F_sector_centroid_distance_with_distance_and_cost.csv')#travel_time_google
csv_data = np.genfromtxt(travel_time_file, delimiter=',')
for i in range(1,len(csv_data)):
row = csv_data[i]
loc1 = int(row[0])
loc2 = int(row[3])
time = float(row[7])/60 + 5
#non-urgent incident travel time
self.travel_time[0, loc1, loc2, 0] = time
#urgent incident travel time
self.travel_time[1, loc1, loc2, 0] = time#/1.2
#re-allocation travel time
self.travel_time[2, loc1, loc2, 0] = time
self.incident_set = []
#"sector", "start_time", "is_urgent", "demand", "engagement_time"
for i in range(1, self.incident_file_num+1):
input_file =d + '/data/SyntheticMonthx1000/'+ str(i) + 'L.csv'
daily_incidents = []
csv_data = np.genfromtxt(input_file, delimiter=',', dtype = int)
for i in range(len(csv_data)):
row = csv_data[i]
if (row[1]>=self.period[0])&(row[1]<=self.period[1]):
daily_incidents.append(row)
# time_daily_incidents = np.array(daily_incidents)[:,1]
#delta_time = np.roll(time_daily_incidents, -1, axis=0) - time_daily_incidents
self.incident_set.append(daily_incidents)
self.t = 0
self.start_time = self.period[0]
# self.initialDistribution = np.ones(self.stateNum, dtype=float)/self.stateNum
self.N = len(self.fixed_allocation)
#randomly extract an incident scenario
self.incidents = self.incident_set[np.random.randint(7)]
self.max_waiting_time = 1000
self.max_obs_value = -1000
self.delay_cost_coeff = -1
self.lose_passenger_cost = -100
self.adjacent_list = []
self.initialDistribution = np.ones(self.stateNum, dtype=float)/self.stateNum
for i in range(self.stateNum):
adjacent_nodes = []
for j in range(self.stateNum):
if self.travel_time[0, i, j, 0] <10.0:
adjacent_nodes.append(j)
self.adjacent_list.append(adjacent_nodes)
# adjacent_temp = np.argpartition(self.travel_time[0, 7, :, 0], 5)[:5]
# self.adjacent_list[7] = list(adjacent_temp)
self.scenario_index = 0
self.urgentResponse = 10
self.nonurgentResponse = 20
self.time_interval = 3
self.C = 5
self.discretized_travel_times = np.zeros((self.stateNum, self.stateNum), dtype=int)
for loc1 in range(self.stateNum):
for loc2 in range(self.stateNum):
if loc1!=loc2:
discretized_time = int(self.travel_time[2, loc1, loc2, 0]/self.C)
self.discretized_travel_times[loc1, loc2] = discretized_time
a = [i for i in range(24)]
a[0] = [0,1,3,4,6]
a[1] = [1,0,3]
a[2] = [2,3,4,5,6]
a[3] = [3,0,1,2,4]
a[4] = [4,0,2,3,5,6]
a[5] = [5,2,4,6,21]
a[6] = [6,0,7,4,5,3]
a[7] = [7,6,8,9]
a[8] = [8,7,9,10,11]
a[9] = [9,7,8,10,11]
a[10] = [10,8,9,11,13,14]
a[11] = [11,8,9,10,12,13,14]
a[12] = [12,11,13,15,18]
a[13] = [13,10,11,12,14,15,17,18]
a[14] = [14,10,11,13,17,18,20,21]
a[15] = [15,12,16,18]
a[16] = [16,15,17,18,19,20]
a[17] = [17,13,14,16,18,20,21,19]
a[18] = [18,12,13,14,15,16,17]
a[19] = [19,16,17,20,22,23]
a[20] = [20,14,16,17,19,21,22,23]
a[21] = [21,5,14,17,20,23]
a[22] = [22,19,20,23]
a[23] = [23,19,20,21,22]
self.adjacent_list = a
def generate_demand(self):
incident = self.incidents[self.t]
#map origin and dest to the node in the map
loc = incident[0]
# dest = incident[0]
start_time = incident[1]
is_urgent = incident[2]
demand = incident[3]
engagement_time = incident[4]
return loc, start_time, is_urgent, demand, engagement_time
def reset(self):
self.t = 0
self.clock_time = 0
self.start_time = self.period[0]
# randomly extract an incident scenario
self.incidents = self.incident_set[self.scenario_index]#np.array(self.fixed_allocation)#np.random.randint(7)#np.random.randint(self.incident_file_num)
# print('scenario_index:'+ str(self.scenario_index))
self.scenario_index += 1
self.scenario_index = self.scenario_index%self.incident_file_num
#agent location
self.agent_locs = self.fixed_allocation#np.array([categorical_sample(self.initialDistribution, np.random) for _ in range(self.N)])#
#how many time step ahead agent would be free
self.assignment_done_time = []
self.assignment_done_loc = np.array([], dtype=int)
# summary of free agent
self.S = np.zeros((self.stateNum, self.C+1)) # (self.C+1))
for i in range(self.N):
self.S[self.agent_locs[i],-1] += 1 #/ max(self.agent_status[i], 1.0)
self.obs = np.array(self.S)#np.zeros((self.N, self.stateNum))
self.state = self.S[:,-1]
self.state_count = np.tile(self.S[:,0, np.newaxis], [1, self.stateNum]).flatten()
return self.obs, self.state_count#, self.assignment_validity
@property
def observation_space(self):
return (Box(low=-self.max_obs_value, high=self.max_obs_value, shape=(self.stateNum, self.C+1)))
@property
def action_space(self):
components = []
for i in range(self.stateNum):
components.append(Box(low=0, high=self.N, shape=(self.stateNum)))
return Product(components)
def _step(self, prob):
self.t += 1
prob = probScale(np.asarray(prob, dtype=float))
a = np.asarray(
[np.random.multinomial(self.state[i], prob[i]) for i in range(prob.shape[0])])
tempDests = np.zeros(self.stateNum)
for i in range(self.stateNum):
tempDests[i] += a[i, i]
for j in range(self.stateNum):
if i != j:
for k in range(a[i, j]):
self.assignment_done_loc = np.append(self.assignment_done_loc, j)
self.assignment_done_time = np.append(self.assignment_done_time, self.travel_time[2, i, j, 0])
# generate the new demand
loc, arrival_time, is_urgent, self.demand, engagement_time = self.generate_demand()
time_past = arrival_time - self.clock_time
self.clock_time = arrival_time
# update agent status at the arrival of new demand
self.assignment_done_time = np.maximum(0, np.array(self.assignment_done_time) - time_past)
done_list = []
for i in range(len(self.assignment_done_time)):
if self.assignment_done_time[i]==0:
done_list.append(i)
tempDests[self.assignment_done_loc[i]] += 1
#purge the on-the-fly list
self.assignment_done_time = np.delete(self.assignment_done_time, done_list)
self.assignment_done_loc = np.delete(self.assignment_done_loc, done_list)
import numpy as np
import sys
import pickle
from collections import defaultdict
import itertools
import nltk
import random
# space is included in whitelist
EN_WHITELIST = '0123456789abcdefghijklmnopqrstuvwxyz '
EN_BLACKLIST = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~\''
DATA_PATH = './data/chat.txt'
# these values determine the length of questions and answers while training
# increase the length to get a better trained model at the cost of more time and resources...
limit = {
'maxq': 20,
'minq': 0,
'maxa': 20,
'mina': 0
}
# increase vocab size for a better trained model at the cost of more time and resources
VOCAB_SIZE = 6000
UNK = 'unk'
def default():
return 1
# read lines from the file
def read_lines(filename):
return open(filename).read().split('\n')[:-1]
# separate sentences in a line
def split_sentences(line):
return line.split('.')
# remove anything that isn't in the vocabulary
def filter_lines(line, whitelist):
return ''.join([ch for ch in line if ch in whitelist])
# read words and create index to word and word to index dictionaries
def index(tokenized_sentences, vocab_size):
# get frequency distribution of the tokenized words which are most used
freq_dist = nltk.FreqDist(itertools.chain(*tokenized_sentences))
# get vocabulary of size VOCAB_SIZE
vocab = freq_dist.most_common(vocab_size)
# generate index to word dictionary
index2word = ['_'] + [UNK] + [x[0] for x in vocab]
# generate word to index dictionary
word2index = dict([(w, i) for i, w in enumerate(index2word)])
return index2word, word2index, freq_dist
# filter sequences based on set min length and max length
def filter_data(sequences):
filter_q, filter_a = [], []
raw_data_len = len(sequences) // 2
for i in range(0, len(sequences), 2):
qlen = len(sequences[i].split(' '))
alen = len(sequences[i+1].split(' '))
if qlen >= limit['minq'] and qlen <= limit['maxq']:
if alen >= limit['mina'] and alen <= limit['maxa']:
filter_q.append(sequences[i])
filter_a.append(sequences[i+1])
filter_data_len = len(filter_q)
filter_percent = int((raw_data_len - filter_data_len) / raw_data_len * 100)
    print('{}% of the original data filtered out'.format(filter_percent))
return filter_q, filter_a
'''
Replacing words with indices in a sequence
Replace with unknown if word not present in vocabulary
'''
def pad_seq(seq, lookup, maxlen):
indices = []
for word in seq:
if word in lookup:
indices.append(lookup[word])
else:
indices.append(lookup[UNK])
return indices + [0] * (maxlen - len(seq))
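# Illustrative sketch (hypothetical lookup values): with maxlen = 5,
#   pad_seq(['hi', 'there'], {'hi': 5, 'there': 7, 'unk': 1}, 5) -> [5, 7, 0, 0, 0]
# Words missing from the lookup map to lookup[UNK] before the trailing zeros are appended.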
'''
generating the final dataset by creating an array of indices
and adding zero padding. Zero padding simply appends zeros to
each sequence up to the maximum length.
'''
def zero_pad(tokenized_q, tokenized_a, word2index):
data_len = len(tokenized_q)
index_q = np.zeros([data_len, limit['maxq']], dtype=np.int32)
index_a = np.zeros([data_len, limit['maxa']], dtype=np.int32)
for i in range(data_len):
q_indices = pad_seq(tokenized_q[i], word2index, limit['maxq'])
a_indices = pad_seq(tokenized_a[i], word2index, limit['maxa'])
index_q[i] = np.array(q_indices)
index_a[i] = np.array(a_indices)
return index_q, index_a
def process_data():
print('\n[ READING LINES FROM FILE ]')
lines = read_lines(filename=DATA_PATH)
    # converting all characters to lowercase
lines = [ line.lower() for line in lines ]
print('\n[ SAMPLE FROM THE DATASET ]')
print(lines[25:35])
# filter out unnecessary characters
print('\n[ 1ST LAYER OF FILTERING ]')
lines = [ filter_lines(line, EN_WHITELIST) for line in lines ]
print(lines[25:35])
# filter and distributing sequences into questions and answers
print('\n[ 2ND LAYER OF FILTERING ]')
qlines, alines = filter_data(lines)
print('\n [ SAMPLE QUESTION ANSWER PAIR ]')
print('\n q: {0} ; a: {1}'.format(qlines[15], alines[15]))
print('\n q: {0} ; a: {1}'.format(qlines[20], alines[20]))
# convert list of [ lines of text ] into list of [ list of words ]
    print('\n[ SEGMENTING LINES OF TEXT INTO LISTS OF WORDS ]')
qtokenized = [ wordslist.split(' ') for wordslist in qlines ]
atokenized = [ wordslist.split(' ') for wordslist in alines ]
print('\n[ SAMPLE FROM SEGMENTED WORDS LIST ]')
print('\nq : {0} ; a : {1}'.format(qtokenized[15], atokenized[15]))
print('\nq : {0} ; a : {1}'.format(qtokenized[20], atokenized[20]))
# indexing --> idx2w, w2idx
idx2w, w2idx, freq_dist = index(qtokenized + atokenized, vocab_size=VOCAB_SIZE)
# adding zero padding
idx_q, idx_a = zero_pad(qtokenized, atokenized, w2idx)
print('\n[ STORING NUMPY PADDED ARRAYS TO DISK ]')
| np.save('./data/processed_data/idx_q.npy', idx_q) | numpy.save |
import time
import numpy as np
import torch
import torch.nn as nn
import open3d as o3d
import h5py
import math
import sklearn
import copy
from sklearn.neighbors import KDTree
from PIL import Image
import matplotlib.pyplot as plt
def show_point_cloud(src_, src_corr_, ref_, ref_corr_):
src = src_.copy()
src_corr = src_corr_.copy()
ref = ref_.copy()
ref_corr = ref_corr_.copy()
ref[:,1] = ref[:,1] + 2.5
ref_corr[:,1] = ref_corr[:,1] + 2.5
src_pcd = o3d.geometry.PointCloud()
src_corr_pcd = o3d.geometry.PointCloud()
ref_pcd = o3d.geometry.PointCloud()
ref_corr_pcd = o3d.geometry.PointCloud()
src_pcd.points = o3d.utility.Vector3dVector(src)
ref_pcd.points = o3d.utility.Vector3dVector(ref)
src_corr_pcd.points = o3d.utility.Vector3dVector(src_corr)
ref_corr_pcd.points = o3d.utility.Vector3dVector(ref_corr )
    ref_pcd.paint_uniform_color([1, 0, 0.651])  # pink/magenta for the reference cloud
    # src_corr_pcd.paint_uniform_color([1, 0.706, 0])  # yellow
    src_pcd.paint_uniform_color([0, 0.651, 0.929])  # light blue for the source cloud
line_size = src_corr.shape[0]
    line_src = np.arange(0, 2 * line_size, 2)  # all even indices
rand_idxs = np.random.choice(line_size, math.ceil(line_size / 3), replace=False)
# print('line_src',line_src)
line_src = line_src[rand_idxs].reshape(rand_idxs.shape[0], 1)
# print('line_src',line_src)
line_ref = line_src + 1
# print('line_ref',line_ref)
lines = np.concatenate([line_ref, line_src], -1).reshape(-1, 2)
# print('lines',lines)
colors = [[1, 0, 0]]
# triangle_points=np.concatenate([data['points_ref'][1, :, :3].detach().cpu().numpy()+1,data['points_src'][1, :, :3].detach().cpu().numpy()],-1)
triangle_points = np.concatenate([src_corr, ref_corr ], -1)
triangle_points = triangle_points.reshape(-1, 3)
# print('triangle_points',triangle_points.shape)
line_pcd = o3d.geometry.LineSet()
line_pcd.lines = o3d.utility.Vector2iVector(lines)
line_pcd.colors = o3d.utility.Vector3dVector(colors)
# line_pcd.paint_uniform_color([1, 0.706, 0])
line_pcd.points = o3d.utility.Vector3dVector(triangle_points)
o3d.visualization.draw_geometries([line_pcd, src_pcd, ref_pcd], window_name='line_pcd src_pcd src_corr_pcd')
# o3d.visualization.draw_geometries([src_corr_pcd, ref_pcd], window_name='src_corr_pcd ref_pcd')
# src_pcd.transform(transform)
# src_corr_pcd.points = o3d.utility.Vector3dVector(weighted_ref)
# o3d.visualization.draw_geometries([src_corr_pcd, src_pcd], window_name='src_corr_pcd src_pcd.transform(T)')
#
# ref_pcd.points = o3d.utility.Vector3dVector(ref)
# o3d.visualization.draw_geometries([src_pcd, ref_pcd], window_name='src_pcd.transform(T) ref_pcd')
def draw_registration_result(source, target, src_color, tgt_color):
src_pcd = o3d.geometry.PointCloud()
ref_pcd = o3d.geometry.PointCloud()
src_pcd.points = o3d.utility.Vector3dVector(source)
ref_pcd.points = o3d.utility.Vector3dVector(target)
src_pcd.colors = o3d.utility.Vector3dVector(src_color)
ref_pcd.colors = o3d.utility.Vector3dVector(tgt_color)
# src_pcd.paint_uniform_color([1, 0.706, 0])
# ref_pcd.paint_uniform_color([0, 0.651, 0.929])
o3d.visualization.draw_geometries([src_pcd, ref_pcd])
def draw_registration_result_no_blocking(source, target,vis):
vis.update_geometry(source)
vis.poll_events()
vis.update_renderer()
def get_npy_data(filename, index):
all_data = np.load(filename, allow_pickle=True)
# print(len(all_data))
# xyz_src = torch.from_numpy(all_data[index * 3])
# feat_src = torch.from_numpy(all_data[index * 3 + 2])
# xyz_ref = torch.from_numpy(all_data[index * 3 + 3])
# feat_ref = torch.from_numpy(all_data[index * 3 + 5])
xyz = all_data[index * 4]
normal = all_data[index * 4 + 1]
feat = all_data[index * 4 + 2]
color = all_data[index * 4 + 3]
return xyz, normal, feat, color
def calGrad(point,normal,feature,kdTree):
# n * 3; n * 3 ; n * d
N = point.shape[0]
d = feature.shape[1]
grads = np.zeros([N,3,d])
for i in range(N):
pt = point[i,:].reshape(1,-1)
nt = normal[i,:].reshape(1,-1)
ft = feature[i,:].reshape(1,-1)
_, idx = kdTree.query(pt, k=20, return_distance=True)
# idx_ = np.reshape(idx,(-1,1))
# neighbor_ = point[idx_, :]
# neighbor = np.reshape(neighbor_, (N,-1, 3))
neighbor_pt = point[idx, :].reshape(-1,3)
neighbor_ft = feature[idx,:].reshape(-1,d)
proj_pt = neighbor_pt - (neighbor_pt - pt) @ nt.T * nt
A = proj_pt - pt
b = neighbor_ft - ft
A = | np.concatenate((A,nt),axis=0) | numpy.concatenate |
"""
Usage:
    Make an instance of a class (currently only the MainSequence class is available).
    Call instance.Interpolate(instance.dict, SpT), where dict is the name of the dictionary you
    want to interpolate (Temperature, Radius, or Mass) and SpT is the spectral type you wish to
    interpolate to.

Provides relations for temperature, luminosity, radius, and mass for various spectral types.
Data comes from the Carroll and Ostlie book, or is interpolated from it.
ALL RELATIONS ARE FOR MAIN SEQUENCE STARS ONLY!
"""
from __future__ import print_function, absolute_import
from collections import defaultdict
import re
import logging
import os
from scipy.interpolate import UnivariateSpline
import numpy as np
import pandas
from kglib.utils import DataStructures
_ROOT = os.path.abspath(os.path.dirname(__file__))
def get_data(path):
return os.path.join(_ROOT, 'data', path)
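# Illustrative usage sketch based on the module docstring above ('G2' is an arbitrary example
# spectral type; the MainSequence class and its Temperature dictionary are assumed to be
# defined elsewhere in this module):
#   MS = MainSequence()
#   teff = MS.Interpolate(MS.Temperature, 'G2')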
SPT_PATTERN = r'[A-Z]([0-9]\.?[0-9]*)' # regular expression pattern for identifying spectral types
def fill_dict(row, d, key, makefloat=True):
val = row[key].strip()
if makefloat:
if val != '':
d[row['SpT'].strip()[:-1]] = float(val)
else:
d[row['SpT'].strip()[:-1]] = val
class FitVals():
def __init__(self, coeffs, xmean=0.0, xscale=1.0, logscale=False, intercept=0.0, valid=(-5.0, 5.0)):
self.coeffs = coeffs
self.order = len(coeffs) - 1.0
self.xmean = xmean
self.xscale = xscale
self.log = logscale
self.intercept = intercept
self.valid = valid
class FunctionFits():
def __init__(self, MS=None):
self.MS = MainSequence() if MS is None else MS
# Mass fits, made using the old MainSequence dictionaries
self.sptnum_to_mass = FitVals(coeffs=np.array([0.11679476, -0.51168936, 0.27332682, 1.42616918,
-1.56182261, -1.21786221, 1.8851773, -0.04980108,
-0.30105226, -0.38423188, -0.17182606]),
xmean=26.681818181818183, xscale=19.342337838478862, logscale=True,
intercept=0.46702748509563452, valid=[5, 65])
# Radius fit, made using the old MainSequence dictionaries
self.sptnum_to_radius = FitVals(coeffs=np.array([0.02250148, 0.06041591, -0.21719815, -0.2087987,
0.55373813, 0.13635043, -0.50930703, -0.07293512,
0.3132073, -0.24671561, -0.08480404]),
xmean=34.5, xscale=20.702656834329261, logscale=True,
intercept=0.16198349185993394, valid=[5, 67])
# Absolute magnitude fit, using the old MainSequence dictionaries
self.sptnum_to_absmag = FitVals(coeffs=np.array([0.35215153, -0.2924717, -0.95804462, 1.74295661,
-0.41864979, 2.50954236, 0.45854428]),
xmean=32.44, xscale=18.456608572541164,
intercept=2.8008819709959134, valid=[5, 65])
# Color fits from Boyajian et al 2013
color_relations = defaultdict(lambda: defaultdict(FitVals))
color_relations['B']['V'] = FitVals(coeffs=np.array((9552, -17443, 44350, 68940, 57338, -24072, 4009)),
valid=[-0.1, 1.8])
color_relations['V']['J'] = FitVals(coeffs= | np.array((9052, -3972, 1039, -101)) | numpy.array |
import numpy as np
import tform as tf
import scipy.linalg as la
import control
import swing_trajectory as st
class PreviewControl:
def __init__(self, dt=1./240., Tsup_time=0.5, Tdl_time=0.1, CoMheight=0.45, g=9.8, previewStepNum=240, stride=0.1, initialTargetZMP=np.array([0.,0.]), initialFootPrint=np.array([[[0.,0.065],[0.,-0.065]]]), R=np.matrix([1.]), Q=np.matrix([[7000,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]])):
self._RIGHT_LEG = 1
self._LEFT_LEG = 0
self.dt = dt
self.previewStepNum = previewStepNum
self.A = np.matrix([[1, dt, (dt**2)/2],
[0, 1, dt],
[0, 0, 1]])
self.B = np.matrix([(dt**3)/6, (dt**2)/2, dt]).T
self.C = np.matrix([1, 0, -CoMheight/g])
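        # Cart-table / preview-control model (discrete triple integrator): the state
        # x = [CoM position, velocity, acceleration]^T is driven by the CoM jerk input u,
        # and the ZMP output is p = C*x = position - (CoMheight/g)*acceleration.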
self.CoMheight = CoMheight
self.G = np.vstack((-self.C*self.B, self.B))
self.Gr= np.matrix([1., 0., 0., 0.]).T
#state vector
self.x = np.matrix(np.zeros(3)).T
self.y = np.matrix(np.zeros(3)).T
self.footPrints = np.array([[[0.,0.065],[0.,-0.065]],
[[0.,0.065],[0.,-0.065]],
[[0.,0.065],[0.,-0.065]]])
self.Tsup = int(Tsup_time/dt)
self.Tdl = int(Tdl_time/dt)
self.px_ref = np.full((self.Tsup+self.Tdl)*3,initialTargetZMP[0])
self.py_ref = np.full((self.Tsup+self.Tdl)*3,initialTargetZMP[1])
self.px = np.array([0.0]) #zmp
self.py = np.array([0.0])
self.phi = np.hstack( (np.matrix([1,0,0,0]).T, np.vstack((-self.C*self.A, self.A)) ) )
P, _, _ = control.dare(self.phi,self.G,Q,R)
zai = (np.eye(4) - self.G * la.inv(R + self.G.T*P*self.G) * self.G.T * P )*self.phi
self.Fr=np.array([])
for j in range(1,previewStepNum+1):
self.Fr= np.append(self.Fr, -la.inv(R + self.G.T*P*self.G)*self.G.T*((zai.T)**(j-1))*P*self.Gr)
self.F=-la.inv(R + self.G.T*P*self.G)*self.G.T*P*self.phi
self.px_ref_log = self.px_ref[:(self.Tsup+self.Tdl)*2]
self.py_ref_log = self.py_ref[:(self.Tsup+self.Tdl)*2]
self.xdu = 0
self.ydu = 0
self.xu = 0
self.yu = 0
self.dx=np.matrix(np.zeros(3)).T
self.dy=np.matrix(np.zeros(3)).T
self.swingLeg = self._RIGHT_LEG
self.supportLeg = self._LEFT_LEG
self.targetZMPold = np.array([initialTargetZMP])
self.currentFootStep = 0
def footPrintAndCOMtrajectoryGenerator(self, inputTargetZMP,inputFootPrint):
currentFootStep = 0
self.footPrints = self.footOneStep(self.footPrints,inputFootPrint, self.supportLeg)
input_px_ref, input_py_ref = self.targetZMPgenerator(inputTargetZMP, self.targetZMPold[-1], self.Tsup,self.Tdl)
self.px_ref = self.fifo(self.px_ref, input_px_ref, len(input_px_ref))
self.py_ref = self.fifo(self.py_ref, input_py_ref, len(input_py_ref))
self.px_ref_log = np.append(self.px_ref_log, input_px_ref)
self.py_ref_log = np.append(self.py_ref_log, input_py_ref)
CoMTrajectory = np.empty((0,3), float)
startRobotVelocity = np.array([self.x[1],self.y[1]])
for k in range(len(input_px_ref)):
dpx_ref = self.px_ref[k+1] - self.px_ref[k]
dpy_ref = self.py_ref[k+1] - self.py_ref[k]
xe = self.px_ref[k] - self.C * self.x
ye = self.py_ref[k] - self.C * self.y
X=self.phi * np.vstack((xe, self.dx)) + self.G*self.xdu + self.Gr*dpx_ref
Y=self.phi * np.vstack((ye, self.dy)) + self.G*self.ydu + self.Gr*dpy_ref
xsum=ysum=0
for j in range(1,self.previewStepNum+1):
xsum +=self.Fr[j-1]*(self.px_ref[k+j]-self.px_ref[k+j-1])
ysum +=self.Fr[j-1]*(self.py_ref[k+j]-self.py_ref[k+j-1])
self.xdu=self.F*X+xsum
self.ydu=self.F*Y+ysum
self.xu+=self.xdu
self.yu+=self.ydu
old_x=self.x
old_y=self.y
self.x=self.A*self.x+self.B*self.xu
self.y=self.A*self.y+self.B*self.yu
self.dx=self.x-old_x
self.dy=self.y-old_y
CoMTrajectory = np.vstack((CoMTrajectory, [self.x[0,0], self.y[0,0], self.CoMheight]))
self.px = np.append(self.px, self.C*self.x)
self.py = np.append(self.py, self.C*self.y)
robotEndVelocity = np.array([self.x[1],self.y[1],0.])
leftTrj,rightTrj = self.footTrajectoryGenerator(np.hstack((self.footPrints[currentFootStep,self.swingLeg], 0.)),
np.hstack((self.footPrints[currentFootStep+1,self.swingLeg], 0.)),
np.array([0.,0.,0.]),
np.array([0.,0.,0.]),
np.hstack((self.footPrints[currentFootStep,self.supportLeg],0.)),
self.swingLeg)
self.swingLeg, self.supportLeg = self.changeSupportLeg(self.swingLeg, self.supportLeg)
self.targetZMPold = np.vstack((self.targetZMPold, inputTargetZMP))
return CoMTrajectory, leftTrj, rightTrj
def targetZMPgenerator(self,targetZMP,targetZMPold, Tsup, Tdl):
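        # Reference ZMP: linearly interpolate from the previous target to the new target
        # over the double-support phase (Tdl steps), then hold the new target for the
        # single-support phase (Tsup steps).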
tdl_t = np.arange(0,Tdl)
x_a = (targetZMPold[0]-targetZMP[0])/(0-Tdl)
x_b = targetZMPold[0]
y_a = (targetZMPold[1]-targetZMP[1])/(0-Tdl)
y_b = targetZMPold[1]
px_ref = np.hstack(( x_a * tdl_t + x_b, np.full(Tsup, targetZMP[0]) ))
py_ref = np.hstack(( y_a * tdl_t + y_b, np.full(Tsup, targetZMP[1]) ))
return px_ref, py_ref
def footTrajectoryGenerator(self,swingStartPointV,swingEndPointV, startRobotVelocityV_xy,endRobotVelocityV,supportPointV,swingLeg,zheight=0.04):
supportTrajectory = np.vstack((np.full(self.Tdl+self.Tsup,supportPointV[0]),
np.full(self.Tdl+self.Tsup,supportPointV[1]),
np.full(self.Tdl+self.Tsup,supportPointV[2]))).T
swingTrajectoryForTdl = np.vstack((np.full(self.Tdl,swingStartPointV[0]),
np.full(self.Tdl,swingStartPointV[1]),
np.full(self.Tdl,swingStartPointV[2]))).T
if np.array_equal(swingStartPointV, swingEndPointV):
swingTrajectoryForTsup = np.vstack(( | np.full(self.Tsup,swingEndPointV[0]) | numpy.full |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Support functions for the sindy toolkit.
Called by 'runToolkit.py', 'runToolkitExtended.py' and by 'plotSelectedIterations.py'.
For the full procedure, see "README.md".
For method details, please see "A toolkit for data-driven discovery of governing equations in
high-noise regimes" (2022) by <NAME> and <NAME>.
Copyright (c) 2021 <NAME>. <EMAIL>
MIT License
"""
import sys
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from numpy.linalg import lstsq # since this accepts complex inputs and targets.
from scipy.integrate import solve_ivp
"""
---------------------------------------------------------------------------------------------
------------------------ Function Defs ------------------------------------------------------
--------------------------------------------------------------------------------------------- """
#%% Functions for various model systems, that return initial conditions for derivatives.
# These are used to simulate the system via odeint():
def lorenz_fn(z, t, p=({'p0': 10, 'p1': 28, 'p2': np.round(-8/3, 2), 'numExtraVars': 0})):
""" xDot = -p0*x + p0*y
yDot = p1*x - y - x*z
zDot = p2*z + x*y
Inputs:
z: np.vector of floats (initial conditions)
t: np.vector of floats (timesteps)
p: dict (system parameters)
Output:
derivList: np.vector of floats (initial conditions of derivatives).
"""
derivList = [
p['p0'] * (z[1] - z[0]),
z[0] * (p['p1'] - z[2]) - z[1],
z[0] * z[1] + p['p2'] * z[2]
]
for i in range(p['numExtraVars']):
derivList.append(0)
return derivList
# End of lorenz attractor fn
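# Illustrative usage sketch (odeint is the integrator mentioned in the comment above; the
# initial condition, timespan, and parameter values below are example choices):
#   from scipy.integrate import odeint
#   t = np.linspace(0, 10, 2000)
#   params = {'p0': 10, 'p1': 28, 'p2': -2.67, 'numExtraVars': 0}
#   xTrain = odeint(lorenz_fn, [-8.0, 7.0, 27.0], t, args=(params,))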
# -------------------------------------------------------------------
def dampedHarmonicOscillatorLinear_fn(z, t, p=({'p0': 0.1, 'p1': 2, 'numExtraVars': 0})):
""" xDot = -p0*x + p1*y
yDot = -p1*x - p0*y
Inputs:
z: np.vector of floats (initial conditions)
t: np.vector of floats (timesteps)
p: dict (system parameters)
Output:
derivList: np.vector of floats (initial conditions of derivatives).
"""
derivList = [
-p['p0']*z[0] + p['p1']*z[1],
-p['p1']*z[0] - p['p0']*z[1]
]
for i in range(p['numExtraVars']):
derivList.append(0)
return derivList
# end of dampedHarmonicOscillatorLinear_fn
# -------------------------------------------------------------------
def dampedHarmonicOscillatorCubic_fn(z, t, p=({'p0': 0.1, 'p1': 2, 'numExtraVars': 0})):
""" xDot = -p0*x^3 + p1*y^3
yDot = -p1*x^3 - p0*y^3
Inputs:
z: np.vector of floats (initial conditions)
t: np.vector of floats (timesteps)
p: dict (system parameters)
Output:
derivList: np.vector of floats (initial conditions of derivatives).
"""
derivList = [
-p['p0']*pow(z[0], 3) + p['p1']*pow(z[1], 3),
-p['p1']*pow(z[0], 3) - p['p0']*pow(z[1], 3)
]
for i in range(p['numExtraVars']):
derivList.append(0)
return derivList
# end of dampedHarmonicOscillatorCubic_fn
#----------------------------------------------------------------------
def threeDimLinear_fn(z, t, p=({'p0': 0.1, 'p1': 2, 'p2': 0.3, 'numExtraVars': 0})):
""" xDot = -p0*x - p1*y
yDot = p1*x - p0*y
zDot = -p2*z
Inputs:
z: np.vector of floats (initial conditions)
t: np.vector of floats (timesteps)
p: dict (system parameters)
Output:
derivList: np.vector of floats (initial conditions of derivatives).
NOTE: >= 4th order library terms cause failure in Brunton
"""
derivList = [
-p['p0']*z[0] - p['p1']*z[1],
p['p1']*z[0] - p['p0']*z[1],
-p['p2']*z[2]
]
for i in range(p['numExtraVars']):
derivList.append(0)
return derivList
# end of threeDimLinear_fn
# -------------------------------------------------------------------
def hopfNormalForm2D_fn(z, t, p=({'p0': 0, 'p1': -1, 'p2': 1, 'numExtraVars': 0})):
""" Mean field model with zDot == 0:
        xDot = p0*x - p1*y + p2*x*(x^2 + y^2)
        yDot = p1*x + p0*y + p2*y*(x^2 + y^2)
where p0 = mu, p1 = omega, p2 = A, p3 = lambda in Brunton paper.
Note that in the 3D model, zDot = -p2 * (z - x^2 - y^2). In this 2D version we assume
lambda is big, so zDot -> 0 rapidly and thus z = x^2 + y^2.
TO-DO: we need param values for this model. mu is in [-0.2, 0.6]. omega, A, and lambda
values are unknown.
Initial values estimate: x,y = {1, 0.75} or {0,0} (see fig 3 in Brunton paper)
Inputs:
z: np.vector of floats (initial conditions)
t: np.vector of floats (timesteps)
p: dict (system parameters)
Output:
derivList: np.vector of floats (initial conditions of derivatives).
"""
derivList = [
p['p0'] * z[0] - p['p1'] * z[1] + p['p2'] * z[0] * (pow(z[0], 2) + pow(z[1], 2)),
p['p1'] * z[0] + p['p0'] * z[1] + p['p2'] * z[1] * (pow(z[0], 2) + pow(z[1], 2))
]
for i in range(p['numExtraVars']):
derivList.append(0)
return derivList
# end of hopfNormalForm2D_fn
# -------------------------------------------------------------------
def hopfNormalForm3D_fn(z, t, p=({'p0': 0, 'p1': -1, 'p2': 1, 'p3': 0.5, 'numExtraVars': 0})):
""" Mean field model with zDot == 0:
        xDot = p0*x - p1*y + p2*x*z
        yDot = p1*x + p0*y + p2*y*z
zDot = -p3 * (z - x^2 - y^2).
where p0 = mu, p1 = omega, p2 = A, p3 = lambda in Brunton paper.
In this 3D version of the model, we assume lambda is not too big, so zDot is not == 0.
TO-DO: We need param values for this model. mu is in [-0.2, 0.6]. omega, A, and lambda
values are unknown. See tables 10, 11 in Brunton paper S.I.
Question: is mu being used in two ways, as a coefficient and as a "bifurcation parameter"
(see eg table 13)?
Initial values estimate: x,y = {1, 0.75} or {0,0} (see fig 3 in Brunton paper)
Inputs:
z: np.vector of floats (initial conditions)
t: np.vector of floats (timesteps)
p: dict (system parameters)
Output:
derivList: np.vector of floats (initial conditions of derivatives).
"""
derivList = [
p['p0'] * z[0] - p['p1'] * z[1] + p['p2'] * z[0] * z[2], # (pow(z[0], 2) + pow(z[1], 2)),
p['p1'] * z[0] + p['p0'] * z[1] + p['p2'] * z[1] * z[2], # (pow(z[0], 2) + pow(z[1], 2)),
-p['p3'] * (z[2] - pow(z[0],2) - pow(z[1], 2))]
for i in range(p['numExtraVars']):
derivList.append(0)
return derivList
# end of hopfNormalForm3D_fn
# ------------------------------------------------------------------
def generateModelStrAndTrueArrays_fn(modelSystem, p):
"""
Build a string describing the model.
Parameters
----------
modelSystem : str
p : dict
Returns
-------
modelStr : str.
trueLib : tuple of lists of str
trueLibCoeffs : tuple of lists of floats
"""
if modelSystem == 'lorenz':
modelStr = \
"x' = -" + str(p['p0']) + ' x + ' + str(p['p0']) + ' y' + '\n' + \
"y' = " + str(p['p1']) + ' x - y - x*z' + '\n' + \
"z' = " + str(p['p2']) + ' z' + ' + x*y'
trueLib = (['x','y'], ['x','y','x*z'], ['z', 'x*y'])
trueLibCoeffs = ([-p['p0'], p['p0']], [p['p1'], -1, -1], [p['p2'], 1])
if modelSystem == 'harmOscLinear':
modelStr = \
"x' = -" + str(p['p0']) + ' x + ' + str(p['p1']) + ' y' + '\n' + \
"y' = -" + str(p['p1']) + " x -" + str(p['p0']) + ' y'
trueLib = (['x', 'y'], ['x', 'y'])
trueLibCoeffs = ([-p['p0'], p['p1']], [-p['p1'], -p['p0']])
if modelSystem == 'harmOscCubic':
modelStr = \
"x' = -" + str(p['p0']) + ' x^3 + ' + str(p['p1']) + ' y^3' + '\n' + \
"y' = -" + str(p['p1']) + " x^3 -" + str(p['p0']) + ' y^3'
trueLib = (['x^3', 'y^3'], ['x^3', 'y^3'])
trueLibCoeffs = ([-p['p0'], p['p1']], [-p['p1'], -p['p0']])
if modelSystem == 'threeDimLinear':
modelStr = \
"x' = -" + str(p['p0']) + ' x - ' + str(p['p1']) + ' y' + '\n' + \
"y' = " + str(p['p1']) + " x -" + str(p['p0']) + ' y' + '\n' + \
"z' = -" + str(p['p2']) + " z"
trueLib = (['x', 'y'], ['x', 'y'], ['z'])
trueLibCoeffs = ([-p['p0'], -p['p1']], [p['p1'], -p['p0']], [-p['p2']])
if modelSystem == 'hopfNormal2D':
modelStr = \
"x' = " + str(p['p0']) + ' x + ' + str(p['p1']) + ' y ' + "+ " + str(p['p2']) + \
'(x^3 + x*y^2)' + '\n' + \
"y' = " + str(p['p1']) + ' x + ' + str(p['p0']) + ' y ' + "+ " + str(p['p2']) + \
'(y*x^2 + y^3)'
trueLib = (['x', 'y', 'x^3', 'x*y^2'], ['x', 'y', 'y^3', 'x^2*y'])
trueLibCoeffs = ([p['p0'], p['p1'], p['p2'], p['p2']],
[p['p1'], p['p0'], p['p2'], p['p2']])
if modelSystem == 'hopfNormal3D':
modelStr = \
"x' = " + str(p['p0']) + ' x - ' + str(p['p1']) + ' y ' + "+ " + str(p['p2']) + \
' x*z' + '\n' + \
"y' = " + str(p['p1']) + ' x + ' + str(p['p0']) + ' y ' + "+ " + str(p['p2']) + \
' y*z' + '\n' + \
"z' = -" + str(p['p3']) + ' * (z - x^2 - y^2)'
trueLib = (['x', 'y', 'x*z'], ['x', 'y', 'y*z'], ['z', 'x^2', 'y^2'])
trueLibCoeffs = ([p['p0'], p['p1'], p['p2']],
[p['p1'], p['p0'], p['p2']], [-p['p3'], p['p3'], p['p3']])
return modelStr, trueLib, trueLibCoeffs
# End of generateModelStrAndTrueArrays_fn
# ---------------------------------------------------
def generateTrueFunctionalAndCoeffArrays_fn(trueLib, trueLibCoeffs, functionList):
"""
Given lists of functional names as str, and the function list, construct a true
'functionsToUseArray'
Parameters
----------
trueLib : list-like of lists of str. len of list-like = numVars, len of each list = num true
library functionals for that variable
trueLibCoeffs : list-like of lists of floats. Matches 'trueLib' above.
functionList : list of str. The functional names
Returns
-------
trueLibraryArray : np.array of bools, numVars x numFunctionals
"""
trueLibraryArray = np.zeros((len(trueLib), len(functionList)), dtype=bool)
trueCoeffArray = np.zeros((len(trueLib), len(functionList)))
for v in range(len(trueLib)): # v is the variable index
theseFnalNames = np.array(trueLib[v])
theseCoeffs = np.array(trueLibCoeffs[v])
for f in range(len(functionList)):
ind = np.where(theseFnalNames == functionList[f])[0]
if len(ind) > 0: # ie functionList[f] is a true functional
trueLibraryArray[v, f] = True
trueCoeffArray[v, f] = theseCoeffs[ind]
return trueLibraryArray, trueCoeffArray
# End of generateTrueFunctionalAndCoeffArrays_fn
# ----------------------------------------------------------
def generateFunctionStr_fn(v, varInds, variableNames):
"""
Given a list, generate a string. Used by generatePolynomialLibrary_fn.
Parameters
----------
v : list of ints
varInds : list of ints
variableNames : list of str
Returns
-------
fnStr : str
"""
fnStr = ''
for i in varInds:
if i in v:
if len(fnStr) > 0: # case: we need a multiplication sign:
fnStr += '*'
fnStr += variableNames[i]
if np.sum(np.array(v) == i) > 1: # case: we need an exponent:
fnStr += '^' + str(np.sum(np.array(v) == i))
return fnStr
# End of generateFunctionStr_fn
#------------------------------------------------------------------------
def generatePolynomialLibrary_fn(variableNames, degree):
"""
Generate a library of polynomials up to a certain degree. Return two things: a list of
functional names and a list of recipes for use in ode evolutions.
    NOTE: If there is one variable, and its name is more than one character, this function will
fail because 'varInds' will equal the number of characters in the variable name.
Parameters
----------
variableNames : list-like of str
degree : int
Returns
-------
functionList : list of str
recipes : list of lists, length = numFunctionals
"""
varInds = np.arange(len(variableNames))
recipes = []
functionList = []
recipes.append(-1) # the constant function
functionList.append('1')
# Treat degree = 1:
combos = [] # initialize
for i in varInds:
combos.append(i)
recipes.append(i)
functionList.append(variableNames[i])
deg = 2 # initialize
while deg <= degree:
combos = [(i, j) for i in varInds for j in combos] # vector of {int, list} entries,
# entry has total len = deg. There are duplicates at >= degree 3, eg (0,(0,1)) and
# (1,(0,0)). So for each entry we must (a) make into a single list; and (b) check that it
# is new before appending it to 'recipes' and 'functionList'.
# (a) combine int and list:
keepCombos = [] # to keep the non-duplicates
for k in range(len(combos)):
c = combos[k]
this = []
for i in c:
if isinstance(i, (int, np.integer)):
this.append(i)
else:
for j in i:
this.append(j)
this = list(np.sort(np.array(this)))
# 'this' is now a single sorted list of ints.
# (b) If 'this' is new, append to recipes:
addFlag = True
for i in recipes:
if not isinstance(i, (int, np.integer)):
if len(this) == len(i) and np.sum(np.array(this) == np.array(i)) == len(i):
addFlag = False
break
if addFlag:
recipes.append(this)
keepCombos.append(this)
functionList.append(generateFunctionStr_fn(this, varInds, variableNames))
# Update combos with non-duplicate list:
combos = keepCombos
deg += 1
return functionList, recipes
# End of generatePolynomialLibrary_fn
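# Worked example, traced from the loop above (two variables, degree 2):
#   functionList, recipes = generatePolynomialLibrary_fn(['x', 'y'], 2)
#   functionList -> ['1', 'x', 'y', 'x^2', 'x*y', 'y^2']
#   recipes      -> [-1, 0, 1, [0, 0], [0, 1], [1, 1]]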
#--------------------------------------------------------------
def calculateLibraryFunctionalValues_fn(x, recipes):
"""
For each functional, calculate its values at the timepoints.
Parameters
----------
x : np.array of floats, numTimepoints x numVars
recipes : list of lists. The i'th list gives the indices of variables to be multiplied together
to generate the i'th functional.
Returns
-------
fnVals : np.array of floats, numTimepoints x numFunctionals.
"""
fnVals = np.zeros((x.shape[0], len(recipes)))
for i in range(fnVals.shape[1]):
r = recipes[i]
temp = np.ones(x.shape[0])
if isinstance(r, (int, np.integer)): # constant or degree 1 monomial
if r != -1: # ie not the constant functional
temp = x[:, r]
else: # >= degree 2
for j in r:
temp = temp * x[:, j]
fnVals[:, i] = temp
return fnVals
# End of calculateLibraryFunctionalValues_fn
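# Worked example, continuing the degree-2 library above: with
#   recipes = [-1, 0, 1, [0, 0], [0, 1], [1, 1]] and a single timepoint x = np.array([[2., 3.]]),
# calculateLibraryFunctionalValues_fn(x, recipes) returns [[1., 2., 3., 4., 6., 9.]].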
#------------------------------------------------------------
#%% Make a hamming window:
def makeHammingWindow_fn(hammingWindowLength, plateauRatio=0):
"""" Generate a hamming window, perhaps with a flat plateau in the middle (ie a smoothed step
function).
Inputs:
hammingWindowLength: int
plateauRatio: float 0 to 1
Outputs:
hamm: vector with sum = 1
"""
if plateauRatio > 0:
# add a plateau in the middle:
plateauRatio = min(1, plateauRatio)
ends = int(np.ceil(hammingWindowLength*(1 - plateauRatio)))
if ends%2 == 1:
ends = ends + 1 # make even
rise = int(ends/2)
ends = np.hamming(ends) # ends is now a hamming vector
hamm = np.ones((1, hammingWindowLength))
hamm = hamm.flatten()
hamm[0:rise] = ends[0:rise]
hamm[-rise:] = ends[-rise:]
else:
# normal hamming filter
hamm = np.hamming(hammingWindowLength)
# Normalize:
hamm = hamm / np.sum(hamm)
return hamm
# End of makeHammingWindow_fn
#---------------------------------------------------------
def calculateSlopeAndStd_fn(x, dt, w):
''' given a time-series, do two things:
1. calculate the deriv at each point by simple rise/run (Euler formula)
        2. calculate the std of a window (size 2*h) at each point, using the slope from (1) to
            first tilt (detrend) the window to roughly slope = 0.
Inputs:
            x: np.vector
dt: float
w: int. Window length
Outputs:
slopeX: np.vector
stdX: np.vector
meanX: np.vector
'''
h = int(np.round(w/2)) # Half the window length
slopeX = np.zeros(x.shape)
stdX = np.zeros(x.shape)
meanX = np.zeros(x.shape)
# For efficiency, we take the mean of the first window, then update it at each new point:
for i in range(len(x)):
if i == h + 1: # First point's window
b = np.mean(x[i:i + h])
a = np.mean(x[i-h:i])
if i > h + 1 and i < len(x) - h: # all ensuing points' windows
b = b + (x[i + h] - x[i])/h
a = a + (x[i] - x[i-h])/h
if i > h and i < len(x) - h: # all points' windows (ie happens for both above cases)
slopeX[i] = (b-a)/h
tilted = x[i-h:i+h] - slopeX[i]*np.array(range(-h, h))
stdX[i] = np.std(tilted) # subsampling doesn't speed this up much.
meanX[i] = 0.5 * (b + a)
# Fill in start and end values:
slopeX[0:h + 1] = slopeX[h + 1]
slopeX[-h:] = slopeX[-h - 1]
stdX[0:h + 1] = stdX[h + 1]
stdX[-h:] = stdX[-h - 1]
meanX[0:h + 1] = meanX[h + 1]
meanX[-h:] = meanX[-h - 1]
# account for dt:
slopeX = slopeX/dt
return slopeX, stdX, meanX
# End of calculateSlopeAndStd_fn
#------------------------------------------------------------------
def addGaussianNoise_fn(X, noiseFactors, noiseType = 'normal'):
""" add noise (default gaussian) directly to the trajectory array.
Inputs:
        X: np.array, numTimepoints x numVars, ie each column is one variable's
            time-course
noiseFactors: np.array of floats, num real vars x 1
noiseType: str, 'normal' or 'uniform'
Outputs:
XwithNoise: n x m np.array
"""
noiseArray = np.tile(noiseFactors, [X.shape[0], 1])
# Add noise to xTrain, scaled as fraction of std dev of x, y, z values over the trajectory:
scalingVals = np.std(X, axis = 0) # for scaling noise. Use only x, y, z
# 'scalingVals' for extra variables currently == 0, so replace these with the mean of others:
scalingVals[scalingVals < 1e-5] = np.mean(scalingVals[scalingVals > 1e-5])
# Add rows for the extra variables:
if noiseType == 'uniform':
noiseToAdd = 2 * noiseArray * np.multiply(np.tile(scalingVals, (X.shape[0], 1)),
-0.5 + np.random.uniform(size = X.shape))
else: # noiseType == 'normal':
noiseToAdd = noiseArray * np.multiply(np.tile(scalingVals, (X.shape[0], 1)),
np.random.normal(size = X.shape))
xNoisy = X + noiseToAdd
return xNoisy
# End of addNoise_fn
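# Illustrative usage sketch (xTrain is numTimepoints x numVars; the 0.3 entries are example
# values meaning noise std = 30% of each variable's std over the trajectory):
#   xNoisy = addGaussianNoise_fn(xTrain, noiseFactors=np.array([0.3, 0.3, 0.3]))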
#-----------------------------------------------------------------------------------------
def addWhiteNoise_fn(xArray, noiseFactors):
''' Add white noise to trajectory array, using FFT.
Inputs:
xArray: np.array, numTimepoints x numVars
noiseFactors: np.array of floats, num real vars x 1
Outputs:
xNoisy: np.array, real part of ifft of noisy fft signal.
'''
xNoisy = np.zeros(xArray.shape)
for i in range(xArray.shape[1]):
x = xArray[:, i]
xF = np.fft.fft(x)
realSigma = np.std(np.real(xF))
imSigma = np.std(np.imag(xF))
noiseToAdd = noiseFactors[i] * (realSigma * np.random.normal(size=xF.shape) + \
1j * imSigma * np.random.normal(size=xF.shape))
xFNoisy = xF + noiseToAdd
        xNoisy[:, i] = np.real(np.fft.ifft(xFNoisy))
    return xNoisy
# End of addWhiteNoise_fn
#--------------------------------------------------------------------------------------
def calcWeightArray_fn(cNew, functionsToUseArray, imputed, coeffWeightsCutoffFactor,
percentileOfImputedValuesForWeights):
"""
Create a weight array to weight the coeffs by size of functional values. Culling will be
based on the weighted coeffs.
We give high weights to functionals that tend to have high values, since we want to allow
them to have lower coeffs without being culled. Functionals that tend to have low values
get low weights, since their effect will be less. We 'normalize' using the median of
all functional imputed values.
Parameters
----------
cNew : np.array of floats. numVars x numFunctionals.
functionsToUseArray : np.array of booleans. numVars x numFunctionals.
imputed : np.array of floats. vector 1 x numFunctionals. Estimated magnitudes of each
functional.
coeffWeightsCutoffFactor : (scalar float or int)
percentileOfImputedValuesForWeights : scalar int
Returns
-------
weightArray : np.array of floats. numVars x numFunctions.
"""
# Reduce the extremes of this vector in each row (ie for each variable), and normalize:
weightArray= np.zeros(cNew.shape)
for i in range(weightArray.shape[0]):
if np.sum(functionsToUseArray[i, :]) > 0:
temp = imputed.copy()
centralVal = np.percentile(temp[functionsToUseArray[i, :]],
percentileOfImputedValuesForWeights,
interpolation='lower')
# Over functionals currently active for this variable.
# Moderate the extremes of this vector:
temp[temp > coeffWeightsCutoffFactor * centralVal] = \
coeffWeightsCutoffFactor * centralVal
temp[temp < centralVal / coeffWeightsCutoffFactor] = \
centralVal / coeffWeightsCutoffFactor
temp = temp * functionsToUseArray[i, :] # Zero the weights of unused functions.
# Normalize (comment: It would be nice if the normalization here penalized variables
# with many active functionals, to enforce sparsity.):
temp = temp / np.median(temp[temp > 0]) # So in the ballpark of 1
weightArray[i, :] = temp
return weightArray
#-------- End of calcWeightArray_fn-----------------
def calculateFftForRegression_fn(x, numFftPoints, fftRegressionTarget):
"""
Create a vector using some form of FFT, to act as a regression target.
Parameters
----------
x : np.array (vector) of floats
numFftPoints : int
fftRegressionTarget : str
Returns
-------
xT : np.array (vector) of floats
"""
x = x - np.mean(x)
xP = np.fft.fft(x) # Use this if fftRegressionTarget == 'complex'
if fftRegressionTarget == 'realOnly':
xP = np.real(xP)
if fftRegressionTarget == 'magnitude':
xP = np.abs(xP)
if fftRegressionTarget == 'power':
xP = pow(np.real(xP), 2)
xP = xP[0:numFftPoints] / np.sum(np.abs(xP[0:numFftPoints])) # normalize
return xP
# ------- End of calculateFftForRegression_fn ----------------------------
def cullAndAssessWhetherToRerun_fn(localCoeffArray, variableNames, functionList,
imputedSizeOfFunctionals, cullingRulesDict, functionsToUseArray,
cullable, inBalanceVarsArray, coeffWeightsCutoffFactor,
percentileOfImputedValuesForWeights):
"""
Given results of post-smoothing sindy or linear fit, see if we can cull any variables or
library functions.
We make an array of booleans that shows which coeffs of the model were non-zero. This array
serves as modelActiveLib, restricting which library functions can be used for each of
the variables. No variables or functions are actually removed. Instead, they are set off-limits
by modifying the boolean array.
Parameters
----------
localCoeffArray : np.array of floats, numVars x numFunctionals
variableNames : vector of strings
functionList : vector of strings
imputedSizeOfFunctionals : np.array of floats
cullingRulesDict : Dict with keys like 'maxAllowedWeightedCoeff'
functionsToUseArray : np.array booleans, numVars x numFunctionals
cullable : np.array booleans, numVars x numFunctionals
inBalanceVarsArray : np.array booleans, numVars x numFunctionals. True = eligible for culling
coeffWeightsCutoffFactor : scalar float
percentileOfImputedValuesForWeights : int
Returns
-------
iterateAgainFlag : Boolean
functionsToUseArray : np.array of booleans, numVars x numFunctions
outputStr : str
minNonZeroVal : float, the minimum weighted coeff (to use as a stopping signal)
"""
numExtraToKill = cullingRulesDict['extraNumFnsCulledPerIter']
minAllowedWeightedCoeff = cullingRulesDict['minAllowedWeightedCoeff']
# Note: these are trivially == 1 and have no effect if 'coeffWeightsCutoffFactor' == 1:
cOld = localCoeffArray # i'th col corresponds to the i'th entry in functionList. j'th
#row corresponds to j'th variable.
cNew = cOld.copy() # we'll zero out entries in this array
fOld = functionsToUseArray.copy() # fOld is for comparison, to determine 'iterateAgainFlag',
# since we'll update functionsToUseArray.
# We could also get fOld from modelActiveLib
# Kill the constant term if it is the only non-zero term:
if cullingRulesDict['setConstantDerivEstimatesToZero']:
for i in range(cNew.shape[0]):
if np.sum(np.abs(cNew[i, 1:])) < 1e-5: # 1e-5 = almostZeroRoundDownThreshold, which
# deals with weird not-quite-zero zeros
cNew[i,0] = 0
# 1. find which vars are non-zero:
zeroedVars = np.sum(cNew, axis = 1) == 0 # col vector, True -> var has been zeroed out.
# 2. Now zero out all functions that involve zeroed-out variables, ie zero out columns that
# contain the var name:
# Note: this has not been updated to use 'cullable'.
for i in range(len(zeroedVars)):
if zeroedVars[i] == True:
varName = variableNames[i]
for j in range(cNew.shape[1]):
if varName in functionList[j]:
cNew[:,j] = 0
# 3. Create a weight array to weight the coeffs by size of functional values.
functionsToUseArray = np.abs(cNew) > 1e-5 # almostZeroRoundDownThreshold
if coeffWeightsCutoffFactor > 1: # Recall coeffWeightsCutoffFactor = 1 -> no weights.
weightArray = calcWeightArray_fn(cNew, functionsToUseArray, imputedSizeOfFunctionals,
coeffWeightsCutoffFactor,
percentileOfImputedValuesForWeights)
else: # Case: we're not weighting the coeffs.
weightArray = np.ones(cNew.shape) / cNew.shape[1]
# weightArray is now ready to multiply cNew.
# 3. Now zero out all functions with coeffs that are too large. Currently never activates (at
# maxAllowed = 50: we can disable this by picking a very large 'maxAllowedWeightedCoeff'
# parameter. inBalanceVarsArray == true prevents culling fnals from vars with low fnal counts.
tooBigStr = ''
# cNewWeighted = cNew * weightArray
# tooBigFnsArray = np.logical_and(np.abs(cNewWeighted) > maxAllowedWeightedCoeff,
# inBalanceVarsArray)
# make string of too-big functionals:
# for i in np.where(np.sum(tooBigFnsArray,axis=1) > 0)[0]: # ie rows/variables with too-small
# # functionals.
# tooBigStr = tooBigStr + variableNames[i] + ': ' + \
# str(np.array(functionList)[tooBigFnsArray[i, :]]) + ' '
# cNew[np.where(tooBigFnsArray)] = 0 # set too-big coeffs to 0
# numTooBigCulledFunctionals = np.sum(tooBigFnsArray.flatten())
# 4. Make an updated boolean array of retained functions. Also cull any functionals with very
# low weighted coeffs:
cNewWeighted = cNew * weightArray
tooSmallFnsArray = np.logical_and.reduce((np.abs(cNewWeighted) < minAllowedWeightedCoeff,
np.abs(cNewWeighted) > 0, inBalanceVarsArray))
cNew[np.where(tooSmallFnsArray)] = 0 # set too-small coeffs to 0
# make string of too-small functionals:
tooSmallStr = ''
for i in np.where(np.sum(tooSmallFnsArray,axis=1) > 0)[0]: # ie rows/variables with too-small
# functionals.
tooSmallStr = tooSmallStr + variableNames[i] + ': ' + \
str(np.array(functionList)[tooSmallFnsArray[i, :]]) + ' '
numTooSmallCulledFunctionals = np.sum(tooSmallFnsArray.flatten())
# 5. see if a variable has been removed:
oldZeroedVars = np.sum(cOld, axis = 1) == 0
variableRemovedFlag = np.sum(zeroedVars) > np.sum(oldZeroedVars)
functionsToUseArray = np.abs(cNew) > 1e-5 # almostZeroRoundDownThreshold # Update to remove
# too small coeffs
# 6. See if an entire function (a column) has been removed:
oldZeroedFns = np.sum(fOld, axis = 0) # Sum the columns
newZeroedFns = np.sum(functionsToUseArray, axis = 0)
functionColumnRemovedFlag = np.sum(newZeroedFns == 0) - np.sum(oldZeroedFns == 0) > 0
# 7. Cull based on weighted coeff size relative to current threshold:
# If we have not removed a functional column, and if the lowest weighted coeff is small enough,
# cull more coeffs.
# Make a new weight array, since we may have removed some functionals:
if coeffWeightsCutoffFactor > 1: # Recall coeffWeightsCutoffFactor = 1 -> no weights.
weightArray = calcWeightArray_fn(cNew, functionsToUseArray, imputedSizeOfFunctionals,
coeffWeightsCutoffFactor,
percentileOfImputedValuesForWeights)
else: # Case: we're not weighting the coeffs.
weightArray = np.ones(cNew.shape) / cNew.shape[1]
cNewWeighted = cNew * weightArray
if np.sum(np.abs(cNewWeighted.flatten())) > 1e-5: # almostZeroRoundDownThreshold:
minNonZeroVal = np.min(np.abs(cNewWeighted)[np.logical_and(cullable,
cNewWeighted != 0)].flatten())
else:
minNonZeroVal = 0
extraFunctionalKilledOffFlag = False
numExtraFunctionalsKilled = 0
extraCullBypassedDueToRemovedFunctionColumnFlag = functionColumnRemovedFlag
# If we removed a function from all vars (ie a column), we're done for this cull. Else see if
# any functionals have coeffs below the current threshold. We ignore the protected fns (ie we
# only consider 'cullable' fns)
culledFnsArray = np.zeros(functionsToUseArray.shape, dtype=bool) # to record functionals
# culled because they were the most below the current threshold (but not including 'too-small'
# fns).
while numExtraToKill > 0 and (functionColumnRemovedFlag == False) and \
np.sum(np.abs((cNewWeighted * inBalanceVarsArray).flatten())) > 1e-5:
# 1e-5 = almostZeroRoundDownThreshold
loc = np.where(np.logical_and.reduce((cullable, np.abs(cNewWeighted) == minNonZeroVal,
inBalanceVarsArray)))
functionsToUseArray[loc[0], loc[1]] = False # Update the boolean functionals array
cNewWeighted[loc[0], loc[1]] = 0 # Update the beta weights array
culledFnsArray[loc[0], loc[1]] = True # Record this culled functional
numExtraFunctionalsKilled += len(loc[0])
# outputStr = outputStr + '; ' + str(functionList[loc[1][0]]) + \
# ' (' + str(variableNames[loc[0][0]] + ')')
extraFunctionalKilledOffFlag = True
# Update 'functionRemovedFlag' accounting for newly-culled function:
newZeroedFns = np.sum(functionsToUseArray > 0, axis = 0)
functionColumnRemovedFlag = np.sum(newZeroedFns == 0) - np.sum(oldZeroedFns == 0) > 0
temp = cNewWeighted * inBalanceVarsArray * cullable
if np.sum(np.abs(temp.flatten())) > 1e-5: # almostZeroRoundDownThreshold
minNonZeroVal = np.min(np.abs(temp[temp != 0].flatten()))
if 'loc' in locals():
numExtraToKill -= len(loc[0])
else:
numExtraToKill -= 1
else: # escape
minNonZeroVal = 0
numExtraToKill = 0
culledStr = ''
# List the culled fns in a str:
for i in np.where(np.sum(culledFnsArray, axis=1) > 0)[0]:
culledStr = culledStr + variableNames[i] + ': ' + \
str(np.array(functionList)[culledFnsArray[i, :]]) + ' '
# 8. If there are any functions still below thisThreshold, we want to rerun without changing
# the threshold, in order to pick them off one by one:
if np.sum(np.abs(cNewWeighted.flatten())) > 1e-5 and \
np.sum(np.logical_and(cullable, cNewWeighted != 0).flatten()) > 0:
newMinNonZeroVal = np.min(np.abs(cNewWeighted)[np.logical_and(cullable,
cNewWeighted != 0)].flatten())
cullableFunctionFlag = True
else:
cullableFunctionFlag = False
# Make a combined too-big, too-small output str:
# if numTooBigCulledFunctionals > 0:
# tooBigStr = '. Culled ' + tooBigStr + ' with weighted coeffs > ' + \
# str(maxAllowedWeightedCoeff) + '. '
if numTooSmallCulledFunctionals > 0:
tooSmallStr = 'Culled ' + tooSmallStr + \
' with weighted coeffs < ' + str(minAllowedWeightedCoeff) + '. '
if numExtraFunctionalsKilled > 0:
culledStr = 'Culled ' + culledStr
else:
culledStr = ''
if extraCullBypassedDueToRemovedFunctionColumnFlag:
culledStr = ' Extra culling step bypassed due to removed function column.'
outputStr = tooBigStr + tooSmallStr + culledStr
# if any new variable, new function, or even one new functional has been removed, we will do
# another cull.
iterateAgainFlag = variableRemovedFlag or functionColumnRemovedFlag or \
extraFunctionalKilledOffFlag or cullableFunctionFlag
# Note: the returned arg 'culledFnsArray' shows the functionals culled in a way (by being
# below the ratcheting threshold) that makes them eligible to be restored. Functionals that
# have too-big or too-small weighted coeffs are not restorable.
return iterateAgainFlag, functionsToUseArray, culledFnsArray, outputStr, \
minNonZeroVal, cullableFunctionFlag
# End of cullAndAssessWhetherToRerun_fn
# -----------------------------------------------------------------------------------------
def smoothData_fn(x, window, smoothType='hamming'):
"""
Smooth each column of an input array using a window.
Parameters
----------
x : np.array of floats, likely numTimepoints x numSomething (variables or functionals)
window : np.array, vector of floats
smoothType : str, eg 'hamming'
Returns
-------
xSmoothed : np.array, size x.shape
"""
xSmoothed = np.zeros(x.shape)
if True: # smoothType == 'hamming': Assume it's always hamming
for i in range(x.shape[1]):
temp = x[:, i]
xSmoothed[:, i] = np.convolve(temp - temp[0], window, mode='same') + temp[0]
# shift time-series to start at 0 for the convolution, to mitigate edge effects
return xSmoothed
# End of smoothLibraryFunctionsForRegression_fn
#-----------------------------------------------------------------------
def parseTimepointsByMagnitudesOfVariables_fn(x, functionsArray, nonConstantIndices, margin,
maxFnValRatio, minNumStartIndsToUse,
removeMarginFlag=True):
"""
Accept or reject timepoints to use in regressions, based on whether the functionals have
widely disparate values or not.
Parameters
----------
x : np.array of floats, numTimepoints x numFunctionals. Each column is time-series of a
functional's values.
functionsArray : np.array of booleans, numVariables x numFunctionals.
nonConstantIndices : list of ints (generated after the functional library is created)
margin : int
maxFnValRatio : float
minNumStartIndsToUse : int
removeMarginFlag : bool
Returns
-------
startIndsToUse : np.array of booleans numVariables x numTimepoints (not counting margins at
either end).
"""
lengthOfPadToIgnore = 0
if removeMarginFlag:
lengthOfPadToIgnore = margin
startIndsToUse = np.zeros((functionsArray.shape[0], x.shape[0] - 2*lengthOfPadToIgnore),
dtype=bool)
for var in range(functionsArray.shape[0]):
if np.sum(functionsArray[var,]) == 0: # Ignore fully culled variables
pass
else:
thisX = x * np.tile(functionsArray[var,], (x.shape[0], 1))
if removeMarginFlag:
thisX = thisX[margin:-margin,] # Ignore outer margins.
# At spaced timepoints, find the median of abs() of non-zero values, and use
# that as an anchor for scaling:
okToUse = np.ones(thisX.shape, dtype=bool)
subsampleRate = 3
for t in range(0, thisX.shape[0], subsampleRate):
v = np.abs(thisX[t,])
m = np.median(v[v != 0])
okToUse[t:t + subsampleRate,] = \
np.logical_and.reduce((v < m * maxFnValRatio, v > m / maxFnValRatio,
functionsArray[var,]))
# 'okToUse' is a boolean array that tells us, for each timepoint, which functionals
# are ok to regress on. We now select the timepoints that are valid for the highest
# number of functionals:
numOkFnsAtEachTimepoint = np.sum(okToUse, axis=1)
# 'removeMarginFlag' = False is a signal that we are testing linear dependence, so we
# wish to use all the columns of functionsArray because we have already subselected
# the functionals of interest.
if removeMarginFlag:
counts = np.zeros((1, int(np.sum(functionsArray[var, nonConstantIndices]))))
else: # case: lin dependence context, use all columns of functionsArray
counts = np.zeros((1, int(np.sum(functionsArray[var, :]))))
for i in range(counts.shape[1]):
counts[0, i] = np.sum(numOkFnsAtEachTimepoint == i + 1) # sum the columns
# to get number of useable timepoints.
# Keep only those timepoints that use all non-constant available fns
# (captured in 'countValToUse'), ie ignore any timepoints where any
# two functions exceed the maxFnValRatio-derived bounds, EXCEPT subject to a
# guaranteed minimum number of timepoints 'minNumStartIndsToUse', enforced in the
# while loop below.
if removeMarginFlag:
countValToUse = np.sum(functionsArray[var, nonConstantIndices])
else: # case: lin dependence context, use all columns of functionsArray
countValToUse = np.sum(functionsArray[var, :])
# fullCountValToUse = countValToUse.copy() # used for diagnostic printout below.
startIndsToUse[var,] = numOkFnsAtEachTimepoint >= countValToUse
# Add a catch to prevent too few startInds:
while np.sum(startIndsToUse[var,]) < minNumStartIndsToUse:
countValToUse = countValToUse - 1
startIndsToUse[var,] = numOkFnsAtEachTimepoint >= countValToUse
# Diagnostic printount:
# print('var ' + str(var) + ': functional count used for selecting timepoints: ' + \
# str(countValToUse) + ' out of ' + str(fullCountValToUse) + \
# ', numTimepoints = ' + str(np.sum(startIndsToUse[var,])) + ' out of ' + \
# str(startIndsToUse.shape[1]))
# 'startIndsToUse' is an array of booleans, which say whether a certain
# timepoint (starting at 'margin' and ending at {len(tTrain) - margin}) is eligible
# to use.
return startIndsToUse
# End of parseTimepointsByMagnitudesOfVariables_fn
# ---------------------------------------------------------------------------
def calculateDerivativesFromData_fn(x, t, method='centralDifference'):
"""
Given time-series of variables x, calculate the derivatives of each variable.
This function differs from 'estimateDerivatives_fn' below by (a) accepting arrays (ie multiple
time-series); (b) handling endpoints; (c) not returning weights. Could be combined.
NOTE: Currently only allows central difference method
Parameters
----------
x: np.array of floats, numTimepoints x numVars
t : np.vector of floats. The timepoints
Returns
-------
df : np.array of floats, numTimepoints x numFunctionals
"""
t = t.reshape(-1,1)
# if True method == 'centralDifference':
dfMiddle = (x[2:, :] - x[0:-2, :]) / np.tile(t[2:] - t[0:-2], (1, x.shape[1]))
dfFirst = (x[1, :] - x[0, :]) / (t[1] - t[0])
dfLast = (x[-1, :] - x[-2, :]) / (t[-1] - t[-2])
df = np.vstack((dfFirst, dfMiddle, dfLast))
return df
# End of calculateDerivativesFromData_fn
#------------------------------------------------------------------
def calculateDerivativesFromModel_fn(x, coeffs, fVals):
"""
Given a model with coefficients for functionals, calculate the derivatives based on this model,
ie return what the model thinks the derivatives are.
This is different from 'calculateDerivativesFromData_fn' above, which returns the central difference
derivative of the time-series.
Parameters
----------
x : np.array of floats, numTimepoints x numVars
coeffs : np.array of floats, numVars x numFunctionals
fVals : np.array of floats, numTimepoints x numFunctionals
Returns
-------
xDotByModel : np.array of floats, numTimepoints x numVars
"""
xDotByModel = np.zeros(x.shape)
for i in range(x.shape[1]):
xDotByModel[:, i] = np.sum(fVals * np.tile(coeffs[i, :], (fVals.shape[0], 1)), axis=1)
return xDotByModel
# End of calculateDerivativesFromModel_fn
#----------------------------------------------------------------------
def estimateDerivatives_fn(xS, startInds, numDtStepsForDeriv, pointWtsForThisVar, dt):
"""
For a vector, calculate the target derivative (as in dx/dt = rise/run) that we hope to match
when we regress on the library functionals. The target derivative is calculated using the
variables' time-series. Also return the weights for each element of the target rise, for use
in the regression.
NOTE: We require values in xS before the first startInd and beyond the last start ind
Parameters
----------
xS : np.array of floats, (column vector same length as timepoints eg tTrain), the
pre-processed time-series of one variable.
startInds : np.array (vector of ints). Indices of timepoints.
numDtStepsForDeriv : scalar int
pointWtsForThisVar : np.array (vector of floats, same length as timepoints eg tTrain).
dt : scalar float. The length of the simulation timestep
Returns
-------
derivEstimate : np.array (vector of floats). size = startInds.shape
weights : np.array (vector of floats). size = startInds.shape
"""
timeStep = dt*numDtStepsForDeriv # float (ie a measure of time, not an index of timepoints)
# 4th order approx to derivative:
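    # i.e. f'(t) ~ (f(t-2h) - 8*f(t-h) + 8*f(t+h) - f(t+2h)) / (12*h), with h = timeStep: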
m2Inds = startInds - 2*numDtStepsForDeriv # indices
m1Inds = startInds - 1*numDtStepsForDeriv
p2Inds = startInds + 2*numDtStepsForDeriv
p1Inds = startInds + 1*numDtStepsForDeriv
derivEstimates = \
(xS[m2Inds] - 8*xS[m1Inds] + 8*xS[p1Inds] - xS[p2Inds]) / (12*timeStep)
# The weight for each deriv estimate combines the timepoint values used:
weights = \
pointWtsForThisVar[m2Inds] + 8 * pointWtsForThisVar[m1Inds] + \
8 * pointWtsForThisVar[p1Inds] + pointWtsForThisVar[p2Inds]
weights = weights / sum(weights)
return derivEstimates, weights
# End of estimateDerivatives_fn
#--------------------------------------------------------
def defineXAndYForLinRegressionOnEvolution_fn(xT, functionalVals, startInds, numEvolveSteps,
pointWtsForThisVar, dt):
"""
Given start inds, create a target y to be fitted by a linear function of functionals by:
1. define a set of subtargets subY which are individual estimates of derivatives over short
hops, using a sequence of equally-spaced points.
2. define an 'evolution' from the first to the last point by Euler stepping. It is not
a real evolution because the input values of the points at each step are from the given time-
series, not the prediction from the previous timepoint. We do this to maintain a linear
relationship between the functionals and the target value:
(x[t + n] - x[t])/dt = summation( beta_i * (f_i[t] + f_i[t+1] + ... + f_i[t+n-1]) ).
So X = summations of the functionals over the n timepoints; and y = the difference between
the two end timepoints, divided by the timestep.
Parameters
----------
xT : np.array of floats, numTimepoints x numVariables (the pre-processed time-series)
functionalVals : np.array of floats, numTimepoints x num active functionals for this
variable (the value of the active library functionals at each timepoint).
startInds : np.array (vector of ints)
numEvolveSteps : scalar int. How far to evolve to get regression target.
pointWtsForThisVar : np.array (vector of floats, length = numTimepoints)
dt : scalar float
Returns
-------
X : np.array of floats, len(startInds) x num active functionals
y : np.array (vector of floats, same size as 'startInds')
weights : np.array (vector of floats, same size as 'startInds')
"""
wtsPerPoint = np.zeros((numEvolveSteps, len(startInds)))
evolveSteps = np.zeros((len(startInds), functionalVals.shape[1], numEvolveSteps))
for i in range(numEvolveSteps):
evolveSteps[:, :, i] = functionalVals[startInds + i, :]
wtsPerPoint[i, :] = pointWtsForThisVar[startInds + i]
X = np.sum(evolveSteps, axis=2) # Sum over the evolve steps. The result is
# len(startInds) x numFunctionals.
y = (xT[startInds + 1] - xT[startInds]) / dt
weights = np.median(wtsPerPoint, axis=0)
weights = weights / np.sum(weights) # so weights sum to 1
return X, y, weights
# End of defineXAndYForLinRegressionOnEvolution_fn
#-----------------------------------------------------------
def combineSegmentCoeffs_fn(coeffsPerSeg, printDiagnosticsFlag, varName, fnalName,
snrThreshold, minNumSegmentResultsToUse, outputFilename):
"""
Given a vector of coefficients (found by regressing on many segments, or subsets of
timepoints) for some {variable, functional} pair, return (i) a median value, and (ii) a
measure of reliability (stdDev/median). We do this by rejecting outliers until either a
minimum number of coeffs are left, or the stdDev/median is within some bound.
Parameters
----------
coeffsPerSeg : np.array (vector of floats). length = number of segments that were used in
regressions.
printDiagnosticsFlag : scalar boolean
varName : str
fnalName : str
snrThreshold : scalar float
minNumSegmentResultsToUse : scalar int (could be a float)
outputFilename : str
Returns
-------
coeff : scalar float
stdOverMedian : scalar float
"""
# Decide what coeff value to carry forward:
# 1. If the SNR is low, just use mean or median.
segsToUse = coeffsPerSeg != -100 # This should not be empty.
this = coeffsPerSeg[segsToUse].copy()
m = np.median(this)
if m == 0: # to catch an edge case
m = np.mean(this)
s = np.std(this)
# 2. Start removing outliers until snr is low:
while s/np.abs(m) > snrThreshold and len(this) > minNumSegmentResultsToUse:
# Eliminate values and retry:
dist = np.abs(this - m)
this = this[dist < max(dist)]
m = np.median(this)
if m == 0:
m = np.mean(this)
s = np.std(this)
# Assign the final coeffs for this {variable, function} pair:
coeff = np.median(this)
stdOverMedian = np.abs(s / m) # We'll cull functions based on high variability.
# (Diagnostic) Print out vector of segment coeffs if not too many:
if printDiagnosticsFlag: # ie few enough fns in library that we can print the outcome:
console = sys.stdout
with open(outputFilename, 'a') as file:
print(varName + "', " + fnalName + ' values: ' + \
str(np.round(coeffsPerSeg,2)) + ', median ' + \
str(np.round(np.median(coeffsPerSeg), 2)) + ', std ' + \
str(np.round(np.std(coeffsPerSeg), 2)) + '. Final median = ' + \
str(np.round(coeff, 2)) + ', final stdOverMedian = ' + \
str(np.round(stdOverMedian, 2)), file=file)
sys.stdout = console
file.close()
return coeff, stdOverMedian
# End of combineSegmentCoeffs_fn
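# Illustrative sketch (added for clarity; not part of the original pipeline): the
# outlier-rejection loop above in isolation -- repeatedly drop the value(s) farthest from
# the median until std/|median| falls below a threshold or few values remain.
def _sketch_reject_outliers(vals, snrThreshold=0.5, minNumToKeep=3):
    import numpy as np
    this = np.asarray(vals, dtype=float).copy()
    while True:
        m, s = np.median(this), np.std(this)
        if m == 0 or s / np.abs(m) <= snrThreshold or len(this) <= minNumToKeep:
            return m, s
        dist = np.abs(this - m)
        this = this[dist < np.max(dist)]   # drop the value(s) farthest from the median
        if this.size == 0:                 # guard against dropping everything (tied distances)
            return m, s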
#------------------------------------------------------------
def cullBasedOnStdOverMedian_fn(coeffArray, coeffsBySeg, stdOverMedianArray, functionsToUseArray,
startIndsToUse, segStartInds, segEndInds, pointWeights,
functionalVals, p):
"""
Cull functions whose fitted coeffs had high stdDev/Median, ie high variability over the
various segments. This has two steps:
1. Cull functionals based on high variability. Do not cull functionals with relatively high
median (weighted) coefficients.
2. Do a new regression, to calculate new coeffs for the remaining functionals.
NOTE: This method gave bad results (though it is the culling criterion used in REF).
Parameters
----------
coeffArray : np.array of floats, numVariables x numFunctionals. The previous coefficients.
coeffsBySeg : np.array of floats, numVariables x numFunctionals x numSegments. The coefficients
from the new regressions on each segment, to be combined in this function.
    stdOverMedianArray : np.array of floats, numVariables x numFunctionals
    functionsToUseArray : np.array of booleans, numVariables x numFunctionals
    startIndsToUse : np.array of booleans, (numTimepoints - 2*margin) x numVars
    segStartInds, segEndInds : np.arrays of ints, start and end timepoint indices of each segment
    pointWeights : np.array of floats, numTimepoints x numVars
functionalVals : np.array of floats, numTimepoints x numFunctionals
p : dict of params, including:
xTrain : np.array of floats, numTimepoints x numVars
minStdOverMedianThreshold : scalar float
numFunctionsToTriggerCullBasedOnStdOverMean : scalar int
maxNumToRemoveByStdPerCull : scalar int
numDtStepsForDeriv : scalar int
dt : scalar float (the timestep)
regressOnFftAlpha : scalar float (the fraction of regression that is on FFTs)
fftXDotTrainTarget : np.array (numFftPoints x numVars)
fftLibFns : np.array (numFftPoints x numFunctionals)
variableNames : list (np.array?) of str
functionList : list (np.array?) of str
margin : int
weightTimepointsFlag : bool
fftRegressionTarget : str
snrThreshold : scalar float
outputFilename : str
Returns
-------
    coeffArray : np.array of floats, numVariables x numFunctionals. The updated coefficients.
functionsToUseArray : np.array of booleans, numVariables x numFunctionals
"""
# Loop through the variables, handling each one independently:
for v in range(functionsToUseArray.shape[0]):
# Check if any functions have high variance and also lower magnitude for this variable,
# and also if there are few enough functions left to start this type of cull:
# Calculate stdOverMedian threshold:
tempStd = stdOverMedianArray[v, functionsToUseArray[v, :]]
stdThreshold = np.median(tempStd) + 1 * np.std(tempStd)
stdThreshold = max(stdThreshold, p['minStdOverMedianThreshold'])
# Calculate coeff median threshold:
tempMag = np.abs(coeffArray[v, functionsToUseArray[v, :]])
magThreshold = np.median(tempMag)
pointWtsForThisVar = pointWeights[:, v]
        # If there are no active functions, stdThreshold and magThreshold = np.nan
# Identify noisy functionals: high variability and low magnitude:
noisyFnInds = np.where(np.logical_and(tempStd > stdThreshold, tempMag < magThreshold))[0]
numActiveFns = np.sum(functionsToUseArray[v, :])
if len(noisyFnInds) > 0 and numActiveFns <= \
p['numFunctionsToTriggerCullBasedOnStdOverMean'] and numActiveFns > 2:
# Cull only an allowable number of these. Argsort in descending order, then
# cull the first several:
inds = (np.argsort(stdOverMedianArray[v, noisyFnInds]))[-1: :-1]
noisyFnInds = noisyFnInds[inds[0:p['maxNumToRemoveByStdPerCull']]]
# (Diagnostic) Output to console:
console = sys.stdout
with open(p['outputFilename'], 'a') as file:
print('Culled ' + str(np.array(p['functionList'])[noisyFnInds]) + \
' from variable ' + p['variableNames'][v] + \
' due to coeff stdOverMedian = ' + \
str(np.round(tempStd[noisyFnInds], 2)) + ' > ' + \
str(np.round(stdThreshold, 2)) + ' and coeff mag = ' + \
str(np.round(tempMag[noisyFnInds], 2)) + ' < ' + \
str(np.round(magThreshold, 2)) + '. Re-running linear regression.',file=file)
sys.stdout = console
file.close()
# Update functionsToUseArray to cull noisy functions for this variable:
functionsToUseArray[v, noisyFnInds] = False
# Zero out coeffs for this variable. The relevant ones will be replaced using the new
# regressions:
coeffsBySeg[v, :, :] = -100
coeffArray[v, :] = 0
# Pick new segments and regress on each in turn. Use the same 'startIndsToUse' array.
startIndsVarI = startIndsToUse[v,].copy() # booleans
# Convert to indices of timepoints:
startIndsVarI = np.where(startIndsVarI == True)[0] + p['margin']
for seg in range(p['numRegressionSegments']):
# Pick a subset of startIndsVarI for this segment:
if p['useRandomSegmentsFlag']: # Case: Randomly choose points to regress on.
numPointsPerSegment = \
int(len(startIndsVarI) / p['numRegressionSegments'] * \
(1 + 2 * p['overlapFraction']))
startIndIndices = np.sort(np.random.choice(range(len(startIndsVarI)),
numPointsPerSegment, replace=False))
startInds = startIndsVarI[startIndIndices]
else: # Case: Use sequential segments
startInds = startIndsVarI[np.logical_and(startIndsVarI >= segStartInds[seg],
startIndsVarI <= segEndInds[seg])]
# Now do regression, if there are startInds in this segment:
functionInds = np.where(functionsToUseArray[v, :] == True)[0]
if len(startInds) > 0 and len(functionInds) > 0: # Case: There are some startInds.
# Define the target rise based on the derivative approximation:
if p['regressionTarget'] == 'estimated derivatives': # Currently always true
y, weights = \
estimateDerivatives_fn(p['xTrain'][:, v], startInds,
p['numDtStepsForDeriv'],
pointWtsForThisVar, p['dt'])
# Extract the relevant functions (columns):
X = functionalVals[startInds, :].copy()
X = X[:, functionInds]
if p['weightTimepointsFlag']:
sampleWeights = weights
else:
sampleWeights = np.ones(weights.shape) / len(weights) # uniform, sum to 1
# Maybe also regress on fft(xDotTrain) as targets. There are many
# fewer regression points in the fft target: 50 vs. up to 5000 for the
# usual raw derivs, so use the sample weights to balance this out.
if p['regressOnFftAlpha'] > 0:
rawYAlpha = 1 - p['regressOnFftAlpha']
yFftOfXDot = p['fftXDotTrainTarget'][:, v]
y = np.hstack((y, yFftOfXDot )) # Stack the targets
XForFft = p['fftLibFns'][:, functionInds].copy()
XForFft[np.where(np.isnan(XForFft))] = 0 # since fft of constant == nan
X = np.vstack((X, XForFft))
numFftPoints = len(yFftOfXDot)
sampleWeights = \
np.hstack((rawYAlpha * sampleWeights, p['regressOnFftAlpha'] * \
np.ones(numFftPoints) / numFftPoints))
# Finally ready to do the regressions on the segment 'seg':
# Special case: If we are regressing on complex FFT, we need to use lstsq:
if p['fftRegressionTarget'] == 'complex' and p['regressOnFftAlpha'] > 0:
w = sampleWeights.reshape(-1, 1)
betas = lstsq(np.sqrt(w) * X, np.sqrt(w) * y.reshape(-1, 1), rcond=-1)[0]
betas = np.real(betas) # This may be very important and harmful.
coeffsBySeg[v, functionInds, seg] = betas.flatten()
else:
linReg = LinearRegression()
linReg = linReg.fit(X, y, sample_weight=sampleWeights)
coeffsBySeg[v, functionInds, seg] = linReg.coef_
# Regressions have now been done on each segment for variable v.
# For this variable, process the collections of coeffs on the different
# segments to get a single set of coeffs.
prs = coeffsBySeg.copy()
        # We are still in variable 'v'.
for j in range(len(p['functionList'])):
if functionsToUseArray[v, j]: # Case: this function is in use for this
# var, so we need to calculate a single coeff.
coeff, dummy = \
combineSegmentCoeffs_fn(prs[v, j, :], False, p['variableNames'][v],
p['functionList'][j], p['snrThreshold'],
p['minNumSegmentResultsToUse'],
p['outputFilename'])
# Output to file, not console
coeffArray[v, j] = coeff
else: # Case: this functional has been culled for this variable.
pass
            # All functionals have new coeffs for this variable v, after 2nd fitting
# without the functionals culled due to high stdDev / median.
return coeffArray, functionsToUseArray
# End of cullBasedOnStdOverMedian_fn
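# Illustrative sketch (added for clarity; not part of the original pipeline): the
# weighted-least-squares trick used in the complex-FFT branch above -- scale each row of X
# and y by sqrt(weight) before an ordinary lstsq solve. Shown here on real-valued inputs.
def _sketch_weighted_lstsq(X, y, sampleWeights):
    import numpy as np
    w = np.asarray(sampleWeights, dtype=float).reshape(-1, 1)
    betas = np.linalg.lstsq(np.sqrt(w) * X, np.sqrt(w) * y.reshape(-1, 1), rcond=None)[0]
    return betas.flatten()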
#-------------------------------------------------------------------------------
def printWeightedCoeffModel_fn(coeffArray, functionsToUseArray, weightArray, variableNames,
functionList, outputFilename):
"""
Print out a version of the sindy model that shows the weighted coefficients used when culling
functionals. These weights tend to inflate the coefficients of functionals with relatively high
values, and decrease the coefficients of functionals with relatively low values, because a
functional with high values can tolerate a lower coefficient and still have the same impact as
a functional with low values but a higher coefficient.
Parameters
----------
coeffArray : np.array, floats numVars x numFunctionals
functionsToUseArray : np.array of booleans, numVariables x numFunctionals
weightArray : np.array of floats, numVariables x numFunctionals
variableNames : list of str
functionList : list of str
outputFilename : str
Returns
-------
None.
"""
weightedCoeffArray = weightArray * coeffArray
this = printModel_fn(weightedCoeffArray, variableNames, functionList)
console = sys.stdout
with open(outputFilename, 'a') as file:
print('(Weighted coefficients:) \n' + this + '\n', file=file)
file.close()
sys.stdout = console
# End of printWeightedCoeffModel_fn
#--------------------------------------------------------------------------
def evolveModel_fn(coeffArray, recipes, initConds, timepoints):
"""
Evolve odes using solve_ivp.
Because odeint can blow up and treat '0' coefficients as non-zero, we add some fuss to prevent
this in case it can happen with solve_ivp. Method:
Make a new vector of starting values, with culled variables values set to 0. Use this to
simulate the system. Then tack on constant values for the culled variables. This approach
assumes that if a variable has been zeroed out, then all functionals involving that variable
have been zeroed.
Parameters:
----------
coeffArray : np.array of floats, numVars x numFunctionals. Coeffs for the model to be evolved.
recipes : list of lists, length = numFunctionals. The i'th list gives the indices of the
variables to be multiplied to make the i'th fnal.
initConds : np.array of floats, vector 1 x numVars
timepoints : np.array of floats, vector
Returns:
-------
    xEvolved : np.array of floats, len(timepoints) x numVars
"""
# The function used by solve_ivp:
def odeFn(t, s, coef, recipes):
"""
Return the values of s at the next step:
Parameters
----------
t : implicit variable
s : np.array of floats, vector with len = numVars
coef : np.array of floats, numVars x numFunctionals
        recipes : list, length = numFunctionals; each entry is an int (or np.int64), or a list of ints
Returns
-------
df : np.array of floats, vector with len = numVars
"""
coef = np.array(coef)
coef = coef.reshape(len(s), -1)
fnals = np.zeros(len(recipes))
for i in range(len(fnals)):
r = recipes[i]
if isinstance(r, (int, np.integer)):
if r == -1: # -1 means constant term
fnals[i] = 1
else:
fnals[i] = s[r]
else:
temp = 1
for j in range(len(r)):
temp = temp * s[r[j]]
fnals[i] = temp
df = np.zeros(s.shape)
for i in range(len(df)):
df[i] = np.dot(fnals, coef[i, ])
return df
method = 'LSODA' # 'RK45'. For stiff equations: ‘Radau’ or ‘BDF’
startEnd = [timepoints[0], timepoints[-1]]
argList = []
argList.append(coeffArray)
argList.append(recipes)
# Fussing to prevent possible blow-ups of zeroed-out variables:
# First remove any variables that have all zero coefficients:
temp = np.abs(coeffArray) > 1e-5
zeroedOutVars = np.where(np.sum(temp, axis=1) == 0)[0] # The culled variables.
# Make a set of starting data with zeros in the culled variables:
initCondsCulledVarsZeroed = initConds.copy()
if len(zeroedOutVars) > 0:
initCondsCulledVarsZeroed[zeroedOutVars] = 0
xEvolved = np.zeros((len(timepoints), len(initConds))) # Initialize
sol = solve_ivp(odeFn, t_span=startEnd, y0=initCondsCulledVarsZeroed, method=method,
t_eval=timepoints, args=argList)
if sol.success:
xEvolved = (sol.y).transpose()
else:
print('Error notice: solve_ivp failed.')
# Fill in the constant vars:
if len(zeroedOutVars) > 0:
xEvolved[:, zeroedOutVars] = np.tile(initConds[zeroedOutVars], (len(timepoints), 1))
return xEvolved # could also return sol.t
# End of evolveModel_fn
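# Illustrative sketch (added for clarity; not part of the original pipeline): a hypothetical
# call to evolveModel_fn for the linear oscillator dx/dt = -y, dy/dt = x, using the recipe
# convention read by odeFn above (-1 -> constant, an int -> that variable, a list -> product).
def _sketch_evolve_linear_oscillator():
    import numpy as np
    recipes = [-1, 0, 1]                      # functionals: 1, x, y
    coeffArray = np.array([[0., 0., -1.],     # dx/dt = -y
                           [0., 1., 0.]])     # dy/dt =  x
    timepoints = np.linspace(0, 10, 201)
    initConds = np.array([1.0, 0.0])
    return evolveModel_fn(coeffArray, recipes, initConds, timepoints)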
#-------------------------------------------------------------------------
def generateVariableEnvelopes_fn(xData, numInWindow, dt, shrinkFactor=1):
"""
For each point in a time-series, find the max and min values in a neighborhood for each
variable.
Parameters
----------
xData : np.array of floats, numTimepoints x numVariables
numInWindow : scalar int
dt : scalar float
shrinkFactor : scalar float >= 1
Returns
-------
varLocalMax: np.array of floats, size xData.shape
varLocalMin: np.array of floats, size xData.shape
"""
# If required, shrink data towards local mean values:
if shrinkFactor > 1:
for i in range(xData.shape[1]):
slopeX, stdX, meanX = calculateSlopeAndStd_fn(xData[:, i], dt, numInWindow)
xData[:, i] = (xData[:, i] - meanX) / shrinkFactor + meanX
half = int(np.floor(numInWindow / 2))
varLocalMax = np.zeros(xData.shape)
varLocalMin = np.zeros(xData.shape)
    for i in range(xData.shape[0]): # loop over timepoints
startInd = max(0, i - half)
endInd = min(xData.shape[0], i + half)
varLocalMax[i, :] = np.max(xData[startInd:endInd, :], axis=0)
varLocalMin[i, :] = np.min(xData[startInd:endInd, :], axis=0)
return varLocalMax, varLocalMin
# End of generateVariableEnvelopes_fn
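# Illustrative sketch (added for clarity; not part of the original pipeline): the same
# sliding-window envelope applied to a single 1-D signal (numInWindow assumed >= 2).
def _sketch_envelope_1d(x, numInWindow):
    import numpy as np
    x = np.asarray(x, dtype=float)
    half = int(np.floor(numInWindow / 2))
    hi, lo = np.zeros(x.shape), np.zeros(x.shape)
    for i in range(len(x)):
        startInd = max(0, i - half)
        endInd = min(len(x), i + half)
        hi[i] = np.max(x[startInd:endInd])
        lo[i] = np.min(x[startInd:endInd])
    return hi, lo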
#------------------------------------------------------------------------
def calculateFiguresOfMerit_fn(x, xTrue, fftPowerTrue, localMin, localMax, stdDevVector,
meanVector, medianVector, fomTimepoints, maxPhaseShift):
"""
Evolve a model and calculate various figures of merit.
NOTE: We assume that xEvolved includes only timepoints in 'fomTimepointInds'. For
'inEnvelopeFoM' we take the max over small phase shifts of the whole time-series.
Parameters
----------
x : np.array of floats, numTimepoints x numVariables
xTrue : np.array of floats, numTimepoints x numVariables
fftPowerTrue : np.array of floats, numFftPoints x numVariables
localMin : np.array of floats, numTimepoints x numVariables
localMax : np.array of floats, numTimepoints x numVariables
stdDevVector : np.array of floats, numTimepoints x numVariables
meanVector : np.array of floats, numTimepoints x numVariables
medianVector : np.array of floats, numTimepoints x numVariables
fomTimepoints : np.array (list?) of ints
maxPhaseShift : scalar int. Must be >= 1
Returns
-------
fomDict: dict
"""
if len(x.shape) == 1: # Case: 1-dim with shape (val, ) rather than (val, 1)
x = np.expand_dims(x, 1)
    phaseShifts = np.array(range(-maxPhaseShift, maxPhaseShift, 3)) # hop by 3s for speed
first = maxPhaseShift # Used to prevent overshooting.
final = x.shape[0] - maxPhaseShift # Ditto.
fomDict = dict()
inBoundsFoM = np.zeros(x.shape[1])
inEnvelopeFoM = np.zeros(x.shape[1])
inEnvPhaseShiftVals = np.zeros(len(phaseShifts))
globalMin = np.min(localMin, axis=0)
globalMax = np.max(localMax, axis=0)
for i in range(x.shape[1]):
inBoundsFoM[i] = \
np.sum(np.logical_and(x[:, i] > globalMin[i],
x[:, i] < globalMax[i])) / len(fomTimepoints)
for j in range(len(phaseShifts)):
pS = phaseShifts[j]
inEnvPhaseShiftVals[j] = \
np.sum(np.logical_and(x[first + pS:final + pS, i] > \
localMin[first:final, i],
x[first + pS:final + pS, i] < \
localMax[first:final, i])) / len(fomTimepoints)
inEnvelopeFoM[i] = max(inEnvPhaseShiftVals)
# Statistics:
temp = np.std(x, axis=0)
stdDevFoM = (temp - stdDevVector) / stdDevVector
stdDevFoM[stdDevFoM > 100] = 100 # To avoid uselessly big numbers
temp = np.mean(x, axis=0)
meanFoM = (temp - meanVector) / meanVector
meanFoM[meanFoM > 100] = 100 # To avoid uselessly big numbers
meanFoM[meanFoM < -100] = -100 # To avoid uselessly big numbers
temp = np.median(x, axis=0)
medianFoM = (temp - medianVector) / medianVector
medianFoM[medianFoM > 100] = 100 # To avoid uselessly big numbers
medianFoM[medianFoM < -100] = -100 # To avoid uselessly big numbers
# Correlation of FFT:
numFftPoints = fftPowerTrue.shape[0]
fftPower = np.zeros([numFftPoints, x.shape[1]])
fftCorrelationFoM = np.zeros(x.shape[1])
for i in range(x.shape[1]):
x1 = x[:, i].copy()
x1 = x1 - np.mean(x1)
xP = pow(np.real(np.fft.fft(x1)), 2) # power spectrum of fft
temp = xP[0:numFftPoints] / np.sum(xP[0:numFftPoints])
temp[np.isnan(temp)] = 0
fftPower[:, i] = temp
# Handle case that a var is identically 0:
if not np.isnan(np.sum(fftPowerTrue[:, i])):
fftCorrelationFoM[i] = np.dot(fftPower[:, i], fftPowerTrue[:, i]) / \
np.dot(fftPowerTrue[:, i], fftPowerTrue[:, i]) # Normalize
else:
fftCorrelationFoM[i] = 0
# Correlation of histograms:
numHistogramBins = 100
histogramRange = np.array((np.min(xTrue, axis=0), np.max(xTrue, axis=0))) # 2 x numVars
theseHistograms = np.zeros([numHistogramBins, x.shape[1]])
theseHistogramBins = theseHistograms.copy()
histogramCorrelationFoM = np.zeros(x.shape[1])
for i in range(x.shape[1]):
histTrue = np.histogram(xTrue[:, i], bins=numHistogramBins)[0]
temp = np.histogram(x[:, i], bins=numHistogramBins,
range=(histogramRange[0, i], histogramRange[1, i]))
theseHistograms[:, i] = temp[0]
theseHistogramBins[:, i] = temp[1][0:-1]
histogramCorrelationFoM[i] = np.dot(histTrue, theseHistograms[:, i]) / \
np.dot(histTrue, histTrue)
# # Print to console:
# print('inBoundsFoM = ' + str(np.round(inBoundsFoM,2)) + \
# ', inEnvelopeFoM = ' + str(np.round(inEnvelopeFoM, 2)) + \
# ', stdDevFoM = ' + str(np.round(stdDevFoM, 2)) + \
# ', meanFoM = ' + str(np.round(meanFoM, 2)) + \
# ', medianFoM = ' + str(np.round(medianFoM, 2)) + \
# ', fftCorrelationFoM = ' + str(np.round(fftCorrelationFoM, 2)) + \
# ', histogramCorrelationFoM = ' + str(np.round(histogramCorrelationFoM, 2)) + '\n')
# Bundle FoMs into dict for return:
fomDict['inBoundsFoM'] = inBoundsFoM
fomDict['inEnvelopeFoM'] = inEnvelopeFoM
fomDict['stdDevFoM'] = stdDevFoM
fomDict['meanFoM'] = meanFoM
fomDict['medianFoM'] = medianFoM
fomDict['fftCorrelationFoM'] = fftCorrelationFoM
fomDict['fftPower'] = fftPower
fomDict['histogramCorrelationFoM'] = histogramCorrelationFoM
fomDict['histograms'] = theseHistograms
fomDict['histogramBins'] = theseHistogramBins
return fomDict
# End of calculateFiguresOfMerit_fn
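# Illustrative sketch (added for clarity; not part of the original pipeline): the FFT-power
# figure of merit above in isolation -- compare normalized low-frequency power spectra of a
# simulated and a true 1-D trace via a dot-product ratio (1.0 means identical spectra).
def _sketch_fft_power_fom(xSim, xTrue, numFftPoints):
    import numpy as np
    def _normalizedPower(sig):
        sig = np.asarray(sig, dtype=float)
        sig = sig - np.mean(sig)
        p = pow(np.real(np.fft.fft(sig)), 2)[0:numFftPoints]
        return p / np.sum(p)
    pSim, pTrue = _normalizedPower(xSim), _normalizedPower(xTrue)
    return np.dot(pSim, pTrue) / np.dot(pTrue, pTrue)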
#--------------------------------------------------------------
def findSpansOfFunctionals_fn(fnArray, fnVals, fnValsNoisy, wts, includeConstantFlag, p):
"""
Find which functionals are in the linear span of the final selected functionals (per variable).
Return R-squared values for each {functional, variable} pair (each variable has a few
selected functionals).
Parameters
----------
fnArray : np.array of booleans, numVars x numFunctionals in library. Shows which functionals
were used for each variable.
fnVals : np.array, numTimepoints (used) x numFunctionals. Gives values of functionals at
timepoints, using the smoothed time-series
fnValsNoisy : np.array, numTimepoints (used) x numFunctionals. Gives values of functionals at
timepoints, but using the post-added noise, pre-smoothing time-series.
wts : np.array, numTimepoints (used) x numVars. Gives weights of timepoints
includeConstantFlag : bool
p : dict of miscellaneous parameters, including:
functionList : list of str
variableNames : list of str
outputFilename : str
plottingThreshold : float
windowLengthForFomEnvelopes : int
dt : float
margin : int
maxFnValRatio : float
minNumStartIndsToUse : int
Returns
-------
rSq : np.array of floats, size fnArray.shape. Gives the R-squared from linear fits of
each functional using just the selected functionals (per variable).
betaZeros : np.array of floats, size fnArray.shape. Gives .intercept_ of each fit
betas : np.array of lists, array has size fnArray.shape. Gives the .coef_ of each fit
"""
betaZeros = np.zeros(fnArray.shape) # to save the intercepts
betas = np.empty(fnArray.shape, dtype=object) # to save the coefficients
rSq = np.zeros(fnArray.shape) # to save R-squared values
lrSp = LinearRegression(fit_intercept=includeConstantFlag)
for v in range(fnArray.shape[0]):
if np.sum(fnArray[v, :]) == 0:
console = sys.stdout
with open(p['outputFilename'], 'a') as file:
print(p['variableNames'][v] + ' has no functionals left in sparse library.',
file=file)
sys.stdout = console
file.close()
else: # case: there are functionals in this variable's sparse library
basis = np.where(fnArray[v, :])[0]
basisStr = str(np.array(p['functionList'])[basis]).replace('[','').replace(']','')
X = fnVals[:, fnArray[v, :]] # The values of the selected functionals
for i in range(fnArray.shape[1]): # num functionals
if fnArray[v, i]:
rSq[v, i] = 1
else:
y = fnVals[:, i]
# Exclude any timepoints from the regression with very large differences in
# functional values used as features:
miniFnArray = np.ones((1, X.shape[1]), dtype=bool)
removeMarginFlag = False # since for FoM timepoint assessment, we don't ignore
# a margin at each end.
nonConstantIndices = np.where(np.array(p['functionList']) != '1')[0]
goodInds = \
parseTimepointsByMagnitudesOfVariables_fn(X.copy(), miniFnArray,
nonConstantIndices, p['margin'],
p['maxFnValRatio'],
p['minNumStartIndsToUse'],
removeMarginFlag)
# goodInds is 1 x numTimepoints vector
theseWts = wts[:, v].copy()
theseWts[np.logical_not(goodInds.flatten())] = 0
# theseWts = theseWts / np.sum(goodInds) # normalize the sum
lrSp = lrSp.fit(X, y, sample_weight=theseWts)
rSq[v, i] = lrSp.score(X, y, sample_weight=theseWts)
betaZeros[v, i] = lrSp.intercept_
betas[v, i] = lrSp.coef_
# plot functionals that are somewhat well-fit:
showNoiseEnvelopesFlag = True
if rSq[v, i] > p['plottingThreshold'] and rSq[v, i] < 1:
# < 1 to ignore basis functionals
yHat = lrSp.predict(X)
plt.figure()
plt.title('var = ' + p['variableNames'][v] + ', basis = ' + \
basisStr + '\n' + 'functional = ' + \
p['functionList'][i] + ', R_sq = ' + str(np.round(rSq[v, i], 2)),
fontsize=14, fontweight='bold')
plt.scatter(np.array(range(len(y))), y, s=3, c='k',
label='functional values')
plt.scatter(np.array(range(len(y))), yHat, s=3, c='r',
label='fit values')
if showNoiseEnvelopesFlag:
slopeY, stdY, meanY = \
calculateSlopeAndStd_fn(fnValsNoisy[:, i], p['dt'],
p['windowLengthForFomEnvelopes'])
plt.plot(meanY + 2 * stdY, 'g', label='two std dev of noisy data')
plt.plot(meanY - 2 * stdY, 'g')
plt.xlabel('FoM timepoints', fontsize=14, fontweight='bold')
plt.ylabel('functional value', fontsize=14, fontweight='bold')
plt.legend(fontsize=14)
return rSq, betaZeros, betas
# End of findSpansOfFunctionals_fn
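# Illustrative sketch (added for clarity; not part of the original pipeline): the span test
# above in isolation -- fit one candidate functional's values as a linear combination of a
# basis of functionals' values and report R-squared (near 1 means the candidate is nearly
# in the basis's span).
def _sketch_r_squared_of_span(basisVals, candidateVals):
    from sklearn.linear_model import LinearRegression
    lr = LinearRegression().fit(basisVals, candidateVals)
    return lr.score(basisVals, candidateVals)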
#---------------------------------------------------------------------
def findSpansOfFunctionalsLeaveOneOut_fn(fnArray, fnVals, fnValsNoisy, wts, includeConstantFlag,
p):
"""
Considering the retained functionals (per variable), find which functionals are in the linear
span of the other functionals (leave-one-out).
    Return R-squared values for each {functional, variable} pair (each variable has a few
selected functionals).
Parameters
----------
fnArray : np.array of booleans, numVars x numFunctionals in library. Shows which functionals
are active for each variable.
fnVals : np.array, numTimepoints (used) x numFunctionals. Gives values of functionals at
timepoints, using the post-smoothed time-series.
fnValsNoisy : np.array, numTimepoints (used) x numFunctionals. Gives values of functionals at
timepoints, using the post-added noise and pre-smoothing time-series.
wts : np.array, numTimepoints (used) x numVars. Gives weights of timepoints
includeConstantFlag : bool
p : dict of miscellaneous parameters, including:
functionList : list of str
variableNames : list of str
outputFilename : str
windowLengthForFomEnvelopes : int
dt : float
margin : int
maxFnValRatio : float
minNumStartIndsToUse : int
plottingThreshold : float
Returns
-------
rSq : np.array of floats, size fnArray.shape. Gives the R-squared from linear fits of
each functional using just the selected functionals (per variable).
betaZeros : np.array of floats, size fnArray.shape. The {i,j}th entry is the intercept for the
fit of the j'th functional using the active functionals, for the i'th variable.
betas : np.array of objects, size fnArray.shape. The {i,j}th object is the vector of beta
coefficients found by fitting the j'th functional using the active functionals, for
the i'th variable. If the j'th functional is not active, the object = []. Else its
length = # active functionals for the variable.
"""
betaZeros = np.zeros(fnArray.shape) # to save .intercept_
betas = np.empty(fnArray.shape, dtype=object) # to save .coef_
rSq = np.zeros(fnArray.shape)
lrSp = LinearRegression(fit_intercept=includeConstantFlag)
for v in range(fnArray.shape[0]): # loop over numVars
if np.sum(fnArray[v, :]) <= 1:
console = sys.stdout
with open(p['outputFilename'], 'a') as file:
print(p['variableNames'][v] + ' has <= 1 functional left in library.', file=file)
sys.stdout = console
file.close()
else: # case: there are functionals in this variable's sparse library
inds = np.where(fnArray[v, :]==True)[0]
for i in inds: # num functionals
y = fnVals[:, i]
others = inds[inds != i].copy()
basisStr = str(np.array(p['functionList'])[others]).replace('[','').replace(']','')
X = fnVals[:, others].copy() # the other retained functionals
# Exclude any timepoints from the regression with very large differences in
# functional values used as features:
miniFnArray = | np.ones((1, X.shape[1])) | numpy.ones |
"""
Python implementation of the LiNGAM algorithms.
The LiNGAM Project: https://sites.google.com/site/sshimizu06/lingam
"""
import itertools
import numbers
import warnings
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.utils import check_array
from .direct_lingam import DirectLiNGAM
from .hsic import hsic_test_gamma
from .utils import predict_adaptive_lasso, find_all_paths
class LongitudinalLiNGAM:
"""Implementation of Longitudinal LiNGAM algorithm [1]_
References
----------
.. [1] <NAME>, <NAME>, and <NAME>. Estimation of causal structures
in longitudinal data using non-Gaussianity. In Proc. 23rd IEEE International
Workshop on Machine Learning for Signal Processing (MLSP2013), pp. 1--6, Southampton, United Kingdom, 2013.
"""
def __init__(self, n_lags=1, measure="pwling", random_state=None):
"""Construct a model.
Parameters
----------
n_lags : int, optional (default=1)
Number of lags.
measure : {'pwling', 'kernel'}, default='pwling'
Measure to evaluate independence : 'pwling' or 'kernel'.
random_state : int, optional (default=None)
``random_state`` is the seed used by the random number generator.
"""
self._n_lags = n_lags
self._measure = measure
self._random_state = random_state
self._causal_orders = None
self._adjacency_matrices = None
def fit(self, X_list):
"""Fit the model to datasets.
Parameters
----------
X_list : list, shape [X, ...]
            Longitudinal multiple datasets for training, where ``X`` is a dataset.
The shape of ``X`` is (n_samples, n_features),
where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
# Check parameters
if not isinstance(X_list, (list, np.ndarray)):
raise ValueError("X_list must be a array-like.")
if len(X_list) < 2:
raise ValueError("X_list must be a list containing at least two items")
self._T = len(X_list)
self._n = check_array(X_list[0]).shape[0]
self._p = check_array(X_list[0]).shape[1]
X_t = []
for X in X_list:
X = check_array(X)
if X.shape != (self._n, self._p):
raise ValueError("X_list must be a list with the same shape")
X_t.append(X.T)
M_tau, N_t = self._compute_residuals(X_t)
B_t, causal_orders = self._estimate_instantaneous_effects(N_t)
B_tau = self._estimate_lagged_effects(B_t, M_tau)
# output B(t,t), B(t,t-τ)
self._adjacency_matrices = np.empty(
(self._T, 1 + self._n_lags, self._p, self._p)
)
self._adjacency_matrices[:, :] = np.nan
for t in range(1, self._T):
self._adjacency_matrices[t, 0] = B_t[t]
for l in range(self._n_lags):
if t - l != 0:
self._adjacency_matrices[t, l + 1] = B_tau[t, l]
self._residuals = np.zeros((self._T, self._n, self._p))
for t in range(self._T):
self._residuals[t] = N_t[t].T
self._causal_orders = causal_orders
return self
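    # Illustrative usage sketch (comments only; added for clarity, not part of the library):
    #     import numpy as np
    #     X_list = [np.random.randn(100, 3) for _ in range(3)]   # one (n_samples, n_features) array per timepoint
    #     model = LongitudinalLiNGAM(n_lags=1)
    #     model.fit(X_list)
    #     B = model._adjacency_matrices   # shape (T, 1 + n_lags, p, p); public accessor name may differ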
def bootstrap(self, X_list, n_sampling, start_from_t=1):
"""Evaluate the statistical reliability of DAG based on the bootstrapping.
Parameters
----------
X_list : array-like, shape (X, ...)
            Longitudinal multiple datasets for training, where ``X`` is a dataset.
            The shape of ``X`` is (n_samples, n_features),
where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
n_sampling : int
Number of bootstrapping samples.
Returns
-------
results : array-like, shape (BootstrapResult, ...)
Returns the results of bootstrapping for multiple datasets.
"""
# Check parameters
if not isinstance(X_list, (list, np.ndarray)):
raise ValueError("X_list must be a array-like.")
if len(X_list) < 2:
raise ValueError("X_list must be a list containing at least two items")
self._T = len(X_list)
self._n = check_array(X_list[0]).shape[0]
self._p = check_array(X_list[0]).shape[1]
X_t = []
for X in X_list:
X = check_array(X)
if X.shape != (self._n, self._p):
raise ValueError("X_list must be a list with the same shape")
X_t.append(X)
# Bootstrapping
adjacency_matrices = np.zeros(
(n_sampling, self._T, 1 + self._n_lags, self._p, self._p)
)
total_effects = np.zeros((n_sampling, self._T * self._p, self._T * self._p))
for i in range(n_sampling):
resampled_X_t = np.empty((self._T, self._n, self._p))
indices = np.random.randint(0, self._n, size=(self._n,))
for t in range(self._T):
resampled_X_t[t] = X_t[t][indices, :]
self.fit(resampled_X_t)
adjacency_matrices[i] = self._adjacency_matrices
# Calculate total effects
for from_t in range(start_from_t, self._T):
for c, from_ in enumerate(self._causal_orders[from_t]):
to_t = from_t
for to in self._causal_orders[from_t][c + 1 :]:
total_effects[
i, to_t * self._p + to, from_t * self._p + from_
] = self.estimate_total_effect(X_t, from_t, from_, to_t, to)
for to_t in range(from_t + 1, self._T):
for to in self._causal_orders[to_t]:
total_effects[
i, to_t * self._p + to, from_t * self._p + from_
] = self.estimate_total_effect(X_t, from_t, from_, to_t, to)
return LongitudinalBootstrapResult(self._T, adjacency_matrices, total_effects)
def estimate_total_effect(self, X_t, from_t, from_index, to_t, to_index):
"""Estimate total effect using causal model.
Parameters
----------
X_t : array-like, shape (n_samples, n_features)
Original data, where n_samples is the number of samples
and n_features is the number of features.
        from_t :
The timepoint of source variable.
from_index :
Index of source variable to estimate total effect.
to_t :
The timepoint of destination variable.
to_index :
Index of destination variable to estimate total effect.
Returns
-------
total_effect : float
Estimated total effect.
"""
# Check from/to causal order
if to_t == from_t:
from_order = self._causal_orders[to_t].index(from_index)
to_order = self._causal_orders[from_t].index(to_index)
if from_order > to_order:
warnings.warn(
f"The estimated causal effect may be incorrect because "
f"the causal order of the destination variable (to_t={to_t}, to_index={to_index}) "
f"is earlier than the source variable (from_t={from_t}, from_index={from_index})."
)
elif to_t < from_t:
warnings.warn(
f"The estimated causal effect may be incorrect because "
f"the causal order of the destination variable (to_t={to_t}) "
f"is earlier than the source variable (from_t={from_t})."
)
# X + lagged X
# n_features * (to + from + n_lags)
X_joined = np.zeros((self._n, self._p * (2 + self._n_lags)))
X_joined[:, 0 : self._p] = X_t[to_t]
for tau in range(1 + self._n_lags):
pos = self._p + self._p * tau
X_joined[:, pos : pos + self._p] = X_t[from_t - tau]
am = np.concatenate([*self._adjacency_matrices[from_t]], axis=1)
# from_index + parents indices
parents = np.where(np.abs(am[from_index]) > 0)[0]
predictors = [from_index + self._p]
predictors.extend(parents + self._p)
# Estimate total effect
coefs = predict_adaptive_lasso(X_joined, predictors, to_index)
return coefs[0]
def get_error_independence_p_values(self):
"""Calculate the p-value matrix of independence between error variables.
Returns
-------
independence_p_values : array-like, shape (n_features, n_features)
p-value matrix of independence between error variables.
"""
E_list = np.empty((self._T, self._n, self._p))
for t, resid in enumerate(self.residuals_):
B_t = self._adjacency_matrices[t, 0]
E_list[t] = np.dot(np.eye(B_t.shape[0]) - B_t, resid.T).T
p_values_list = np.zeros([self._T, self._p, self._p])
p_values_list[:, :, :] = np.nan
for t in range(1, self._T):
p_values = np.zeros([self._p, self._p])
for i, j in itertools.combinations(range(self._p), 2):
_, p_value = hsic_test_gamma(
np.reshape(E_list[t][:, i], [self._n, 1]),
np.reshape(E_list[t][:, j], [self._n, 1]),
)
p_values[i, j] = p_value
p_values[j, i] = p_value
p_values_list[t] = p_values
return p_values_list
def _compute_residuals(self, X_t):
"""Compute residuals N(t)"""
M_tau = np.zeros((self._T, self._n_lags, self._p, self._p))
N_t = np.zeros((self._T, self._p, self._n))
N_t[:, :, :] = np.nan
for t in range(1, self._T):
# predictors
X_predictors = np.zeros((self._n, self._p * (1 + self._n_lags)))
for tau in range(self._n_lags):
pos = self._p * tau
X_predictors[:, pos : pos + self._p] = X_t[t - (tau + 1)].T
# estimate M(t,t-τ) by regression
X_target = X_t[t].T
for i in range(self._p):
reg = LinearRegression()
reg.fit(X_predictors, X_target[:, i])
for tau in range(self._n_lags):
pos = self._p * tau
M_tau[t, tau, i] = reg.coef_[pos : pos + self._p]
# Compute N(t)
N_t[t] = X_t[t]
for tau in range(self._n_lags):
N_t[t] = N_t[t] - | np.dot(M_tau[t, tau], X_t[t - (tau + 1)]) | numpy.dot |
#All code copyright 2014, <NAME>. All rights reserved.
import numpy as np
def arc_distance(lon0=None, lat0=None, lon1=None, lat1=None, R=1.,
input_coords='radians'):
"""
Gets the arc distance between (lon0, lat0) and (lon1, lat1).
Either pair can be a pair or an array. R is the radius of the
sphere. Uses the formula from the spherical law of cosines.
`input_coords` specifies whether the inputs are in radians (default)
or degrees.
Returns arc distance.
"""
if input_coords == 'degrees':
lon0, lat0 = np.radians(lon0), np.radians(lat0)
lon1, lat1 = np.radians(lon1), np.radians(lat1)
# spherical law of cosines
aa = np.arccos(np.sin(lat0) * np.sin(lat1)
+ | np.cos(lat0) | numpy.cos |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utilities for dealing with HEALPix projections and mappings."""
import copy
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.units import Quantity
from gammapy.utils.array import is_power2
from ..axes import MapAxes
from ..coord import MapCoord, skycoord_to_lonlat
from ..geom import Geom, pix_tuple_to_idx
from ..utils import INVALID_INDEX, coordsys_to_frame, frame_to_coordsys
from .io import HPX_FITS_CONVENTIONS, HpxConv
from .utils import (
coords_to_vec,
get_nside_from_pix_size,
get_pix_size_from_nside,
get_subpixels,
get_superpixels,
match_hpx_pix,
nside_to_order,
parse_hpxregion,
ravel_hpx_index,
unravel_hpx_index,
)
# Not sure if we should expose this in the docs or not:
# HPX_FITS_CONVENTIONS, HpxConv
__all__ = ["HpxGeom"]
class HpxGeom(Geom):
"""Geometry class for HEALPIX maps.
This class performs mapping between partial-sky indices (pixel
number within a HEALPIX region) and all-sky indices (pixel number
within an all-sky HEALPIX map). Multi-band HEALPIX geometries use
a global indexing scheme that assigns a unique pixel number based
on the all-sky index and band index. In the single-band case the
global index is the same as the HEALPIX index.
By default the constructor will return an all-sky map.
Partial-sky maps can be defined with the ``region`` argument.
Parameters
----------
nside : `~numpy.ndarray`
HEALPIX nside parameter, the total number of pixels is
12*nside*nside. For multi-dimensional maps one can pass
either a single nside value or a vector of nside values
defining the pixel size for each image plane. If nside is not
a scalar then its dimensionality should match that of the
non-spatial axes.
nest : bool
True -> 'NESTED', False -> 'RING' indexing scheme
frame : str
Coordinate system, "icrs" | "galactic"
region : str or tuple
Spatial geometry for partial-sky maps. If none the map will
encompass the whole sky. String input will be parsed
according to HPX_REG header keyword conventions. Tuple
input can be used to define an explicit list of pixels
encompassed by the geometry.
axes : list
Axes for non-spatial dimensions.
"""
is_hpx = True
is_region = False
def __init__(self, nside, nest=True, frame="icrs", region=None, axes=None):
# FIXME: Require NSIDE to be power of two when nest=True
self._nside = np.array(nside, ndmin=1)
self._axes = MapAxes.from_default(axes, n_spatial_axes=1)
if self.nside.size > 1 and self.nside.shape != self.shape_axes:
raise ValueError(
"Wrong dimensionality for nside. nside must "
"be a scalar or have a dimensionality consistent "
"with the axes argument."
)
self._nest = nest
self._frame = frame
self._ipix = None
self._region = region
self._create_lookup(region)
self._npix = self._npix * np.ones(self.shape_axes, dtype=int)
def _create_lookup(self, region):
"""Create local-to-global pixel lookup table."""
if isinstance(region, str):
ipix = [
self.get_index_list(nside, self._nest, region)
for nside in self._nside.flat
]
self._ipix = [
ravel_hpx_index((p, i * np.ones_like(p)), np.ravel(self.npix_max))
for i, p in enumerate(ipix)
]
self._region = region
self._indxschm = "EXPLICIT"
self._npix = np.array([len(t) for t in self._ipix])
if self.nside.ndim > 1:
self._npix = self._npix.reshape(self.nside.shape)
self._ipix = np.concatenate(self._ipix)
elif isinstance(region, tuple):
region = [np.asarray(t) for t in region]
m = np.any(np.stack([t >= 0 for t in region]), axis=0)
region = [t[m] for t in region]
self._ipix = ravel_hpx_index(region, self.npix_max)
self._ipix = np.unique(self._ipix)
region = unravel_hpx_index(self._ipix, self.npix_max)
self._region = "explicit"
self._indxschm = "EXPLICIT"
if len(region) == 1:
self._npix = np.array([len(region[0])])
else:
self._npix = np.zeros(self.shape_axes, dtype=int)
idx = np.ravel_multi_index(region[1:], self.shape_axes)
cnt = np.unique(idx, return_counts=True)
self._npix.flat[cnt[0]] = cnt[1]
elif region is None:
self._region = None
self._indxschm = "IMPLICIT"
self._npix = self.npix_max
else:
raise ValueError(f"Invalid region string: {region!r}")
def local_to_global(self, idx_local):
"""Compute a local index (partial-sky) from a global (all-sky) index.
Returns
-------
idx_global : tuple
A tuple of pixel index vectors with global HEALPIX pixel indices
"""
if self._ipix is None:
return idx_local
if self.nside.size > 1:
idx = ravel_hpx_index(idx_local, self._npix)
else:
idx_tmp = tuple(
[idx_local[0]] + [np.zeros(t.shape, dtype=int) for t in idx_local[1:]]
)
idx = ravel_hpx_index(idx_tmp, self._npix)
idx_global = unravel_hpx_index(self._ipix[idx], self.npix_max)
return idx_global[:1] + tuple(idx_local[1:])
def global_to_local(self, idx_global, ravel=False):
"""Compute global (all-sky) index from a local (partial-sky) index.
Parameters
----------
idx_global : tuple
A tuple of pixel indices with global HEALPix pixel indices.
ravel : bool
Return a raveled index.
Returns
-------
idx_local : tuple
A tuple of pixel indices with local HEALPIX pixel indices.
"""
if (
isinstance(idx_global, int)
or (isinstance(idx_global, tuple) and isinstance(idx_global[0], int))
or isinstance(idx_global, np.ndarray)
):
idx_global = unravel_hpx_index(np.array(idx_global, ndmin=1), self.npix_max)
if self.nside.size == 1:
idx = np.array(idx_global[0], ndmin=1)
else:
idx = ravel_hpx_index(idx_global, self.npix_max)
if self._ipix is not None:
retval = np.full(idx.size, -1, "i")
m = np.isin(idx.flat, self._ipix)
retval[m] = np.searchsorted(self._ipix, idx.flat[m])
retval = retval.reshape(idx.shape)
else:
retval = idx
if self.nside.size == 1:
idx_local = tuple([retval] + list(idx_global[1:]))
else:
idx_local = unravel_hpx_index(retval, self._npix)
m = np.any(np.stack([t == INVALID_INDEX.int for t in idx_local]), axis=0)
for i, t in enumerate(idx_local):
idx_local[i][m] = INVALID_INDEX.int
if not ravel:
return idx_local
else:
return ravel_hpx_index(idx_local, self.npix)
def cutout(self, position, width, **kwargs):
"""Create a cutout around a given position.
Parameters
----------
position : `~astropy.coordinates.SkyCoord`
Center position of the cutout region.
width : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`
Diameter of the circular cutout region.
Returns
-------
        cutout : `~HpxGeom`
            Cutout geometry
"""
if not self.is_regular:
raise ValueError("Can only do a cutout from a regular map.")
width = u.Quantity(width, "deg").value
return self.create(
nside=self.nside,
nest=self.nest,
width=width,
skydir=position,
frame=self.frame,
axes=self.axes,
)
def coord_to_pix(self, coords):
import healpy as hp
coords = MapCoord.create(
coords, frame=self.frame, axis_names=self.axes.names
).broadcasted
theta, phi = coords.theta, coords.phi
if self.axes:
idxs = self.axes.coord_to_idx(coords, clip=True)
bins = self.axes.coord_to_pix(coords)
# FIXME: Figure out how to handle coordinates out of
# bounds of non-spatial dimensions
if self.nside.size > 1:
nside = self.nside[tuple(idxs)]
else:
nside = self.nside
m = ~np.isfinite(theta)
theta[m] = 0.0
phi[m] = 0.0
pix = hp.ang2pix(nside, theta, phi, nest=self.nest)
pix = tuple([pix]) + bins
if np.any(m):
for p in pix:
p[m] = INVALID_INDEX.int
else:
pix = (hp.ang2pix(self.nside, theta, phi, nest=self.nest),)
return pix
def pix_to_coord(self, pix):
import healpy as hp
if self.axes:
bins = []
vals = []
for i, ax in enumerate(self.axes):
bins += [pix[1 + i]]
vals += [ax.pix_to_coord(pix[1 + i])]
idxs = pix_tuple_to_idx(bins)
if self.nside.size > 1:
nside = self.nside[idxs]
else:
nside = self.nside
ipix = np.round(pix[0]).astype(int)
m = ipix == INVALID_INDEX.int
ipix[m] = 0
theta, phi = hp.pix2ang(nside, ipix, nest=self.nest)
coords = [np.degrees(phi), np.degrees(np.pi / 2.0 - theta)]
coords = tuple(coords + vals)
if np.any(m):
for c in coords:
c[m] = INVALID_INDEX.float
else:
ipix = np.round(pix[0]).astype(int)
theta, phi = hp.pix2ang(self.nside, ipix, nest=self.nest)
coords = (np.degrees(phi), np.degrees(np.pi / 2.0 - theta))
return coords
def pix_to_idx(self, pix, clip=False):
# FIXME: Look for better method to clip HPX indices
# TODO: copy idx to avoid modifying input pix?
# pix_tuple_to_idx seems to always make a copy!?
idx = pix_tuple_to_idx(pix)
idx_local = self.global_to_local(idx)
for i, _ in enumerate(idx):
if clip:
if i > 0:
np.clip(idx[i], 0, self.axes[i - 1].nbin - 1, out=idx[i])
else:
np.clip(idx[i], 0, None, out=idx[i])
else:
if i > 0:
mask = (idx[i] < 0) | (idx[i] >= self.axes[i - 1].nbin)
np.putmask(idx[i], mask, -1)
else:
mask = (idx_local[i] < 0) | (idx[i] < 0)
np.putmask(idx[i], mask, -1)
return tuple(idx)
@property
def axes(self):
"""List of non-spatial axes."""
return self._axes
@property
def axes_names(self):
"""All axes names"""
return ["skycoord"] + self.axes.names
@property
def shape_axes(self):
"""Shape of non-spatial axes."""
return self.axes.shape
@property
def data_shape(self):
"""Shape of the Numpy data array matching this geometry."""
npix_shape = tuple([np.max(self.npix)])
return (npix_shape + self.axes.shape)[::-1]
@property
def data_shape_axes(self):
"""Shape of data of the non-spatial axes and unit spatial axes."""
return self.axes.shape[::-1] + (1,)
@property
def ndim(self):
"""Number of dimensions (int)."""
return len(self._axes) + 2
@property
def ordering(self):
"""HEALPix ordering ('NESTED' or 'RING')."""
return "NESTED" if self.nest else "RING"
@property
def nside(self):
"""NSIDE in each band."""
return self._nside
@property
def order(self):
"""ORDER in each band (``NSIDE = 2 ** ORDER``).
Set to -1 for bands with NSIDE that is not a power of 2.
"""
return nside_to_order(self.nside)
@property
def nest(self):
"""Is HEALPix order nested? (bool)."""
return self._nest
@property
def npix(self):
"""Number of pixels in each band.
For partial-sky geometries this can
be less than the number of pixels for the band NSIDE.
"""
return self._npix
@property
def npix_max(self):
"""Max. number of pixels"""
maxpix = 12 * self.nside ** 2
return maxpix * np.ones(self.shape_axes, dtype=int)
@property
def frame(self):
return self._frame
@property
def projection(self):
"""Map projection."""
return "HPX"
@property
def region(self):
"""Region string."""
return self._region
@property
def is_allsky(self):
"""Flag for all-sky maps."""
return self._region is None
@property
def is_regular(self):
"""Flag identifying whether this geometry is regular in non-spatial dimensions.
False for multi-resolution or irregular geometries.
If true all image planes have the same pixel geometry.
"""
if self.nside.size > 1 or self.region == "explicit":
return False
else:
return True
@property
def center_coord(self):
"""Map coordinates of the center of the geometry (tuple)."""
lon, lat, frame = skycoord_to_lonlat(self.center_skydir)
return tuple([lon, lat]) + self.axes.center_coord
@property
def center_pix(self):
"""Pixel coordinates of the center of the geometry (tuple)."""
return self.coord_to_pix(self.center_coord)
@property
def center_skydir(self):
"""Sky coordinate of the center of the geometry.
Returns
-------
center : `~astropy.coordinates.SkyCoord`
Center position
"""
# TODO: simplify
import healpy as hp
if self.is_allsky:
lon, lat = 0.0, 0.0
elif self.region == "explicit":
idx = unravel_hpx_index(self._ipix, self.npix_max)
nside = self._get_nside(idx)
vec = hp.pix2vec(nside, idx[0], nest=self.nest)
vec = np.array([np.mean(t) for t in vec])
lonlat = hp.vec2ang(vec, lonlat=True)
lon, lat = lonlat[0], lonlat[1]
else:
tokens = parse_hpxregion(self.region)
if tokens[0] in ["DISK", "DISK_INC"]:
lon, lat = float(tokens[1]), float(tokens[2])
elif tokens[0] == "HPX_PIXEL":
nside_pix = int(tokens[2])
ipix_pix = int(tokens[3])
if tokens[1] == "NESTED":
nest_pix = True
elif tokens[1] == "RING":
nest_pix = False
else:
raise ValueError(f"Invalid ordering scheme: {tokens[1]!r}")
theta, phi = hp.pix2ang(nside_pix, ipix_pix, nest_pix)
lat = np.degrees((np.pi / 2) - theta)
lon = np.degrees(phi)
return SkyCoord(lon, lat, frame=self.frame, unit="deg")
@property
def pixel_scales(self):
self.angle_ = """Pixel scale.
Returns
-------
angle: `~astropy.coordinates.Angle`
"""
return get_pix_size_from_nside(self.nside) * u.deg
def interp_weights(self, coords, idxs=None):
"""Get interpolation weights for given coords
Parameters
----------
coords : `MapCoord` or dict
Input coordinates
idxs : `~numpy.ndarray`
Indices for non-spatial axes.
Returns
-------
weights : `~numpy.ndarray`
Interpolation weights
"""
import healpy as hp
coords = MapCoord.create(coords, frame=self.frame).broadcasted
if idxs is None:
idxs = self.coord_to_idx(coords, clip=True)[1:]
theta, phi = coords.theta, coords.phi
m = ~np.isfinite(theta)
theta[m] = 0
phi[m] = 0
if not self.is_regular:
nside = self.nside[tuple(idxs)]
else:
nside = self.nside
pix, wts = hp.get_interp_weights(nside, theta, phi, nest=self.nest)
wts[:, m] = 0
pix[:, m] = INVALID_INDEX.int
if not self.is_regular:
pix_local = [self.global_to_local([pix] + list(idxs))[0]]
else:
pix_local = [self.global_to_local(pix, ravel=True)]
# If a pixel lies outside of the geometry set its index to the center pixel
m = pix_local[0] == INVALID_INDEX.int
if m.any():
coords_ctr = [coords.lon, coords.lat]
coords_ctr += [ax.pix_to_coord(t) for ax, t in zip(self.axes, idxs)]
idx_ctr = self.coord_to_idx(coords_ctr)
idx_ctr = self.global_to_local(idx_ctr)
pix_local[0][m] = (idx_ctr[0] * np.ones(pix.shape, dtype=int))[m]
pix_local += [np.broadcast_to(t, pix_local[0].shape) for t in idxs]
return pix_local, wts
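    # Illustrative sketch (comments only; added for clarity, not part of the library):
    #     import numpy as np, healpy as hp
    #     # Four neighbouring pixels and their bilinear weights for one sky direction:
    #     pix, wts = hp.get_interp_weights(16, np.array([0.5]), np.array([1.0]), nest=True)
    #     # pix and wts have shape (4, 1), and wts sums to 1 along axis 0.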
@property
def ipix(self):
"""HEALPIX pixel and band indices for every pixel in the map."""
return self.get_idx()
def is_aligned(self, other):
"""Check if HEALPIx geoms and extra axes are aligned.
Parameters
----------
other : `HpxGeom`
Other geom.
Returns
-------
aligned : bool
Whether geometries are aligned
"""
for axis, otheraxis in zip(self.axes, other.axes):
if axis != otheraxis:
return False
if not self.nside == other.nside:
return False
elif not self.frame == other.frame:
return False
elif not self.nest == other.nest:
return False
else:
return True
def to_nside(self, nside):
"""Upgrade or downgrade the reoslution to a given nside
Parameters
----------
nside : int
Nside
Returns
-------
geom : `~HpxGeom`
A HEALPix geometry object.
"""
if not self.is_regular:
raise ValueError("Upgrade and degrade only implemented for standard maps")
axes = copy.deepcopy(self.axes)
return self.__class__(
nside=nside, nest=self.nest, frame=self.frame, region=self.region, axes=axes
)
def to_binsz(self, binsz):
"""Change pixel size of the geometry.
Parameters
----------
binsz : float or `~astropy.units.Quantity`
New pixel size. A float is assumed to be in degree.
Returns
-------
        geom : `~HpxGeom`
Geometry with new pixel size.
"""
binsz = u.Quantity(binsz, "deg").value
if self.is_allsky:
return self.create(
binsz=binsz,
frame=self.frame,
axes=copy.deepcopy(self.axes),
)
else:
return self.create(
skydir=self.center_skydir,
binsz=binsz,
width=self.width.to_value("deg"),
frame=self.frame,
axes=copy.deepcopy(self.axes),
)
def separation(self, center):
"""Compute sky separation wrt a given center.
Parameters
----------
center : `~astropy.coordinates.SkyCoord`
Center position
Returns
-------
separation : `~astropy.coordinates.Angle`
Separation angle array (1D)
"""
coord = self.to_image().get_coord()
return center.separation(coord.skycoord)
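    # Illustrative usage sketch (comments only; added for clarity, not part of the library):
    #     from astropy.coordinates import SkyCoord
    #     geom = HpxGeom.create(nside=16, frame="galactic")
    #     seps = geom.separation(SkyCoord(0, 0, unit="deg", frame="galactic"))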
def to_swapped(self):
"""Geometry copy with swapped ORDERING (NEST->RING or vice versa).
Returns
-------
geom : `~HpxGeom`
A HEALPix geometry object.
"""
axes = copy.deepcopy(self.axes)
return self.__class__(
self.nside,
not self.nest,
frame=self.frame,
region=self.region,
axes=axes,
)
def to_image(self):
return self.__class__(
np.max(self.nside), self.nest, frame=self.frame, region=self.region
)
def to_cube(self, axes):
axes = copy.deepcopy(self.axes) + axes
return self.__class__(
np.max(self.nside),
self.nest,
frame=self.frame,
region=self.region,
axes=axes,
)
def _get_neighbors(self, idx):
import healpy as hp
nside = self._get_nside(idx)
idx_nb = (hp.get_all_neighbours(nside, idx[0], nest=self.nest),)
idx_nb += tuple([t[None, ...] * np.ones_like(idx_nb[0]) for t in idx[1:]])
return idx_nb
def _pad_spatial(self, pad_width):
if self.is_allsky:
raise ValueError("Cannot pad an all-sky map.")
idx = self.get_idx(flat=True)
idx_r = ravel_hpx_index(idx, self.npix_max)
# TODO: Pre-filter indices to find those close to the edge
idx_nb = self._get_neighbors(idx)
idx_nb = ravel_hpx_index(idx_nb, self.npix_max)
for _ in range(pad_width):
mask_edge = np.isin(idx_nb, idx_r, invert=True)
idx_edge = idx_nb[mask_edge]
idx_edge = np.unique(idx_edge)
idx_r = np.sort(np.concatenate((idx_r, idx_edge)))
idx_nb = unravel_hpx_index(idx_edge, self.npix_max)
idx_nb = self._get_neighbors(idx_nb)
idx_nb = ravel_hpx_index(idx_nb, self.npix_max)
idx = unravel_hpx_index(idx_r, self.npix_max)
return self.__class__(
self.nside.copy(),
self.nest,
frame=self.frame,
region=idx,
axes=copy.deepcopy(self.axes),
)
def crop(self, crop_width):
if self.is_allsky:
raise ValueError("Cannot crop an all-sky map.")
idx = self.get_idx(flat=True)
idx_r = ravel_hpx_index(idx, self.npix_max)
# TODO: Pre-filter indices to find those close to the edge
idx_nb = self._get_neighbors(idx)
idx_nb = ravel_hpx_index(idx_nb, self.npix_max)
for _ in range(crop_width):
# Mask of pixels that have at least one neighbor not
# contained in the geometry
mask_edge = np.any(np.isin(idx_nb, idx_r, invert=True), axis=0)
idx_r = idx_r[~mask_edge]
idx_nb = idx_nb[:, ~mask_edge]
idx = unravel_hpx_index(idx_r, self.npix_max)
return self.__class__(
self.nside.copy(),
self.nest,
frame=self.frame,
region=idx,
axes=copy.deepcopy(self.axes),
)
def upsample(self, factor):
if not is_power2(factor):
raise ValueError("Upsample factor must be a power of 2.")
if self.is_allsky:
return self.__class__(
self.nside * factor,
self.nest,
frame=self.frame,
region=self.region,
axes=copy.deepcopy(self.axes),
)
idx = list(self.get_idx(flat=True))
nside = self._get_nside(idx)
idx_new = get_subpixels(idx[0], nside, nside * factor, nest=self.nest)
for i in range(1, len(idx)):
idx[i] = idx[i][..., None] * np.ones(idx_new.shape, dtype=int)
idx[0] = idx_new
return self.__class__(
self.nside * factor,
self.nest,
frame=self.frame,
region=tuple(idx),
axes=copy.deepcopy(self.axes),
)
def downsample(self, factor, axis_name=None):
if not is_power2(factor):
raise ValueError("Downsample factor must be a power of 2.")
if axis_name is not None:
raise ValueError("Currently the only valid axis name is None.")
if self.is_allsky:
return self.__class__(
self.nside // factor,
self.nest,
frame=self.frame,
region=self.region,
axes=copy.deepcopy(self.axes),
)
idx = list(self.get_idx(flat=True))
nside = self._get_nside(idx)
idx_new = get_superpixels(idx[0], nside, nside // factor, nest=self.nest)
idx[0] = idx_new
return self.__class__(
self.nside // factor,
self.nest,
frame=self.frame,
region=tuple(idx),
axes=copy.deepcopy(self.axes),
)
@classmethod
def create(
cls,
nside=None,
binsz=None,
nest=True,
frame="icrs",
region=None,
axes=None,
skydir=None,
width=None,
):
"""Create an HpxGeom object.
Parameters
----------
nside : int or `~numpy.ndarray`
HEALPix NSIDE parameter. This parameter sets the size of
the spatial pixels in the map.
binsz : float or `~numpy.ndarray`
Approximate pixel size in degrees. An NSIDE will be
chosen that corresponds to a pixel size closest to this
value. This option is superseded by nside.
nest : bool
True for HEALPIX "NESTED" indexing scheme, False for "RING" scheme
frame : {"icrs", "galactic"}, optional
Coordinate system, either Galactic ("galactic") or Equatorial ("icrs").
skydir : tuple or `~astropy.coordinates.SkyCoord`
Sky position of map center. Can be either a SkyCoord
object or a tuple of longitude and latitude in deg in the
coordinate system of the map.
region : str
HPX region string. Allows for partial-sky maps.
width : float
Diameter of the map in degrees. If set the map will
encompass all pixels within a circular region centered on
``skydir``.
axes : list
List of axes for non-spatial dimensions.
Returns
-------
geom : `~HpxGeom`
A HEALPix geometry object.
Examples
--------
>>> from gammapy.maps import HpxGeom, MapAxis
>>> axis = MapAxis.from_bounds(0,1,2)
>>> geom = HpxGeom.create(nside=16)
>>> geom = HpxGeom.create(binsz=0.1, width=10.0)
>>> geom = HpxGeom.create(nside=64, width=10.0, axes=[axis])
>>> geom = HpxGeom.create(nside=[32,64], width=10.0, axes=[axis])
"""
if nside is None and binsz is None:
raise ValueError("Either nside or binsz must be defined.")
if nside is None and binsz is not None:
nside = get_nside_from_pix_size(binsz)
if skydir is None:
lon, lat = (0.0, 0.0)
elif isinstance(skydir, tuple):
lon, lat = skydir
elif isinstance(skydir, SkyCoord):
lon, lat, frame = skycoord_to_lonlat(skydir, frame=frame)
else:
raise ValueError(f"Invalid type for skydir: {type(skydir)!r}")
if region is None and width is not None:
region = f"DISK({lon},{lat},{width/2})"
return cls(nside, nest=nest, frame=frame, region=region, axes=axes)
@classmethod
def from_header(cls, header, hdu_bands=None, format=None):
"""Create an HPX object from a FITS header.
Parameters
----------
header : `~astropy.io.fits.Header`
The FITS header
hdu_bands : `~astropy.io.fits.BinTableHDU`
The BANDS table HDU.
format : str, optional
FITS convention. If None the format is guessed. The following
formats are supported:
- "gadf"
- "fgst-ccube"
- "fgst-ltcube"
- "fgst-bexpcube"
- "fgst-srcmap"
- "fgst-template"
- "fgst-srcmap-sparse"
- "galprop"
- "galprop2"
- "healpy"
Returns
-------
hpx : `~HpxGeom`
HEALPix geometry.
"""
if format is None:
format = HpxConv.identify_hpx_format(header)
conv = HPX_FITS_CONVENTIONS[format]
axes = MapAxes.from_table_hdu(hdu_bands, format=format)
if header["PIXTYPE"] != "HEALPIX":
raise ValueError(
f"Invalid header PIXTYPE: {header['PIXTYPE']} (must be HEALPIX)"
)
if header["ORDERING"] == "RING":
nest = False
elif header["ORDERING"] == "NESTED":
nest = True
else:
raise ValueError(
f"Invalid header ORDERING: {header['ORDERING']} (must be RING or NESTED)"
)
if hdu_bands is not None and "NSIDE" in hdu_bands.columns.names:
nside = hdu_bands.data.field("NSIDE").reshape(axes.shape).astype(int)
elif "NSIDE" in header:
nside = header["NSIDE"]
elif "ORDER" in header:
nside = 2 ** header["ORDER"]
else:
raise ValueError("Failed to extract NSIDE or ORDER.")
try:
frame = coordsys_to_frame(header[conv.frame])
except KeyError:
frame = header.get("COORDSYS", "icrs")
try:
region = header["HPX_REG"]
except KeyError:
try:
region = header["HPXREGION"]
except KeyError:
region = None
return cls(nside, nest, frame=frame, region=region, axes=axes)
@classmethod
def from_hdu(cls, hdu, hdu_bands=None):
"""Create an HPX object from a BinTable HDU.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
The FITS HDU
hdu_bands : `~astropy.io.fits.BinTableHDU`
The BANDS table HDU
Returns
-------
hpx : `~HpxGeom`
HEALPix geometry.
"""
# FIXME: Need correct handling of IMPLICIT and EXPLICIT maps
# if HPX region is not defined then geometry is defined by
# the set of all pixels in the table
if "HPX_REG" not in hdu.header:
pix = (hdu.data.field("PIX"), hdu.data.field("CHANNEL"))
else:
pix = None
return cls.from_header(hdu.header, hdu_bands=hdu_bands, pix=pix)
def to_header(self, format="gadf", **kwargs):
"""Build and return FITS header for this HEALPIX map."""
header = fits.Header()
format = kwargs.get("format", HPX_FITS_CONVENTIONS[format])
# FIXME: For some sparse maps we may want to allow EXPLICIT
# with an empty region string
indxschm = kwargs.get("indxschm", None)
if indxschm is None:
if self._region is None:
indxschm = "IMPLICIT"
elif self.is_regular == 1:
indxschm = "EXPLICIT"
else:
indxschm = "LOCAL"
if "FGST" in format.convname.upper():
header["TELESCOP"] = "GLAST"
header["INSTRUME"] = "LAT"
header[format.frame] = frame_to_coordsys(self.frame)
header["PIXTYPE"] = "HEALPIX"
header["ORDERING"] = self.ordering
header["INDXSCHM"] = indxschm
header["ORDER"] = np.max(self.order)
        header["NSIDE"] = np.max(self.nside)
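# --- Added usage sketch (not part of the original gammapy module) ---
# Minimal illustration of the resolution helpers defined above; the NSIDE
# values below are arbitrary examples.
# from gammapy.maps import HpxGeom
# geom = HpxGeom.create(nside=64, frame="galactic")
# finer = geom.upsample(2)      # NSIDE 64 -> 128 (factor must be a power of 2)
# coarser = geom.downsample(4)  # NSIDE 64 -> 16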
"""
FluctuatingBackground.py
Author: <NAME>
Affiliation: UCLA
Created on: Mon Oct 10 14:29:54 PDT 2016
Description:
"""
import numpy as np
from math import factorial
from ..physics import Cosmology
from ..util import ParameterFile
from ..util.Stats import bin_c2e
from scipy.special import erfinv
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
from scipy.integrate import quad, simps
from ..physics.Hydrogen import Hydrogen
from ..physics.HaloModel import HaloModel
from ..util.Math import LinearNDInterpolator
from ..populations.Composite import CompositePopulation
from ..physics.CrossSections import PhotoIonizationCrossSection
from ..physics.Constants import g_per_msun, cm_per_mpc, dnu, s_per_yr, c, \
s_per_myr, erg_per_ev, k_B, m_p, dnu, g_per_msun
root2 = np.sqrt(2.)
four_pi = 4. * np.pi
class Fluctuations(object): # pragma: no cover
def __init__(self, grid=None, **kwargs):
"""
Initialize a FluctuatingBackground object.
Creates an object capable of modeling fields that fluctuate spatially.
"""
self._kwargs = kwargs.copy()
self.pf = ParameterFile(**kwargs)
# Some useful physics modules
if grid is not None:
self.grid = grid
self.cosm = grid.cosm
else:
self.grid = None
self.cosm = Cosmology()
self._done = {}
@property
def zeta(self):
if not hasattr(self, '_zeta'):
raise AttributeError('Must set zeta by hand!')
return self._zeta
@zeta.setter
def zeta(self, value):
self._zeta = value
@property
def zeta_X(self):
if not hasattr(self, '_zeta_X'):
raise AttributeError('Must set zeta_X by hand!')
return self._zeta_X
@zeta_X.setter
def zeta_X(self, value):
self._zeta_X = value
@property
def hydr(self):
if not hasattr(self, '_hydr'):
if self.grid is None:
self._hydr = Hydrogen(**self.pf)
else:
self._hydr = self.grid.hydr
return self._hydr
@property
def xset(self):
if not hasattr(self, '_xset'):
xset_pars = \
{
'xset_window': 'tophat-real',
'xset_barrier': 'constant',
'xset_pdf': 'gaussian',
}
xset = ares.physics.ExcursionSet(**xset_pars)
xset.tab_M = pop.halos.tab_M
xset.tab_sigma = pop.halos.tab_sigma
xset.tab_ps = pop.halos.tab_ps_lin
xset.tab_z = pop.halos.tab_z
xset.tab_k = pop.halos.tab_k_lin
xset.tab_growth = pop.halos.tab_growth
self._xset = xset
return self._xset
def _overlap_region(self, dr, R1, R2):
"""
Volume of intersection between two spheres of radii R1 < R2.
"""
Vo = np.pi * (R2 + R1 - dr)**2 \
* (dr**2 + 2. * dr * R1 - 3. * R1**2 \
+ 2. * dr * R2 + 6. * R1 * R2 - 3. * R2**2) / 12. / dr
if type(Vo) == np.ndarray:
# Small-scale vs. large Scale
SS = dr <= R2 - R1
LS = dr >= R1 + R2
Vo[LS == 1] = 0.0
if type(R1) == np.ndarray:
Vo[SS == 1] = 4. * np.pi * R1[SS == 1]**3 / 3.
else:
Vo[SS == 1] = 4. * np.pi * R1**3 / 3.
return Vo
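    # Added clarification (not original comments): the expression above is the
    # standard lens volume of two intersecting spheres. Its limiting cases are
    # handled explicitly in the array branch above: for dr >= R1 + R2 the
    # spheres are disjoint and Vo is set to zero, while for dr <= R2 - R1 the
    # small sphere lies entirely inside the large one and Vo becomes the full
    # volume 4*pi*R1**3/3.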
def IV(self, dr, R1, R2):
"""
Just a vectorized version of the overlap calculation.
"""
return self._overlap_region(dr, R1, R2)
def intersectional_volumes(self, dr, R1, R2, R3):
IV = self.IV
V11 = IV(dr, R1, R1)
zeros = np.zeros_like(V11)
if np.all(R2 == 0):
return V11, zeros, zeros, zeros, zeros, zeros
V12 = IV(dr, R1, R2)
V22 = IV(dr, R2, R2)
if np.all(R3 == 0):
return V11, V12, V22, zeros, zeros, zeros
V13 = IV(dr, R1, R3)
V23 = IV(dr, R2, R3)
V33 = IV(dr, R3, R3)
return V11, V12, V22, V13, V23, V33
def overlap_volumes(self, dr, R1, R2):
"""
Overlap volumes, i.e., volumes in which a source affects two points
in different ways. For example, V11 is the volume in which a source
ionizes both points (at separation `dr`), V12 is the volume in which
a source ionizes one point and heats the other, and so on.
        In this order: V11, V12, V22, V1n, V2n, Van
"""
IV = self.IV
V1 = 4. * np.pi * R1**3 / 3.
if self.pf['ps_temp_model'] == 1:
V2 = 4. * np.pi * (R2**3 - R1**3) / 3.
else:
V2 = 4. * np.pi * R2**3 / 3.
Vt = 4. * np.pi * R2**3 / 3.
V11 = IV(dr, R1, R1)
if self.pf['ps_include_temp'] and self.pf['ps_temp_model'] == 2:
V12 = V1
else:
V12 = 2 * IV(dr, R1, R2) - IV(dr, R1, R1)
V22 = IV(dr, R2, R2)
if self.pf['ps_temp_model'] == 1:
V22 += -2. * IV(dr, R1, R2) + IV(dr, R1, R1)
if self.pf['ps_include_temp'] and self.pf['ps_temp_model'] == 1:
V1n = V1 - IV(dr, R1, R2)
elif self.pf['ps_include_temp'] and self.pf['ps_temp_model'] == 2:
V1n = V1
else:
V1n = V1 - V11
V2n = V2 - IV(dr, R2, R2)
if self.pf['ps_temp_model'] == 1:
V2n += IV(dr, R1, R2)
# 'anything' to one point, 'nothing' to other.
# Without temperature fluctuations, same as V1n
if self.pf['ps_include_temp']:
Van = Vt - IV(dr, R2, R2)
else:
Van = V1n
return V11, V12, V22, V1n, V2n, Van
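    # Added summary (not original comments): V11 = both points ionized,
    # V12 = one point ionized and the other heated, V22 = both points heated,
    # V1n / V2n = one point ionized / heated and the other left unaffected,
    # Van = one point affected in any way and the other left unaffected.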
def exclusion_volumes(self, dr, R1, R2, R3):
"""
        Volume in which a single source affects only one of the two points.
"""
pass
@property
def heating_ongoing(self):
if not hasattr(self, '_heating_ongoing'):
self._heating_ongoing = True
return self._heating_ongoing
@heating_ongoing.setter
def heating_ongoing(self, value):
self._heating_ongoing = value
def BubbleShellFillingFactor(self, z, R_s=None):
"""
"""
# Hard exit.
if not self.pf['ps_include_temp']:
return 0.0
Qi = self.MeanIonizedFraction(z)
if self.pf['ps_temp_model'] == 1:
R_i, M_b, dndm_b = self.BubbleSizeDistribution(z)
if Qi == 1:
return 0.0
if type(R_s) is np.ndarray:
nz = R_i > 0
const_rsize = np.allclose(np.diff(R_s[nz==1] / R_i[nz==1]), 0.0)
if const_rsize:
fvol = (R_s[0] / R_i[0])**3 - 1.
Qh = Qi * fvol
else:
V = 4. * np.pi * (R_s**3 - R_i**3) / 3.
Mmin = self.Mmin(z) * self.zeta
Qh = self.get_prob(z, M_b, dndm_b, Mmin, V,
exp=False, ep=0.0, Mmax=None)
#raise NotImplemented("No support for absolute scaling of hot bubbles yet.")
if (Qh > (1. - Qi) * 1.): #or Qh > 0.5: #or Qi > 0.5:
self.heating_ongoing = 0
Qh = np.minimum(Qh, 1. - Qi)
return Qh
else:
# This will get called if temperature fluctuations are off
return 0.0
elif self.pf['ps_temp_model'] == 2:
Mmin = self.Mmin(z) * self.zeta_X
R_i, M_b, dndm_b = self.BubbleSizeDistribution(z, ion=False)
V = 4. * np.pi * R_i**3 / 3.
Qh = self.get_prob(z, M_b, dndm_b, Mmin, V,
exp=False, ep=0.0, Mmax=None)
#Qh = self.BubbleFillingFactor(z, ion=False)
#print('Qh', Qh)
return np.minimum(Qh, 1. - Qi)
else:
            raise NotImplementedError('Unrecognized option for BSD.')
#return min(Qh, 1.), min(Qc, 1.)
@property
def bsd_model(self):
return self.pf['bubble_size_dist'].lower()
def MeanIonizedFraction(self, z, ion=True):
Mmin = self.Mmin(z)
logM = np.log10(Mmin)
if ion:
if not self.pf['ps_include_ion']:
return 0.0
zeta = self.zeta
return np.minimum(1.0, zeta * self.halos.fcoll_2d(z, logM))
else:
if not self.pf['ps_include_temp']:
return 0.0
zeta = self.zeta_X
# Assume that each heated region contains the same volume
# of fully-ionized material.
Qi = self.MeanIonizedFraction(z, ion=True)
Qh = zeta * self.halos.fcoll_2d(z, logM) - Qi
return np.minimum(1.0 - Qi, Qh)
def delta_shell(self, z):
"""
Relative density != relative over-density.
"""
if not self.pf['ps_include_temp']:
return 0.0
if self.pf['ps_temp_model'] == 2:
return self.delta_bubble_vol_weighted(z, ion=False)
delta_i_bar = self.delta_bubble_vol_weighted(z)
rdens = self.pf["bubble_shell_rdens_zone_0"]
return rdens * (1. + delta_i_bar) - 1.
def BulkDensity(self, z, R_s):
Qi = self.MeanIonizedFraction(z)
#Qh = self.BubbleShellFillingFactor(z, R_s)
Qh = self.MeanIonizedFraction(z, ion=False)
delta_i_bar = self.delta_bubble_vol_weighted(z)
delta_h_bar = self.delta_shell(z)
if self.pf['ps_igm_model'] == 2:
delta_hal_bar = self.mean_halo_overdensity(z)
Qhal = self.Qhal(z, Mmax=self.Mmin(z))
else:
Qhal = 0.0
delta_hal_bar = 0.0
return -(delta_i_bar * Qi + delta_h_bar * Qh + delta_hal_bar * Qhal) \
/ (1. - Qi - Qh - Qhal)
def BubbleFillingFactor(self, z, ion=True, rescale=True):
"""
Fraction of volume filled by bubbles.
This is never actually used, but for reference, the mean ionized
fraction would be 1 - exp(-this). What we actually do is re-normalize
the bubble size distribution to guarantee Q = zeta * fcoll. See
MeanIonizedFraction and BubbleSizeDistribution for more details.
"""
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
if self.bsd_model is None:
R_i = self.pf['bubble_size']
V_i = 4. * np.pi * R_i**3 / 3.
ni = self.BubbleDensity(z)
Qi = 1. - np.exp(-ni * V_i)
elif self.bsd_model in ['fzh04', 'hmf']:
# Smallest bubble is one around smallest halo.
# Don't actually need its mass, just need index to correctly
# truncate integral.
Mmin = self.Mmin(z) * zeta
# M_b should just be self.m? No.
R_i, M_b, dndm_b = self.BubbleSizeDistribution(z, ion=ion,
rescale=rescale)
V_i = 4. * np.pi * R_i**3 / 3.
iM = np.argmin(np.abs(Mmin - M_b))
Qi = np.trapz(dndm_b[iM:] * M_b[iM:] * V_i[iM:], x=np.log(M_b[iM:]))
# This means reionization is over.
if self.bsd_model == 'fzh04':
if self._B0(z, zeta) <= 0:
return 1.
else:
            raise NotImplementedError('Unrecognized option for BSD.')
return min(Qi, 1.)
# Grab heated phase to enforce BC
#Rs = self.BubbleShellRadius(z, R_i)
#Vsh = 4. * np.pi * (Rs - R_i)**3 / 3.
#Qh = np.trapz(dndm * Vsh * M_b, x=np.log(M_b))
#if lya and self.pf['bubble_pod_size_func'] in [None, 'const', 'linear']:
# Rc = self.BubblePodRadius(z, R_i, zeta, zeta_lya)
# Vc = 4. * np.pi * (Rc - R_i)**3 / 3.
#
# if self.pf['powspec_rescale_Qlya']:
# # This isn't actually correct since we care about fluxes
# # not number of photons, but fine for now.
# Qc = min(zeta_lya * self.halos.fcoll_2d(z, np.log10(self.Mmin(z))), 1)
# else:
# Qc = np.trapz(dndlnm[iM:] * Vc[iM:], x=np.log(M_b[iM:]))
#
# return min(Qc, 1.)
#
#elif lya and self.pf['bubble_pod_size_func'] == 'fzh04':
# return self.BubbleFillingFactor(z, zeta_lya, None, lya=False)
#else:
@property
def tab_Mmin(self):
if not hasattr(self, '_tab_Mmin'):
raise AttributeError('Must set Mmin by hand (right now)')
return self._tab_Mmin
@tab_Mmin.setter
def tab_Mmin(self, value):
if type(value) is not np.ndarray:
value = np.ones_like(self.halos.tab_z) * value
else:
assert value.size == self.halos.tab_z.size
self._tab_Mmin = value
def Mmin(self, z):
return np.interp(z, self.halos.tab_z, self.tab_Mmin)
def mean_halo_bias(self, z):
bias = self.halos.Bias(z)
M_h = self.halos.tab_M
iz_h = np.argmin(np.abs(z - self.halos.tab_z))
iM_h = np.argmin(np.abs(self.Mmin(z) - M_h))
dndm_h = self.halos.tab_dndm[iz_h]
return 1.0
#return simps(M_h * dndm_h * bias, x=np.log(M_h)) \
# / simps(M_h * dndm_h, x=np.log(M_h))
def tab_bubble_bias(self, zeta):
if not hasattr(self, '_tab_bubble_bias'):
func = lambda z: self._fzh04_eq22(z, zeta)
            self._tab_bubble_bias = np.array(list(map(func, self.halos.tab_z_ps)))
return self._tab_bubble_bias
def _fzh04_eq22(self, z, ion=True):
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
iz = np.argmin(np.abs(z - self.halos.tab_z))
s = self.sigma
S = s**2
#return 1. + ((self.LinearBarrier(z, zeta, zeta) / S - (1. / self._B0(z, zeta))) \
# / self._growth_factor(z))
return 1. + (self._B0(z, zeta)**2 / S / self._B(z, zeta, zeta))
def bubble_bias(self, z, ion=True):
"""
Eq. 9.24 in Loeb & Furlanetto (2013) or Eq. 22 in FZH04.
"""
return self._fzh04_eq22(z, ion)
#iz = np.argmin(np.abs(z - self.halos.tab_z_ps))
#
#x, y = self.halos.tab_z_ps, self.tab_bubble_bias(zeta)[iz]
#
#
#
#m = (y[-1] - y[-2]) / (x[-1] - x[-2])
#
#return m * z + y[-1]
#iz = np.argmin(np.abs(z - self.halos.tab_z))
#s = self.sigma
#S = s**2
#
##return 1. + ((self.LinearBarrier(z, zeta, zeta) / S - (1. / self._B0(z, zeta))) \
## / self._growth_factor(z))
#
#fzh04 = 1. + (self._B0(z, zeta)**2 / S / self._B(z, zeta, zeta))
#
#return fzh04
def mean_bubble_bias(self, z, ion=True):
"""
"""
R, M_b, dndm_b = self.BubbleSizeDistribution(z, ion=ion)
#if ('h' in term) or ('c' in term) and self.pf['powspec_temp_method'] == 'shell':
# R_s, Rc = self.BubbleShellRadius(z, R_i)
# R = R_s
#else:
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
V = 4. * np.pi * R**3 / 3.
Mmin = self.Mmin(z) * zeta
iM = np.argmin(np.abs(Mmin - self.m))
bHII = self.bubble_bias(z, ion)
#tmp = dndm[iM:]
#print(z, len(tmp[np.isnan(tmp)]), len(bHII[np.isnan(bHII)]))
#imax = int(min(np.argwhere(np.isnan(R_i))))
if ion and self.pf['ps_include_ion']:
Qi = self.MeanIonizedFraction(z)
elif ion and not self.pf['ps_include_ion']:
            raise NotImplementedError('help')
elif (not ion) and self.pf['ps_include_temp']:
Qi = self.MeanIonizedFraction(z, ion=False)
elif ion and self.pf['ps_include_temp']:
Qi = self.MeanIonizedFraction(z, ion=False)
else:
            raise NotImplementedError('help')
return np.trapz(dndm_b[iM:] * V[iM:] * bHII[iM:] * M_b[iM:],
x=np.log(M_b[iM:])) / Qi
#def delta_bubble_mass_weighted(self, z, zeta):
# if self._B0(z, zeta) <= 0:
# return 0.
#
# R_i, M_b, dndm_b = self.BubbleSizeDistribution(z, zeta)
# V_i = 4. * np.pi * R_i**3 / 3.
#
# Mmin = self.Mmin(z) * zeta
# iM = np.argmin(np.abs(Mmin - self.m))
# B = self._B(z, zeta)
# rho0 = self.cosm.mean_density0
#
# dm_ddel = rho0 * V_i
#
# return simps(B[iM:] * dndm_b[iM:] * M_b[iM:], x=np.log(M_b[iM:]))
def delta_bubble_vol_weighted(self, z, ion=True):
if not self.pf['ps_include_ion']:
return 0.0
if not self.pf['ps_include_xcorr_ion_rho']:
return 0.0
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
if self._B0(z, zeta) <= 0:
return 0.
R_i, M_b, dndm_b = self.BubbleSizeDistribution(z, ion=ion)
V_i = 4. * np.pi * R_i**3 / 3.
Mmin = self.Mmin(z) * zeta
iM = np.argmin(np.abs(Mmin - self.m))
B = self._B(z, ion=ion)
return np.trapz(B[iM:] * dndm_b[iM:] * V_i[iM:] * M_b[iM:],
x=np.log(M_b[iM:]))
#def mean_bubble_overdensity(self, z, zeta):
# if self._B0(z, zeta) <= 0:
# return 0.
#
# R_i, M_b, dndm_b = self.BubbleSizeDistribution(z, zeta)
# V_i = 4. * np.pi * R_i**3 / 3.
#
# Mmin = self.Mmin(z) * zeta
# iM = np.argmin(np.abs(Mmin - self.m))
# B = self._B(z, zeta)
# rho0 = self.cosm.mean_density0
#
# dm_ddel = rho0 * V_i
#
# return simps(B[iM:] * dndm_b[iM:] * M_b[iM:], x=np.log(M_b[iM:]))
def mean_halo_abundance(self, z, Mmin=False):
M_h = self.halos.tab_M
iz_h = np.argmin(np.abs(z - self.halos.tab_z))
if Mmin:
iM_h = np.argmin(np.abs(self.Mmin(z) - M_h))
else:
iM_h = 0
dndm_h = self.halos.tab_dndm[iz_h]
return np.trapz(M_h * dndm_h, x=np.log(M_h))
def spline_cf_mm(self, z):
if not hasattr(self, '_spline_cf_mm_'):
self._spline_cf_mm_ = {}
if z not in self._spline_cf_mm_:
iz = np.argmin(np.abs(z - self.halos.tab_z_ps))
self._spline_cf_mm_[z] = interp1d(np.log(self.halos.tab_R),
self.halos.tab_cf_mm[iz], kind='cubic', bounds_error=False,
fill_value=0.0)
return self._spline_cf_mm_[z]
def excess_probability(self, z, R, ion=True):
"""
This is the excess probability that a point is ionized given that
we already know another point (at distance r) is ionized.
"""
# Function of bubble mass (bubble size)
bHII = self.bubble_bias(z, ion)
bbar = self.mean_bubble_bias(z, ion)
if R < self.halos.tab_R.min():
print("R too small")
if R > self.halos.tab_R.max():
print("R too big")
xi_dd = self.spline_cf_mm(z)(np.log(R))
#if term == 'ii':
return bHII * bbar * xi_dd
#elif term == 'id':
# return bHII * bbar * xi_dd
#else:
# raise NotImplemented('help!')
def _K(self, zeta):
return erfinv(1. - (1. / zeta))
def _growth_factor(self, z):
return np.interp(z, self.halos.tab_z, self.halos.tab_growth,
left=np.inf, right=np.inf)
def _delta_c(self, z):
return self.cosm.delta_c0 / self._growth_factor(z)
def _B0(self, z, ion=True):
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
iz = np.argmin(np.abs(z - self.halos.tab_z))
s = self.sigma
# Variance on scale of smallest collapsed object
sigma_min = self.sigma_min(z)
return self._delta_c(z) - root2 * self._K(zeta) * sigma_min
def _B1(self, z, ion=True):
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
iz = np.argmin(np.abs(z - self.halos.tab_z))
s = self.sigma #* self.halos.growth_factor[iz]
sigma_min = self.sigma_min(z)
return self._K(zeta) / np.sqrt(2. * sigma_min**2)
def _B(self, z, ion=True, zeta_min=None):
return self.LinearBarrier(z, ion, zeta_min=zeta_min)
def LinearBarrier(self, z, ion=True, zeta_min=None):
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
iz = np.argmin(np.abs(z - self.halos.tab_z))
s = self.sigma #/ self.halos.growth_factor[iz]
if zeta_min is None:
zeta_min = zeta
return self._B0(z, ion) + self._B1(z, ion) * s**2
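    # Added note (not original): this implements the FZH04-style linear
    # barrier approximation B(S) ~ B0 + B1 * S with S = sigma**2, where B0 and
    # B1 are computed in _B0 and _B1 above from delta_c(z), K(zeta) and
    # sigma_min(z).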
def Barrier(self, z, ion=True, zeta_min=None):
"""
Full barrier.
"""
if ion:
zeta = self.zeta
else:
zeta = self.zeta_X
if zeta_min is None:
zeta_min = zeta
#iz = np.argmin(np.abs(z - self.halos.tab_z))
#D = self.halos.growth_factor[iz]
sigma_min = self.sigma_min(z)
#Mmin = self.Mmin(z)
#sigma_min = np.interp(Mmin, self.halos.M, self.halos.sigma_0)
delta = self._delta_c(z)
return delta - np.sqrt(2.) * self._K(zeta) \
            * np.sqrt(sigma_min**2 - self.sigma**2)
# This script produces fits and confidence intervals for results in Python.
#
#%%
import numpy as np
from scipy.optimize import curve_fit
from scipy.stats.distributions import t
#%%
from numpy import cos, sin, exp, pi, meshgrid
def KentFunc(Xin, theta, phi, psi, kappa, beta, A):
# Assume theta_z, phi_z are column vectors ([0,2 pi]), theta, phi, psi are
    # rotation scalars ([0,2 pi])
theta_z, phi_z = Xin[:, 0], Xin[:, 1]
Z = np.array([cos(theta_z) * cos(phi_z), sin(theta_z) * cos(phi_z), sin(phi_z)]).T # M by 3 finally
coord = SO3(theta, phi, psi)
mu1 = coord[:, 0:1] # col vector
# mu23 = coord[:, 1:3] # 2 col vectors, 3 by 2
mu2 = coord[:, 1:2] # 2 col vectors, 3 by 2
mu3 = coord[:, 2:3] # 2 col vectors, 3 by 2
fval = A * exp(kappa * Z @ mu1 + beta * ((Z @ mu2) ** 2 - (Z @ mu3) ** 2))
return fval[:, 0]
def KentFunc_bsl(Xin, theta, phi, psi, kappa, beta, A, bsl):
# Assume theta_z, phi_z are column vectors ([0,2 pi]), theta, phi, psi are
    # rotation scalars ([0,2 pi])
theta_z, phi_z = Xin[:, 0], Xin[:, 1]
Z = np.array([cos(theta_z) * cos(phi_z), sin(theta_z) * cos(phi_z), sin(phi_z)]).T # M by 3 finally
coord = SO3(theta, phi, psi)
mu1 = coord[:, 0:1] # col vector
# mu23 = coord[:, 1:3] # 2 col vectors, 3 by 2
mu2 = coord[:, 1:2] # 2 col vectors, 3 by 2
mu3 = coord[:, 2:3] # 2 col vectors, 3 by 2
fval = A * exp(kappa * Z @ mu1 + beta * ((Z @ mu2) ** 2 - (Z @ mu3) ** 2)) + bsl
return fval[:, 0]
def SO3(theta, phi, psi):
orig = np.array([[cos(theta)*cos(phi), sin(theta)*cos(phi), sin(phi)],
[-sin(theta) , cos(theta) , 0],
[cos(theta)*sin(phi), sin(theta)*sin(phi), -cos(phi)]]).T
Rot23 = np.array([[1, 0, 0],
[0, cos(psi), sin(psi)],
[0, -sin(psi), cos(psi)]])
coord = orig @ Rot23
return coord
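# Added sanity check (not from the original script): SO3 composes two
# orthogonal matrices, so the returned frame should be orthonormal for any
# angles. The test angles below are arbitrary.
_coord_check = SO3(0.3, -0.7, 1.1)
assert np.allclose(_coord_check @ _coord_check.T, np.eye(3))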
#%%
def fit_Kent(theta_arr, phi_arr, act_map):
phi_grid, theta_grid = meshgrid(phi_arr, theta_arr)
Xin = np.array([theta_grid.flatten(), phi_grid.flatten()]).T
fval = act_map.flatten()
    try:  # keep a fitting failure from crashing the whole run
param, pcov = curve_fit(KentFunc, Xin, fval,
p0=[0, 0, pi / 2, 0.1, 0.1, 0.1],
bounds=([-pi, -pi / 2, 0, 0, 0, 0],
[pi, pi / 2, pi, np.inf, np.inf, np.inf]))
sigmas = np.diag(pcov) ** 0.5
return param, sigmas
except RuntimeError as err:
print(type(err))
print(err)
return np.ones(6)*np.nan, np.ones(6)*np.nan
def fit_Kent_bsl(theta_arr, phi_arr, act_map):
phi_grid, theta_grid = meshgrid(phi_arr, theta_arr)
Xin = np.array([theta_grid.flatten(), phi_grid.flatten()]).T
fval = act_map.flatten()
    try:  # keep a fitting failure from crashing the whole run
param, pcov = curve_fit(KentFunc_bsl, Xin, fval,
p0=[0, 0, pi / 2, 0.1, 0.1, 0.1, 0.001],
bounds=([-pi, -pi / 2, 0, 0, 0, 0, 0],
[pi, pi / 2, pi, np.inf, np.inf, np.inf, np.inf]))
sigmas = np.diag(pcov) ** 0.5
return param, sigmas
except RuntimeError as err:
print(type(err))
print(err)
return np.ones(7)*np.nan, np.ones(7)*np.nan
#%
def fit_stats(act_map, param, func=KentFunc):
"""Generate fitting statistics from scipy's curve fitting"""
phi_grid, theta_grid = meshgrid(phi_arr, theta_arr)
Xin = np.array([theta_grid.flatten(), phi_grid.flatten()]).T
fval = act_map.flatten()
fpred = func(Xin, *param) # KentFunc
res = fval - fpred
rsquare = 1 - (res**2).mean() / fval.var()
return res.reshape(act_map.shape), rsquare
#%% Testing the fitting functionality
ang_step = 18
theta_arr = np.arange(-90, 90.1, ang_step) / 180 * pi
phi_arr = np.arange(-90, 90.1, ang_step) / 180 * pi
phi_grid, theta_grid = meshgrid(phi_arr, theta_arr)
Xin = np.array([theta_grid.flatten(), phi_grid.flatten()]).T
fval = KentFunc(Xin, *[0, 0, pi/2, 0.1, 0.1, 1])
param, pcov = curve_fit(KentFunc, Xin, fval, p0=[-1, 1, pi/2, 0.1, 0.1, 0.2])
# Note: scipy's curve_fit treats every data point as an independent observation;
# unlike MATLAB's fitting tools it does not report parameter confidence intervals directly.
fval = KentFunc_bsl(Xin, *[0, 0, pi/2, 0.1, 0.1, 1, 5])
param, pcov = curve_fit(KentFunc_bsl, Xin, fval, p0=[-1, 1, pi/2, 0.1, 0.1, 0.2,0.001])
#%%
fval = KentFunc_bsl(Xin, *[0, 0, pi/2, 0.1, 0.1, 1, 5])
#%% Testing the fitting functionality under noise
ang_step = 18
theta_arr = np.arange(-90, 90.1, ang_step) / 180 * pi
phi_arr = np.arange(-90, 90.1, ang_step) / 180 * pi
phi_grid, theta_grid = meshgrid(phi_arr, theta_arr)
Xin = np.array([theta_grid.flatten(), phi_grid.flatten()]).T
fval = KentFunc(Xin, *[1, 0, pi/2, 0.1, 0.1, 0.1]) + np.random.randn(Xin.shape[0]) * 0.01
param, pcov = curve_fit(KentFunc, Xin, fval,
p0=[0, 0, pi/2, 0.1, 0.1, 0.1],
bounds=([-pi, -pi/2, 0, -np.inf, 0, 0],
[ pi, pi/2, pi, np.inf, np.inf, np.inf]))
print(param)
print(pcov)
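#%% Added sketch (not in the original script): approximate parameter
# confidence intervals from the curve_fit covariance, using the t distribution
# imported above. The confidence level alpha is an assumed value.
alpha = 0.05
dof = max(1, len(fval) - len(param))
tval = t.ppf(1.0 - alpha / 2.0, dof)
for p_hat, se in zip(param, np.sqrt(np.diag(pcov))):
    print("{:>9.4f}  [{:>9.4f}, {:>9.4f}]".format(p_hat, p_hat - tval * se, p_hat + tval * se))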
#%% Using lmfit package
import numpy as np
import lmfit # trying out another package.
x = np.linspace(0.3, 10, 100)
np.random.seed(0)
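# Hedged continuation sketch (not the original author's code): one common way
# to use lmfit is to wrap a model function in lmfit.Model and fit it. The
# gaussian model and starting values below are illustrative assumptions only.
def _gaussian(x, amp, cen, wid):
    return amp * np.exp(-(x - cen)**2 / (2 * wid**2))

_y = _gaussian(x, 5.0, 5.0, 1.0) + np.random.normal(scale=0.1, size=x.size)
_gmodel = lmfit.Model(_gaussian)
_result = _gmodel.fit(_y, x=x, amp=1.0, cen=4.0, wid=0.5)
print(_result.fit_report())  # reports best-fit values and standard errors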
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 20:52:06 2020
@author: Nicolai
"""
from ITestbenchBase import ITestbenchBase
import numpy as np
import scipy.integrate as integrate
import time
import psutil
import gc
class CiPdeBase(ITestbenchBase):
"""
Abstract class that implements the ITestbenchBase interface.
It further provides functionality that is used by the CI solver.
Attributes
----------
opt_algo: IOptAlgoBase
implementation of an IOptAlgoBase for any optimisation algorithm
kernel: IKernelBase
KernelGauss or any other implementation of the IKernelBase
pop_history: list
population at every iteration during the optimisation
fit_history: list
fitness value at every iteration during the optimisation
cr_history: list
crossover probability at every iteration
f_history: list
scalar factor at every iteration
_nb: list
        list of evaluation points (tuples) on the boundary
    _nc: list
        list of inner evaluation points stored as tuples
    _weight_reference: float
        denominator of the weighting factor, stored so it is computed only once
    _lx: float
        lower x limit of the domain
_ux: float
upper x limit of the domain
_xi: list
weighting factor for inner collocation points
_phi: list
weighting factor for boundary point
Methods
-------
pde_string(): string
getter for returning the _pde_string that holds a short description of the problem
exec_time(): float
        getter for returning the execution time taken for solving the problem
mem_consumption(): int
getter for returning the memory consumption of the solver
    exact(x): float
takes a numpy array, returns the function value of the exact solution
approx(x): float
        takes a numpy array, returns the function value of the approximate solution
normL2(): float
returns the distance between the exact and the approximate solution
solve(): None
not implemented, must be overridden by the child class that is specific to a pde
nc_weight(xi): float
calculates the weighting factor for one specific xi from nc
fitness_func(kernels): float
objective function passed to the optimisation algorithm, not implemented, must be overridden by the child class that is specific to a pde
_ly(x): float
lower y boundary of the domain
_uy(x): float
upper y boundary of the domain
"""
def __init__(self, opt_algo, kernel, nb, nc):
self.opt_algo = opt_algo
self.kernel = kernel
self._pde_string = ""
self._exec_time = 0.0
self._mem_consumption = 0
self.pop_history = []
self.fit_history = []
self.cr_history = []
self.f_history = []
self.sol_kernel = np.array([])
self._nc = nc
self._nb = nb
        # inner weighting factor reference term
temp_denominator = []
for xk in nc:
temp_xk_xj = []
for xj in nb:
temp_xk_xj.append(np.linalg.norm(np.array([xk[0], xk[1]])-np.array([xj[0], xj[1]])))
temp_denominator.append(min(temp_xk_xj))
self._weight_reference = max(temp_denominator)
self._xi = []
self._phi = []
self._kappa = 0
self._lx = None
self._ux = None
self._ly = None
self._uy = None
@property
def pde_string(self):
return self._pde_string
@property
def exec_time(self):
return self._exec_time
@property
def mem_consumption(self):
return self._mem_consumption
def exact(self, x): pass
def approx(self, x):
try:
return self.kernel.solution(self.sol_kernel, x)
except ValueError:
print("self.sol_kernel is empty, try to call solve() first")
def normL2(self):
difference_func = lambda x,y: \
(self.approx(np.array([x,y])) - self.exact(np.array([x,y]))) * \
(self.approx(np.array([x,y])) - self.exact(np.array([x,y])))
return np.sqrt(integrate.dblquad(difference_func, self._lx, self._ux, self._ly, self._uy)[0])
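    # Added note (not original): normL2 evaluates the L2 distance
    #   || u_approx - u_exact ||_2
    #     = sqrt( integral_{x in [_lx,_ux]} integral_{y in [_ly(x),_uy(x)]}
    #             (u_approx(x,y) - u_exact(x,y))**2 dy dx ),
    # with the double integral carried out numerically by
    # scipy.integrate.dblquad.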
def fitness_func(self, kernels): pass
def nc_weight(self, xi):
temp_xi_xj = []
for xj in self._nb:
            temp_xi_xj.append(np.linalg.norm(np.array([xi[0], xi[1]])-np.array([xj[0], xj[1]])))
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
from __future__ import division
from itertools import chain, combinations
import warnings
from itertools import combinations_with_replacement as combinations_w_r
from distutils.version import LooseVersion
import numpy as np
from scipy import sparse
from scipy import stats
from scipy import optimize
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import string_types
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import boxcox, nanpercentile, nanmedian
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import (check_is_fitted, check_random_state,
FLOAT_DTYPES)
from ._encoders import OneHotEncoder
BOUNDS_THRESHOLD = 1e-7
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'QuantileTransformer',
'PowerTransformer',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'quantile_transform',
'power_transform',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
    StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.nanmean(X, axis)
if with_std:
scale_ = np.nanstd(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = np.nanmean(Xr, axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = np.nanmean(Xr, axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
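# Illustrative call of scale() (added comments, not part of scikit-learn; the
# toy matrix is an arbitrary example):
# import numpy as np
# from sklearn.preprocessing import scale
# X = np.array([[1., -1., 2.], [2., 0., 0.], [0., 1., -1.]])
# X_scaled = scale(X)
# X_scaled.mean(axis=0)  # approximately 0 per column
# X_scaled.std(axis=0)   # approximately 1 per column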
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
Examples
--------
>>> from sklearn.preprocessing import MinMaxScaler
>>>
>>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
>>> scaler = MinMaxScaler()
>>> print(scaler.fit(data))
MinMaxScaler(copy=True, feature_range=(0, 1))
>>> print(scaler.data_max_)
[ 1. 18.]
>>> print(scaler.transform(data))
[[0. 0. ]
[0.25 0.25]
[0.5 0.5 ]
[1. 1. ]]
>>> print(scaler.transform([[2, 2]]))
[[1.5 0. ]]
See also
--------
minmax_scale: Equivalent function without the estimator API.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
            raise TypeError("MinMaxScaler does not support sparse input. "
                            "You may consider using MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite="allow-nan")
data_min = np.nanmin(X, axis=0)
data_max = np.nanmax(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES,
force_all_finite="allow-nan")
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES,
force_all_finite="allow-nan")
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
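# Illustrative call of minmax_scale() (added comments, not part of
# scikit-learn; the data is an arbitrary example):
# minmax_scale([[1., 2.], [3., 4.], [5., 6.]])  # each column mapped onto [0, 1]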
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
that others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
Attributes
----------
scale_ : ndarray or None, shape (n_features,)
Per feature relative scaling of the data. Equal to ``None`` when
``with_std=False``.
.. versionadded:: 0.17
*scale_*
mean_ : ndarray or None, shape (n_features,)
The mean value for each feature in the training set.
Equal to ``None`` when ``with_mean=False``.
var_ : ndarray or None, shape (n_features,)
The variance for each feature in the training set. Used to compute
`scale_`. Equal to ``None`` when ``with_std=False``.
n_samples_seen_ : int or array, shape (n_features,)
The number of samples processed by the estimator for each feature.
        If there are no missing samples, the ``n_samples_seen`` will be an
integer, otherwise it will be an array.
Will be reset on new calls to fit, but increments across
``partial_fit`` calls.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler
>>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]
>>> scaler = StandardScaler()
>>> print(scaler.fit(data))
StandardScaler(copy=True, with_mean=True, with_std=True)
>>> print(scaler.mean_)
[0.5 0.5]
>>> print(scaler.transform(data))
[[-1. -1.]
[-1. -1.]
[ 1. 1.]
[ 1. 1.]]
>>> print(scaler.transform([[2, 2]]))
[[3. 3.]]
See also
--------
scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
        in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
# if n_samples_seen_ is an integer (i.e. no missing values), we need to
# transform it to a NumPy array of shape (n_features,) required by
# incr_mean_variance_axis and _incremental_variance_axis
if (hasattr(self, 'n_samples_seen_') and
isinstance(self.n_samples_seen_, (int, np.integer))):
self.n_samples_seen_ = np.repeat(self.n_samples_seen_,
X.shape[1]).astype(np.int64)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
sparse_constructor = (sparse.csr_matrix
if X.format == 'csr' else sparse.csc_matrix)
counts_nan = sparse_constructor(
(np.isnan(X.data), X.indices, X.indptr),
shape=X.shape).sum(axis=0).A.ravel()
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = (X.shape[0] -
counts_nan).astype(np.int64)
if self.with_std:
# First pass
if not hasattr(self, 'scale_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
if hasattr(self, 'scale_'):
self.n_samples_seen_ += X.shape[0] - counts_nan
else:
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = np.zeros(X.shape[1], dtype=np.int64)
# First pass
if not hasattr(self, 'scale_'):
self.mean_ = .0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
if not self.with_mean and not self.with_std:
self.mean_ = None
self.var_ = None
self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0)
else:
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
# for backward-compatibility, reduce n_samples_seen_ to an integer
# if the number of samples is the same for each feature (i.e. no
# missing values)
if np.ptp(self.n_samples_seen_) == 0:
self.n_samples_seen_ = self.n_samples_seen_[0]
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
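    # Added reference note (not part of the original class): the incremental
    # update of Chan, Golub & LeVeque (1983) used by _incremental_mean_and_var
    # merges a batch (n_b, mean_b, M2_b) into running statistics (n, mean, M2)
    # roughly as
    #   delta    = mean_b - mean
    #   n_new    = n + n_b
    #   mean_new = mean + delta * n_b / n_new
    #   M2_new   = M2 + M2_b + delta**2 * n * n_b / n_new
    # with var = M2 / n, which is what keeps partial_fit numerically stable.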
def transform(self, X, y='deprecated', copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
copy : bool, optional (default: None)
Copy the input X or not.
Returns
-------
X_tr : array-like, shape [n_samples, n_features]
Transformed array.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
Examples
--------
>>> from sklearn.preprocessing import MaxAbsScaler
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> transformer = MaxAbsScaler().fit(X)
>>> transformer
MaxAbsScaler(copy=True)
>>> transformer.transform(X)
array([[ 0.5, -1. , 1. ],
[ 1. , 0. , 0. ],
[ 0. , 1. , -0.5]])
See also
--------
maxabs_scale: Equivalent function without the estimator API.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0, ignore_nan=True)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.nanmax(np.abs(X), axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
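# Illustrative call of maxabs_scale() (added comments, not part of
# scikit-learn; the data is an arbitrary example):
# maxabs_scale([[1., -2.], [2., 4.], [-4., 2.]])  # each column divided by its max absolute value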
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature by
computing the relevant statistics on the samples in the training
set. Median and interquartile range are then stored to be used on
later data using the ``transform`` method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This will cause ``transform`` to raise an exception when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
Examples
--------
>>> from sklearn.preprocessing import RobustScaler
>>> X = [[ 1., -2., 2.],
... [ -2., 1., 3.],
... [ 4., 1., -2.]]
>>> transformer = RobustScaler().fit(X)
>>> transformer
RobustScaler(copy=True, quantile_range=(25.0, 75.0), with_centering=True,
with_scaling=True)
>>> transformer.transform(X)
array([[ 0. , -2. , 0. ],
[-1. , 0. , 0.4],
[ 1. , 0. , -1.6]])
See also
--------
robust_scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
https://en.wikipedia.org/wiki/Median
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
# at fit, convert sparse matrices to csc for optimized computation of
# the quantiles
X = check_array(X, accept_sparse='csc', copy=self.copy, estimator=self,
dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
if self.with_centering:
if sparse.issparse(X):
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
self.center_ = nanmedian(X, axis=0)
else:
self.center_ = None
if self.with_scaling:
quantiles = []
for feature_idx in range(X.shape[1]):
if sparse.issparse(X):
column_nnz_data = X.data[X.indptr[feature_idx]:
X.indptr[feature_idx + 1]]
column_data = np.zeros(shape=X.shape[0], dtype=X.dtype)
column_data[:len(column_nnz_data)] = column_nnz_data
else:
column_data = X[:, feature_idx]
quantiles.append(nanpercentile(column_data,
self.quantile_range))
quantiles = np.transpose(quantiles)
self.scale_ = quantiles[1] - quantiles[0]
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
else:
self.scale_ = None
return self
def transform(self, X):
"""Center and scale the data.
Parameters
----------
X : {array-like, sparse matrix}
The data used to scale along the specified axis.
"""
check_is_fitted(self, 'center_', 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
check_is_fitted(self, 'center_', 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
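# A minimal usage sketch (hypothetical helper), assuming the RobustScaler
# defined above: after fitting, center_ holds the per-feature medians and
# scale_ the interquartile ranges used by transform.
def _demo_robust_scaler_attributes():
    import numpy as np
    X = np.array([[1., -2., 2.],
                  [-2., 1., 3.],
                  [4., 1., -2.]])
    scaler = RobustScaler().fit(X)
    # medians: [1., 1., 2.]; interquartile ranges: [3., 1.5, 2.5]
    return scaler.center_, scaler.scale_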
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
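# A minimal usage sketch (hypothetical helper), assuming the robust_scale
# function defined above: a single large outlier barely affects the median and
# IQR, so the inliers keep a small, interpretable scale.
def _demo_robust_scale_outlier():
    import numpy as np
    x = np.array([1., 2., 3., 4., 100.])
    # centered on the median (3.) and divided by the IQR (2.):
    # array([-1. , -0.5,  0. ,  0.5, 48.5])
    return robust_scale(x)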
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
Returns
-------
self : instance
"""
n_samples, n_features = check_array(X, accept_sparse=True).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X):
"""Transform data to polynomial features
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
The data to transform, row by row.
Sparse input should preferably be in CSC format.
Returns
-------
XP : np.ndarray or CSC sparse matrix, shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES, accept_sparse='csc')
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
if sparse.isspmatrix(X):
columns = []
for comb in combinations:
if comb:
out_col = 1
for col_idx in comb:
out_col = X[:, col_idx].multiply(out_col)
columns.append(out_col)
else:
columns.append(sparse.csc_matrix(np.ones((X.shape[0], 1))))
XP = sparse.hstack(columns, dtype=X.dtype).tocsc()
else:
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
for i, comb in enumerate(combinations):
XP[:, i] = X[:, comb].prod(1)
return XP
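# A minimal usage sketch (hypothetical helper), assuming the PolynomialFeatures
# defined above: get_feature_names documents which combination of the inputs
# each output column corresponds to.
def _demo_polynomial_feature_names():
    import numpy as np
    X = np.arange(6).reshape(3, 2)
    poly = PolynomialFeatures(degree=2).fit(X)
    # ['1', 'a', 'b', 'a^2', 'a b', 'b^2']
    return poly.get_feature_names(['a', 'b'])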
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
Returns
-------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Normalized input X.
norms : array, shape [n_samples] if axis=1 else [n_features]
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ('l1', 'l2'):
raise NotImplementedError("return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'")
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
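# A minimal usage sketch (hypothetical helper), assuming the normalize function
# defined above: with return_norm=True the per-sample norms are also returned;
# all-zero rows are left unchanged and report a norm of 1 to avoid division by
# zero.
def _demo_normalize_return_norm():
    import numpy as np
    X = np.array([[3., 4.],
                  [0., 0.]])
    X_normed, norms = normalize(X, norm='l2', return_norm=True)
    # X_normed: [[0.6, 0.8], [0., 0.]]; norms: [5., 1.]
    return X_normed, norms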
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering. For instance, the dot product of two
l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Examples
--------
>>> from sklearn.preprocessing import Normalizer
>>> X = [[4, 1, 2, 2],
... [1, 3, 9, 3],
... [5, 7, 5, 1]]
>>> transformer = Normalizer().fit(X) # fit does nothing.
>>> transformer
Normalizer(copy=True, norm='l2')
>>> transformer.transform(X)
array([[0.8, 0.2, 0.4, 0.4],
[0.1, 0.3, 0.9, 0.3],
[0.5, 0.7, 0.5, 0.1]])
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
normalize: Equivalent function without the estimator API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : array-like
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y='deprecated', copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
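# A minimal usage sketch (hypothetical helper), assuming the Normalizer defined
# above: after l2 normalization, the dot product of two rows is their cosine
# similarity, as described in the class docstring.
def _demo_normalizer_cosine():
    import numpy as np
    X = np.array([[1., 0., 1.],
                  [1., 1., 0.]])
    Xn = Normalizer(norm='l2').fit_transform(X)
    return float(np.dot(Xn[0], Xn[1]))      # 0.5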
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
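# A minimal usage sketch (hypothetical helper), assuming the binarize function
# defined above: values strictly greater than the threshold become 1, the rest
# become 0.
def _demo_binarize_threshold():
    import numpy as np
    X = np.array([[0.2, 1.5, -0.3]])
    return binarize(X, threshold=0.5)       # array([[0., 1., 0.]])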
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Examples
--------
>>> from sklearn.preprocessing import Binarizer
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> transformer = Binarizer().fit(X) # fit does nothing.
>>> transformer
Binarizer(copy=True, threshold=0.0)
>>> transformer.transform(X)
array([[1., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the estimator API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : array-like
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y='deprecated', copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
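# A minimal usage sketch (hypothetical helper), assuming the Binarizer defined
# above: turning word counts into presence/absence indicators. fit() is
# stateless, so the transformer can be reused on any data of the same width.
def _demo_binarizer_counts():
    import numpy as np
    counts = np.array([[3., 0., 1.],
                       [0., 2., 0.]])
    # array([[1., 0., 1.],
    #        [0., 1., 0.]])
    return Binarizer(threshold=0.0).fit(counts).transform(counts)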
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
Examples
--------
>>> from sklearn.preprocessing import KernelCenterer
>>> from sklearn.metrics.pairwise import pairwise_kernels
>>> X = [[ 1., -2., 2.],
... [ -2., 1., 3.],
... [ 4., 1., -2.]]
>>> K = pairwise_kernels(X, metric='linear')
>>> K
array([[ 9., 2., -2.],
[ 2., 14., -13.],
[ -2., -13., 21.]])
>>> transformer = KernelCenterer().fit(K)
>>> transformer
KernelCenterer()
>>> transformer.transform(K)
array([[ 5., 0., -5.],
[ 0., 14., -14.],
[ -5., -14., 19.]])
"""
def __init__(self):
# Needed for backported inspect.signature compatibility with PyPy
pass
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y='deprecated', copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
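# A minimal usage sketch (hypothetical helper), assuming the KernelCenterer
# defined above: for a linear kernel, centering the kernel matrix is equivalent
# to computing the kernel of the column-centered data, which is the equivalence
# stated in the class docstring.
def _demo_kernel_centerer_equivalence():
    import numpy as np
    X = np.array([[1., -2., 2.],
                  [-2., 1., 3.],
                  [4., 1., -2.]])
    K = np.dot(X, X.T)
    K_centered = KernelCenterer().fit(K).transform(K)
    Xc = X - X.mean(axis=0)
    return bool(np.allclose(K_centered, np.dot(Xc, Xc.T)))   # True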
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[1., 0., 1.],
[1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.full(n_samples, value), X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.full(n_samples, value), X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.full((n_samples, 1), value), X))
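# A minimal usage sketch (hypothetical helper), assuming the add_dummy_feature
# function defined above: the same call handles dense arrays and sparse
# matrices, prepending the constant column in both cases.
def _demo_add_dummy_feature_sparse():
    import numpy as np
    from scipy import sparse as sp
    X = np.array([[0., 1.],
                  [1., 0.]])
    X_dense = add_dummy_feature(X, value=2.0)
    X_sparse = add_dummy_feature(sp.csr_matrix(X), value=2.0)
    # both equal [[2., 0., 1.], [2., 1., 0.]]
    return X_dense, X_sparse.toarray()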
class QuantileTransformer(BaseEstimator, TransformerMixin):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently.
The cumulative distribution function of a feature is used to project the
original values. Features values of new/unseen data that fall below
or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
n_quantiles : int, optional (default=1000)
Number of quantiles to be computed. It corresponds to the number
of landmarks used to discretize the cumulative distribution function.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
copy : boolean, optional, (default=True)
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
quantiles_ : ndarray, shape (n_quantiles, n_features)
The values corresponding to the quantiles of reference.
references_ : ndarray, shape(n_quantiles, )
Quantiles of references.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import QuantileTransformer
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
>>> qt.fit_transform(X) # doctest: +ELLIPSIS
array([...])
See also
--------
quantile_transform : Equivalent function without the estimator API.
PowerTransformer : Perform mapping to a normal distribution using a power
transform.
StandardScaler : Perform standardization that is faster, but less robust
to outliers.
RobustScaler : Perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, n_quantiles=1000, output_distribution='uniform',
ignore_implicit_zeros=False, subsample=int(1e5),
random_state=None, copy=True):
self.n_quantiles = n_quantiles
self.output_distribution = output_distribution
self.ignore_implicit_zeros = ignore_implicit_zeros
self.subsample = subsample
self.random_state = random_state
self.copy = copy
def _dense_fit(self, X, random_state):
"""Compute percentiles for dense matrices.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.
"""
if self.ignore_implicit_zeros:
warnings.warn("'ignore_implicit_zeros' takes effect only with"
" sparse matrix. This parameter has no effect.")
n_samples, n_features = X.shape
references = self.references_ * 100
# numpy < 1.9 bug: np.percentile 2nd argument needs to be a list
if LooseVersion(np.__version__) < '1.9':
references = references.tolist()
self.quantiles_ = []
for col in X.T:
if self.subsample < n_samples:
subsample_idx = random_state.choice(n_samples,
size=self.subsample,
replace=False)
col = col.take(subsample_idx, mode='clip')
self.quantiles_.append(nanpercentile(col, references))
self.quantiles_ = np.transpose(self.quantiles_)
def _sparse_fit(self, X, random_state):
"""Compute percentiles for sparse matrices.
Parameters
----------
X : sparse matrix CSC, shape (n_samples, n_features)
The data used to scale along the features axis. The sparse matrix
needs to be nonnegative.
"""
n_samples, n_features = X.shape
references = self.references_ * 100
# numpy < 1.9 bug: np.percentile 2nd argument needs to be a list
if LooseVersion(np.__version__) < '1.9':
references = references.tolist()
self.quantiles_ = []
for feature_idx in range(n_features):
column_nnz_data = X.data[X.indptr[feature_idx]:
X.indptr[feature_idx + 1]]
if len(column_nnz_data) > self.subsample:
column_subsample = (self.subsample * len(column_nnz_data) //
n_samples)
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=column_subsample,
dtype=X.dtype)
else:
column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
column_data[:column_subsample] = random_state.choice(
column_nnz_data, size=column_subsample, replace=False)
else:
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=len(column_nnz_data),
dtype=X.dtype)
else:
column_data = np.zeros(shape=n_samples, dtype=X.dtype)
column_data[:len(column_nnz_data)] = column_nnz_data
if not column_data.size:
# if no nnz, an error will be raised for computing the
# quantiles. Force the quantiles to be zeros.
self.quantiles_.append([0] * len(references))
else:
self.quantiles_.append(nanpercentile(column_data, references))
self.quantiles_ = np.transpose(self.quantiles_)
def fit(self, X, y=None):
"""Compute the quantiles used for transforming.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
self : object
"""
if self.n_quantiles <= 0:
raise ValueError("Invalid value for 'n_quantiles': %d. "
"The number of quantiles must be at least one."
% self.n_quantiles)
if self.subsample <= 0:
raise ValueError("Invalid value for 'subsample': %d. "
"The number of subsamples must be at least one."
% self.subsample)
if self.n_quantiles > self.subsample:
raise ValueError("The number of quantiles cannot be greater than"
" the number of samples used. Got {} quantiles"
" and {} samples.".format(self.n_quantiles,
self.subsample))
X = self._check_inputs(X)
rng = check_random_state(self.random_state)
# Create the quantiles of reference
self.references_ = np.linspace(0, 1, self.n_quantiles,
endpoint=True)
if sparse.issparse(X):
self._sparse_fit(X, rng)
else:
self._dense_fit(X, rng)
return self
def _transform_col(self, X_col, quantiles, inverse):
"""Private function to transform a single feature"""
if self.output_distribution == 'normal':
output_distribution = 'norm'
else:
output_distribution = self.output_distribution
output_distribution = getattr(stats, output_distribution)
if not inverse:
lower_bound_x = quantiles[0]
upper_bound_x = quantiles[-1]
lower_bound_y = 0
upper_bound_y = 1
else:
lower_bound_x = 0
upper_bound_x = 1
lower_bound_y = quantiles[0]
upper_bound_y = quantiles[-1]
# for inverse transform, match a uniform PDF
with np.errstate(invalid='ignore'): # hide NaN comparison warnings
X_col = output_distribution.cdf(X_col)
# find index for lower and higher bounds
with np.errstate(invalid='ignore'): # hide NaN comparison warnings
lower_bounds_idx = (X_col - BOUNDS_THRESHOLD <
lower_bound_x)
upper_bounds_idx = (X_col + BOUNDS_THRESHOLD >
upper_bound_x)
isfinite_mask = ~np.isnan(X_col)
X_col_finite = X_col[isfinite_mask]
if not inverse:
# Interpolate in one direction and in the other and take the
# mean. This is in case of repeated values in the features
# and hence repeated quantiles
#
# If we don't do this, only one extreme of the duplicated is
# used (the upper when we do ascending, and the
# lower for descending). We take the mean of these two
X_col[isfinite_mask] = .5 * (
np.interp(X_col_finite, quantiles, self.references_)
- np.interp(-X_col_finite, -quantiles[::-1],
-self.references_[::-1]))
else:
X_col[isfinite_mask] = np.interp(X_col_finite,
self.references_, quantiles)
X_col[upper_bounds_idx] = upper_bound_y
X_col[lower_bounds_idx] = lower_bound_y
# for forward transform, match the output PDF
if not inverse:
with np.errstate(invalid='ignore'): # hide NaN comparison warnings
X_col = output_distribution.ppf(X_col)
# find the value to clip the data to avoid mapping to
# infinity. Clip such that the inverse transform will be
# consistent
clip_min = output_distribution.ppf(BOUNDS_THRESHOLD -
np.spacing(1))
clip_max = output_distribution.ppf(1 - (BOUNDS_THRESHOLD -
np.spacing(1)))
X_col = np.clip(X_col, clip_min, clip_max)
return X_col
def _check_inputs(self, X, accept_sparse_negative=False):
"""Check inputs before fit and transform"""
X = check_array(X, accept_sparse='csc', copy=self.copy,
dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
# we only accept positive sparse matrix when ignore_implicit_zeros is
# false and that we call fit or transform.
with np.errstate(invalid='ignore'): # hide NaN comparison warnings
if (not accept_sparse_negative and not self.ignore_implicit_zeros
and (sparse.issparse(X) and np.any(X.data < 0))):
raise ValueError('QuantileTransformer only accepts'
' non-negative sparse matrices.')
# check the output PDF
if self.output_distribution not in ('normal', 'uniform'):
raise ValueError("'output_distribution' has to be either 'normal'"
" or 'uniform'. Got '{}' instead.".format(
self.output_distribution))
return X
def _check_is_fitted(self, X):
"""Check the inputs before transforming"""
check_is_fitted(self, 'quantiles_')
# check that the dimension of X are adequate with the fitted data
if X.shape[1] != self.quantiles_.shape[1]:
raise ValueError('X does not have the same number of features as'
' the previously fitted data. Got {} instead of'
' {}.'.format(X.shape[1],
self.quantiles_.shape[1]))
def _transform(self, X, inverse=False):
"""Forward and inverse transform.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.
inverse : bool, optional (default=False)
If False, apply forward transform. If True, apply
inverse transform.
Returns
-------
X : ndarray, shape (n_samples, n_features)
Projected data
"""
if sparse.issparse(X):
for feature_idx in range(X.shape[1]):
column_slice = slice(X.indptr[feature_idx],
X.indptr[feature_idx + 1])
X.data[column_slice] = self._transform_col(
X.data[column_slice], self.quantiles_[:, feature_idx],
inverse)
else:
for feature_idx in range(X.shape[1]):
X[:, feature_idx] = self._transform_col(
X[:, feature_idx], self.quantiles_[:, feature_idx],
inverse)
return X
def transform(self, X):
"""Feature-wise transformation of the data.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The projected data.
"""
X = self._check_inputs(X)
self._check_is_fitted(X)
return self._transform(X, inverse=False)
def inverse_transform(self, X):
"""Back-projection to the original space.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The projected data.
"""
X = self._check_inputs(X, accept_sparse_negative=True)
self._check_is_fitted(X)
return self._transform(X, inverse=True)
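# A minimal usage sketch (hypothetical helper), assuming the QuantileTransformer
# defined above: with output_distribution='normal' a heavily skewed feature is
# mapped onto an approximately standard-normal scale; inverse_transform maps
# values back towards the original scale.
def _demo_quantile_transformer_normal():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.exponential(size=(1000, 1))     # heavily skewed input
    qt = QuantileTransformer(n_quantiles=100,
                             output_distribution='normal', random_state=0)
    Xt = qt.fit_transform(X)
    # mean close to 0 and standard deviation close to 1
    return float(Xt.mean()), float(Xt.std())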
def quantile_transform(X, axis=0, n_quantiles=1000,
output_distribution='uniform',
ignore_implicit_zeros=False,
subsample=int(1e5),
random_state=None,
copy=False):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently.
The cumulative distribution function of a feature is used to project the
original values. Features values of new/unseen data that fall below
or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : array-like, sparse matrix
The data to transform.
axis : int, (default=0)
Axis used to compute the quantiles along. If 0,
transform each feature, otherwise (if 1) transform each sample.
n_quantiles : int, optional (default=1000)
Number of quantiles to be computed. It corresponds to the number
of landmarks used to discretize the cumulative distribution function.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
copy : boolean, optional, (default=False)
Set to True to force a copy; by default the transformation is performed
inplace when the input is already a numpy array, which will modify the
passed data. Note that this default differs from :class:`QuantileTransformer`.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The transformed data.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import quantile_transform
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> quantile_transform(X, n_quantiles=10, random_state=0)
... # doctest: +ELLIPSIS
array([...])
See also
--------
QuantileTransformer : Performs quantile-based scaling using the
``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`).
power_transform : Maps data to a normal distribution using a
power transformation.
scale : Performs standardization that is faster, but less robust
to outliers.
robust_scale : Performs robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
n = QuantileTransformer(n_quantiles=n_quantiles,
output_distribution=output_distribution,
subsample=subsample,
ignore_implicit_zeros=ignore_implicit_zeros,
random_state=random_state,
copy=copy)
if axis == 0:
return n.fit_transform(X)
elif axis == 1:
return n.fit_transform(X.T).T
else:
raise ValueError("axis should be either equal to 0 or 1. Got"
" axis={}".format(axis))
class PowerTransformer(BaseEstimator, TransformerMixin):
"""Apply a power transform featurewise to make data more Gaussian-like.
Power transforms are a family of parametric, monotonic transformations
that are applied to make data more Gaussian-like. This is useful for
modeling issues related to heteroscedasticity (non-constant variance),
or other situations where normality is desired.
Currently, PowerTransformer supports the Box-Cox transform and the
Yeo-Johnson transform. The optimal parameter for stabilizing variance and
minimizing skewness is estimated through maximum likelihood.
Box-Cox requires input data to be strictly positive, while Yeo-Johnson
supports both positive or negative data.
By default, zero-mean, unit-variance normalization is applied to the
transformed data.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
method : str, (default='yeo-johnson')
The power transform method. Available methods are:
- 'yeo-johnson' [1]_, works with positive and negative values
- 'box-cox' [2]_, only works with strictly positive values
standardize : boolean, default=True
Set to True to apply zero-mean, unit-variance normalization to the
transformed output.
copy : boolean, optional, default=True
Set to False to perform inplace computation during transformation.
Attributes
----------
lambdas_ : array of float, shape (n_features,)
The parameters of the power transformation for the selected features.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import PowerTransformer
>>> pt = PowerTransformer()
>>> data = [[1, 2], [3, 2], [4, 5]]
>>> print(pt.fit(data))
PowerTransformer(copy=True, method='yeo-johnson', standardize=True)
>>> print(pt.lambdas_)
[1.38668178e+00 5.93926346e-09]
>>> print(pt.transform(data))
[[-1.31616039 -0.70710678]
[ 0.20998268 -0.70710678]
[ 1.1061777 1.41421356]]
See also
--------
power_transform : Equivalent function without the estimator API.
QuantileTransformer : Maps data to a standard normal distribution with
the parameter `output_distribution='normal'`.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
References
----------
.. [1] <NAME> and <NAME>, "A new family of power transformations to
improve normality or symmetry." Biometrika, 87(4), pp.954-959,
(2000).
.. [2] <NAME> and <NAME>, "An Analysis of Transformations", Journal
of the Royal Statistical Society B, 26, 211-252 (1964).
"""
def __init__(self, method='yeo-johnson', standardize=True, copy=True):
self.method = method
self.standardize = standardize
self.copy = copy
def fit(self, X, y=None):
"""Estimate the optimal parameter lambda for each feature.
The optimal lambda parameter for minimizing skewness is estimated on
each feature independently using maximum likelihood.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data used to estimate the optimal transformation parameters.
y : Ignored
Returns
-------
self : object
"""
self._fit(X, y=y, force_transform=False)
return self
def fit_transform(self, X, y=None):
return self._fit(X, y, force_transform=True)
def _fit(self, X, y=None, force_transform=False):
X = self._check_input(X, check_positive=True, check_method=True)
if not self.copy and not force_transform: # if call from fit()
X = X.copy() # force copy so that fit does not change X inplace
optim_function = {'box-cox': self._box_cox_optimize,
'yeo-johnson': self._yeo_johnson_optimize
}[self.method]
self.lambdas_ = []
for col in X.T:
with np.errstate(invalid='ignore'):  # hide NaN warnings
self.lambdas_.append(optim_function(col))
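# A minimal usage sketch (hypothetical helper), assuming the full
# PowerTransformer implementation (only the beginning of _fit appears above):
# Box-Cox requires strictly positive input, while the default Yeo-Johnson also
# accepts zeros and negative values.
def _demo_power_transformer_methods():
    import numpy as np
    X_pos = np.array([[1.], [3.], [4.]])        # strictly positive
    X_mixed = np.array([[-1.], [0.], [2.]])     # contains non-positive values
    lam_bc = PowerTransformer(method='box-cox').fit(X_pos).lambdas_
    lam_yj = PowerTransformer(method='yeo-johnson').fit(X_mixed).lambdas_
    # fitting 'box-cox' on X_mixed would raise a ValueError
    return lam_bc, lam_yj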
import numpy as np
from taps.models.models import Model
class MullerBrown(Model):
""" Muller Brown Potential
.. math::
\\begin{equation}
V\\left(x,y\\right) =
\\sum_{\\mu=1}^{4}{A_\\mu e^{a_\\mu \\left(x-x_\\mu^0\\right)^2
+ b_\\mu \\left(x-x_\\mu^0\\right) \\left(y-y_\\mu^0\\right)
+ c_\\mu\\left(y-y_\\mu^0\\right)^2}}
\\end{equation}
* Initial position = (-0.55822365, 1.44172582)
* Final position = (0.6234994, 0.02803776)
Parameters
----------
A = np.array([-200, -100, -170, 15])
a = np.array([-1, -1, -6.5, 0.7])
b = np.array([0, 0, 11, 0.6])
c = np.array([-10, -10, -6.5, 0.7])
x0 = np.array([1, 0, -0.5, -1])
y0 = np.array([0, 0.5, 1.5, 1])
potential_unit = 'unitless'
Example
-------
>>> import numpy as np
>>> N = 300
>>> x = np.linspace(-0.55822365, 0.6234994, N)
>>> y = np.linspace(1.44172582, 0.02803776, N)
>>> paths.coords = np.array([x, y])
"""
implemented_properties = {'potential', 'gradients', 'hessian'}
A = np.array([-200, -100, -170, 15]) / 100
a = np.array([-1, -1, -6.5, 0.7])
b = np.array([0, 0, 11, 0.6])
c = np.array([-10, -10, -6.5, 0.7])
x0 = np.array([1, 0, -0.5, -1])
y0 = np.array([0, 0.5, 1.5, 1])
potential_unit = 'unitless'
def calculate(self, paths, coords, properties=['potential'],
**kwargs):
"""
x : N shape array
y : N shape array
return D x N
"""
if not isinstance(coords, np.ndarray):
coords = coords.coords
if len(coords.shape) == 1:
coords = coords[:, np.newaxis]
x, y = coords
A, a, b, c, x0, y0 = self.A, self.a, self.b, self.c, self.x0, self.y0
x_x0 = (x[:, np.newaxis] - x0) # N x 4
y_y0 = (y[:, np.newaxis] - y0) # N x 4
Vk = A * np.exp(a * x_x0 ** 2 + b * x_x0 * y_y0 + c * y_y0 ** 2)  # N x 4
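# Standalone sketch of the docstring formula (hypothetical helper; the remainder
# of calculate() is not shown above): evaluate V(x, y) as the sum of the four
# Gaussian terms, using the same rescaled parameters as the class attributes.
def _muller_brown_potential(x, y):
    A = np.array([-200, -100, -170, 15]) / 100
    a = np.array([-1, -1, -6.5, 0.7])
    b = np.array([0, 0, 11, 0.6])
    c = np.array([-10, -10, -6.5, 0.7])
    x0 = np.array([1, 0, -0.5, -1])
    y0 = np.array([0, 0.5, 1.5, 1])
    dx = np.asarray(x)[:, np.newaxis] - x0      # N x 4
    dy = np.asarray(y)[:, np.newaxis] - y0      # N x 4
    Vk = A * np.exp(a * dx ** 2 + b * dx * dy + c * dy ** 2)
    return Vk.sum(axis=1)                       # N-shaped potential values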