# Source repository: Cyberface/nrutils_dev -- file: nrutils/handlers/bam.py
#
from nrutils.core.basics import *
from glob import glob as ls
from os.path import getctime
from numpy import array,cross,zeros,dot,abs,sqrt,sign
from numpy.linalg import inv, norm
from numpy import sum as asum
# Determine whether the folder containing a metadata file is valid: can it be used to reference waveform data?
def validate( metadata_file_location, config = None ):
#
from os.path import isfile as exist
from os.path import abspath,join,basename
from os import pardir
#
run_dir = abspath( join( metadata_file_location, pardir ) )+'/'
# The folder is valid if there is l=m=2 mode data in the following dirs
status = len( ls( run_dir + '/Psi4ModeDecomp/psi3col*l2.m2.gz' ) ) > 0
# ignore directories with certain tags in filename
ignore_tags = ['backup','old']
for tag in ignore_tags:
status = status and not ( tag in run_dir )
#
a = basename(metadata_file_location).split(config.metadata_id)[0]
b = parent(metadata_file_location)
status = status and ( a in b )
#
return status
# Learn the metadata (file) for this type of NR waveform
def learn_metadata( metadata_file_location ):
# Try to load the related par file as well as the metadata file
par_file_location = metadata_file_location[:-3]+'par'
raw_metadata = smart_object( [metadata_file_location,par_file_location] )
# shorthand
y = raw_metadata
# # Useful for debugging -- show what's in y
# y.show()
#
standard_metadata = smart_object()
# shorthand
x = standard_metadata
# Keep NOTE of important information
x.note = ''
# Creation date of metadata file
x.date_number = getctime( metadata_file_location )
'''
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Calculate derivative quantities %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
'''
# Masses
x.m1 = y.mass1
x.m2 = y.mass2
# NOTE that some bbh files may not have after_junkradiation_spin data (i.e. empty). In these cases we will take the initial spin data
S1 = array( [ y.after_junkradiation_spin1x, y.after_junkradiation_spin1y, y.after_junkradiation_spin1z ] )
S2 = array( [ y.after_junkradiation_spin2x, y.after_junkradiation_spin2y, y.after_junkradiation_spin2z ] )
#%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%#
# NOTE that sometimes the afterjunk spins may not be stored correctly, or at all, in the bbh files. Therefore an additional validation step is needed here.
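# Added descriptive comment (not from the original source): casting to an object array and
# then to bool maps zero, empty, or missing entries to False, so isafterjunk is True only
# when every after-junk spin component is present and nonzero. A side effect is that a spin
# component that is legitimately exactly zero is also treated as missing.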
S1bool = S1.astype(list).astype(bool)
S2bool = S2.astype(list).astype(bool)
x.isafterjunk = S1bool.all() and S2bool.all()
#%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%@%%#
# If the data is to be stored using afterjunk parameters:
if x.isafterjunk:
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Use afterjunk information #
msg = cyan('Initial parameters corresponding to the bbh file\'s after-junk time will be used to populate metadata.')
alert(msg,'bam.py')
x.note += msg
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# find puncture data locations
puncture_data_1_location = ls( parent( metadata_file_location )+ 'moving_puncture_integrate1*' )[0]
puncture_data_2_location = ls( parent( metadata_file_location )+ 'moving_puncture_integrate2*' )[0]
# load puncture data
puncture_data_1,_ = smart_load( puncture_data_1_location )
puncture_data_2,_ = smart_load( puncture_data_2_location )
# Mask away the initial junk region using the after-junk time given in the bbh metadata
after_junkradiation_time = y.after_junkradiation_time
after_junkradiation_mask = puncture_data_1[:,-1] > after_junkradiation_time
puncture_data_1 = puncture_data_1[ after_junkradiation_mask, : ]
puncture_data_2 = puncture_data_2[ after_junkradiation_mask, : ]
R1 = array( [ puncture_data_1[0,0],puncture_data_1[0,1],puncture_data_1[0,2], ] )
R2 = array( [ puncture_data_2[0,0],puncture_data_2[0,1],puncture_data_2[0,2], ] )
# NOTE that here the shift is actually contained within puncture_data, and NOTE that the shift is -1 times the velocity
P1 = x.m1 * -array( [ puncture_data_1[0,3],puncture_data_1[0,4],puncture_data_1[0,5], ] )
P2 = x.m2 * -array( [ puncture_data_2[0,3],puncture_data_2[0,4],puncture_data_2[0,5], ] )
else:
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Use initial data information #
msg = cyan('Warning:')+yellow(' The afterjunk spins appear to have been stored incorrectly. All parameters will be taken from the initial data (as stored in the bbh files). ')
warning(msg,'bam.py')
x.note += msg
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Spins
S1 = array( [ y.initial_bh_spin1x, y.initial_bh_spin1y, y.initial_bh_spin1z ] )
S2 = array( [ y.initial_bh_spin2x, y.initial_bh_spin2y, y.initial_bh_spin2z ] )
# Momenta
P1 = array( [ y.initial_bh_momentum1x, y.initial_bh_momentum1y, y.initial_bh_momentum1z ] )
P2 = array( [ y.initial_bh_momentum2x, y.initial_bh_momentum2y, y.initial_bh_momentum2z ] )
# positions
R1 = array( [ y.initial_bh_position1x, y.initial_bh_position1y, y.initial_bh_position1z ] )
R2 = array( [ y.initial_bh_position2x, y.initial_bh_position2y, y.initial_bh_position2z ] )
# Estimate the component angular momenta
try:
L1 = cross(R1,P1)
L2 = cross(R2,P2)
except:
error('There was an insurmountable problem encountered when trying to load the initial binary configuration. For example, %s. The guy at the soup shop says "No soup for you!!"'%red('P1 = '+str(P1)))
# Extract and store the initial adm energy
x.madm = y.initial_ADM_energy
# Store the initial linear momenta
x.P1 = P1; x.P2 = P2
x.S1 = S1; x.S2 = S2
# Estimate the initial binary separation (after junk), and warn the user if this value differs significantly from the one stored in the bbh file
x.b = norm(R1-R2) # float( y.initial_separation )
if abs( y.initial_separation - norm(R1-R2) ) > 1e-1:
msg = cyan('Warning:')+' The estimated after-junk binary separation is significantly different from the value stored in the bbh file: '+yellow('x from calculation = %f, x from bbh file=%f' % (norm(R1-R2),y.initial_separation) )+'. The user should understand whether this is an error or not.'
x.note += msg
warning(msg,'bam.py')
# Let the user know that the binary separation is possibly bad
if x.b<4:
msg = cyan('Warning:')+' The estimated initial binary separation is very small. This may be due to an error in the puncture data. You may wish to use the initial binary separation from the bbh file which is %f'%y.initial_separation+'. '
warning(msg,'bam.py')
x.note += msg
#
x.R1 = R1; x.R2 = R2
#
x.L1 = L1; x.L2 = L2
#
x.valid = True
# Load irreducible mass data
irr_mass_file_list = ls(parent(metadata_file_location)+'hmass_2*gz')
if len(irr_mass_file_list)>0:
irr_mass_file = irr_mass_file_list[0]
irr_mass_data,mass_status = smart_load(irr_mass_file)
else:
mass_status = False
# Load spin data
spin_file_list = ls(parent(metadata_file_location)+'hspin_2*gz')
if len(spin_file_list)>0:
spin_file = spin_file_list[0]
spin_data,spin_status = smart_load(spin_file)
else:
spin_status = False
# Estimate final mass and spin
if mass_status and spin_status:
Sf = spin_data[-1,1:]
irrMf = irr_mass_data[-1,1]
x.__irrMf__ = irrMf
irrMf_squared = irrMf**2
Sf_squared = norm(Sf)**2
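# Added descriptive comment: the line below applies the Christodoulou relation
# M_f^2 = M_irr^2 + |S_f|^2 / (4 M_irr^2) to recover the final horizon mass from the
# irreducible mass and final spin, and then normalizes by the total initial mass m1+m2.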
x.mf = sqrt( irrMf_squared + Sf_squared / (4*irrMf_squared) ) / (x.m1+x.m2)
#
x.Sf = Sf
x.Xf = x.Sf/(x.mf*x.mf)
x.xf = sign(x.Sf[-1])*norm(x.Sf)/(x.mf*x.mf)
else:
from numpy import nan
x.Sf = nan*array([0.0,0.0,0.0])
x.Xf = nan*array([0.0,0.0,0.0])
x.mf = nan
x.xf = nan
#
return standard_metadata, raw_metadata
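# Hedged usage sketch (added comment, not part of the original module; the file path is
# hypothetical -- only the return signature follows from the code above):
#
#   standard_md, raw_md = learn_metadata( '/path/to/run/simulation.bbh' )
#   m1, m2, b, xf = standard_md.m1, standard_md.m2, standard_md.b, standard_md.xf
#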
# There are instances when having the extraction radius rather than the extraction parameter is useful.
# Here we define a function which maps between extraction_parameter and extraction radius -- IF such
# a map can be constructed.
def extraction_map( this, # this may be an nrsc object or a gwylm object (it must have a raw_metadata attribute)
extraction_parameter ): # The extraction parameter that will be converted to radius
'''Given an extraction parameter, return an extraction radius'''
# NOTE that while some BAM runs have extraction radius information stored in the bbh file in various ways, this does not appear to be the case for all simulations. The invariants_modes_r field appears to be more reliable.
if 'invariants_modes_r' in this.raw_metadata.__dict__:
_map_ = [ float(k) for k in this.raw_metadata.invariants_modes_r ]
elif 'extraction_radius' in this.raw_metadata.__dict__:
# We start from 1 not 0 here because the first element should be a string "finite-radius"
_map_ = [ float(k) for k in this.raw_metadata.extraction_radius[1:] ]
#
extraction_radius = _map_[ extraction_parameter-1 ]
return extraction_radius
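# Hedged usage sketch (added; "entry" is an illustrative placeholder for any object with
# a raw_metadata attribute):
#
#   r_extraction = extraction_map( entry, 3 )   # extraction radius (in M) for extraction parameter 3
#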
# Estimate a good extraction radius and level for an input scentry object from the BAM catalog
def infer_default_level_and_extraction_parameter( this, # An scentry object
desired_exraction_radius=None, # (Optional) The desired extraction radius in M, where M is the initial ADM mass
verbose=None ): # Toggle to let the people know
'''Estimate a good extraction radius and level for an input scentry object from the BAM catalog'''
# NOTE that input must be scentry object
# Import useful things
from glob import glob
from numpy import array,argmin
# Handle the extraction radius input
# NOTE that the default value of 90 is chosen to ensure that there is always a ringdown
desired_exraction_radius = 90 if desired_exraction_radius is None else desired_exraction_radius
# Find all l=m=2 waveforms
search_string = this.simdir() + '/Psi4ModeDecomp/*l2.m2*.gz'
file_list = glob( search_string )
# For all results
exr,lev,rad = [],[],[]
for f in file_list:
# Split filename string to find level and extraction parameter
f = f.replace('//','/')
f = f.split('/')[-1]
parts = f.split('.') # e.g. "psi3col.r6.l6.l2.m2.gz".split('.')
exr_,lev_ = int(parts[1][-1]),int(parts[2][-1])
# Also get related extraction radius (M)
rad_ = extraction_map( this, exr_ )
# Append lists
exr.append(exr_);lev.append(lev_);rad.append(rad_)
# NOTE that we will use the extraction radius that is closest to the desired extraction radius
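# NOTE: the original file is truncated at this point. A minimal sketch of how the selection
# presumably concludes (an assumption, not recovered from the source): pick the entry whose
# extraction radius is closest to the desired one, e.g.
#
#   k = argmin( abs( array(rad) - desired_exraction_radius ) )
#   extraction_parameter, level = exr[k], lev[k]
#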
0x0d, 2), (IDX_2, 0x0f, 2), (IDX_2, 0x09, 2), (IDX_2, 0x03, 2),
(IDX_2, 0x0d, 2), (IDX_2, 0x0a, 2), (IDX_8, 0x24, 8), (IDX_2, 0x08, 2), (IDX_3, 0x06, 3),
]
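# Added note on the data above: the script is truncated at the top, but from the
# reconstruction loop below each entry appears to be a tuple (fam, rval, n), where fam is a
# list from which n (row, column) coordinate pairs are popped, rval is the required sum of
# those cells, and n is the number of cells in the run. The IDX_* names appear to be the
# coordinate families defined in the missing part of the file.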
# --------------------------------------------------------------------------------------------------
if __name__ == "__main__":
s = Solver() # our z3 solver
x_vars, var2xy = { }, { }
# -------------------------------------------------------------------------
# Reconstruct equations
# -------------------------------------------------------------------------
print '[+] Reconstructing equations...'
for fam, rval, n in equations: # for each equation
eq = []
for i in range(n):
y, x = fam.pop(0), fam.pop(0) # get coordinates from the right family
var = 'x_%d_%d' % (y, x) # create 'x' variable
if var not in x_vars: # variable already exists?
x_vars[var] = BitVec(var, 32) # create a bitvector variable
var2xy[ x_vars[var] ] = (y, x) # bitvector --> coordinates
eq.append( x_vars[var] )
# add the equation to the constraint set
add = eq[0]
for i in range(1, n): add += eq[i]
s.add( add == rval )
print '[+]', add, '=', str(rval)
for i in range(0, n): # all variables must be different
for j in range(i+1, n):
s.add( And(eq[i] != eq[j]) )
for _, x in x_vars.iteritems(): # all variables are in range [1, 9]
s.add( And(x > 0, x <= 9) )
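# Added note: together these constraints encode a Kakuro-style puzzle -- every run of cells
# must sum to its clue using pairwise-distinct digits in [1, 9]. The pairwise inequalities
# above could equivalently be written with z3's Distinct(*eq), and the And() wrapper around
# a single comparison is redundant but harmless.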
# -------------------------------------------------------------------------
# Solve equations
# -------------------------------------------------------------------------
print '[+] Solving equations...'
if s.check() == sat:
print '[+] There is a solution!'
m = s.model()
inp = [ [0 for y in range(20)] for x in range(20)]
for _, var in x_vars.iteritems():
y, x = var2xy[var]
inp[y][x] = m.evaluate(var).as_long() # extract solution
print '[+] Dumping array:'
for row in inp:
print ''.join( [str(r) for r in row])
print
print '[+] Packed array:',
pack = ''
for row in inp:
pack += ''.join( [str(r) for r in row])
print pack
else:
print '[+] No solution found :('
# --------------------------------------------------------------------------------------------------
'''
ispo@nogirl:~/ctf/2017/hitcon_ctf$ ./sakura_crack.py
[+] Reconstructing equations...
[+] x_1_6 + x_2_6 = 17
[+] x_1_7 + x_2_7 = 3
[+] x_2_1 + x_3_1 = 3
[+] x_2_2 + x_3_2 + x_4_2 = 24
[+] x_2_4 + x_3_4 = 4
[+] x_1_6 + x_1_7 = 11
[+] x_2_5 + x_3_5 + x_4_5 = 24
[+] x_2_8 + x_3_8 = 17
[+] x_2_9 + x_3_9 = 3
[+] x_2_1 + x_2_2 = 8
[+] x_2_4 + x_2_5 + x_2_6 + x_2_7 + x_2_8 + x_2_9 = 30
[+] x_3_3 + x_4_3 + x_5_3 = 23
[+] x_3_1 + x_3_2 + x_3_3 + x_3_4 + x_3_5 = 26
[+] x_4_6 + x_5_6 = 3
[+] x_3_8 + x_3_9 = 9
[+] x_4_2 + x_4_3 = 17
[+] x_4_5 + x_4_6 = 11
[+] x_5_4 + x_6_4 = 4
[+] x_5_7 + x_6_7 + x_7_7 = 6
[+] x_5_3 + x_5_4 = 9
[+] x_5_6 + x_5_7 = 3
[+] x_6_5 + x_7_5 + x_8_5 = 6
[+] x_6_8 + x_7_8 + x_8_8 = 7
[+] x_7_1 + x_8_1 = 4
[+] x_7_2 + x_8_2 = 16
[+] x_6_4 + x_6_5 = 4
[+] x_6_7 + x_6_8 = 3
[+] x_7_6 + x_8_6 = 16
[+] x_7_9 + x_8_9 = 3
[+] x_7_1 + x_7_2 = 10
[+] x_8_3 + x_9_3 = 3
[+] x_7_5 + x_7_6 + x_7_7 + x_7_8 + x_7_9 = 19
[+] x_8_4 + x_9_4 = 17
[+] x_8_1 + x_8_2 + x_8_3 + x_8_4 + x_8_5 + x_8_6 = 30
[+] x_8_8 + x_8_9 = 3
[+] x_9_3 + x_9_4 = 10
[+] x_1_12 + x_2_12 + x_3_12 + x_4_12 + x_5_12 = 16
[+] x_1_13 + x_2_13 = 4
[+] x_1_18 + x_2_18 = 17
[+] x_1_19 + x_2_19 = 4
[+] x_1_12 + x_1_13 = 5
[+] x_2_14 + x_3_14 = 17
[+] x_2_16 + x_3_16 + x_4_16 + x_5_16 = 10
[+] x_1_18 + x_1_19 = 10
[+] x_2_17 + x_3_17 + x_4_17 + x_5_17 + x_6_17 = 35
[+] x_2_12 + x_2_13 + x_2_14 = 17
[+] x_3_11 + x_4_11 + x_5_11 = 24
[+] x_2_16 + x_2_17 + x_2_18 + x_2_19 = 21
[+] x_3_15 + x_4_15 = 17
[+] x_3_11 + x_3_12 = 8
[+] x_3_14 + x_3_15 + x_3_16 + x_3_17 = 27
[+] x_4_13 + x_5_13 + x_6_13 + x_7_13 + x_8_13 = 16
[+] x_4_11 + x_4_12 + x_4_13 = 18
[+] x_4_15 + x_4_16 + x_4_17 = 15
[+] x_5_14 + x_6_14 + x_7_14 + x_8_14 = 30
[+] x_5_18 + x_6_18 + x_7_18 + x_8_18 + x_9_18 = 16
[+] x_5_19 + x_6_19 + x_7_19 = 6
[+] x_5_11 + x_5_12 + x_5_13 + x_5_14 = 17
[+] x_5_16 + x_5_17 + x_5_18 + x_5_19 = 17
[+] x_6_15 + x_7_15 = 17
[+] x_6_13 + x_6_14 + x_6_15 = 21
[+] x_6_17 + x_6_18 + x_6_19 = 13
[+] x_7_16 + x_8_16 = 3
[+] x_8_11 + x_9_11 = 16
[+] x_7_13 + x_7_14 + x_7_15 + x_7_16 = 21
[+] x_8_12 + x_9_12 = 3
[+] x_7_18 + x_7_19 = 8
[+] x_8_17 + x_9_17 = 16
[+] x_8_11 + x_8_12 + x_8_13 + x_8_14 = 18
[+] x_8_16 + x_8_17 + x_8_18 = 10
[+] x_9_11 + x_9_12 = 11
[+] x_9_17 + x_9_18 = 10
[+] x_11_4 + x_12_4 = 9
[+] x_11_5 + x_12_5 + x_13_5 + x_14_5 = 10
[+] x_11_7 + x_12_7 = 13
[+] x_11_8 + x_12_8 + x_13_8 + x_14_8 + x_15_8 + x_16_8 + x_17_8 + x_18_8 = 44
[+] x_12_2 + x_13_2 + x_14_2 + x_15_2 + x_16_2 + x_17_2 + x_18_2 + x_19_2 = 44
[+] x_11_4 + x_11_5 = 5
[+] x_12_3 + x_13_3 = 6
[+] x_11_7 + x_11_8 = 11
[+] x_12_6 + x_13_6 = 8
[+] x_12_9 + x_13_9 = 5
[+] x_12_2 + x_12_3 + x_12_4 + x_12_5 + x_12_6 + x_12_7 + x_12_8 + x_12_9 = 39
[+] x_13_1 + x_14_1 = 8
[+] x_13_1 + x_13_2 + x_13_3 = 8
[+] x_13_5 + x_13_6 = 4
[+] x_14_4 + x_15_4 = 15
[+] x_13_8 + x_13_9 = 10
[+] x_14_7 + x_15_7 = 9
[+] x_14_1 + x_14_2 = 13
[+] x_14_4 + x_14_5 = 9
[+] x_15_3 + x_16_3 = 11
[+] x_14_7 + x_14_8 = 14
[+] x_15_6 + x_16_6 = 13
[+] x_15_2 + x_15_3 + x_15_4 = 21
[+] x_15_6 + x_15_7 + x_15_8 = 8
[+] x_16_5 + x_17_5 + x_18_5 + x_19_5 = 20
[+] x_16_9 + x_17_9 = 14
[+] x_16_2 + x_16_3 = 12
[+] x_17_1 + x_18_1 = 5
[+] x_16_5 + x_16_6 = 9
[+] x_17_4 + x_18_4 = 10
[+] x_16_8 + x_16_9 = 14
[+] x_17_7 + x_18_7 = 14
[+] x_17_1 + x_17_2 = 12
[+] x_17_4 + x_17_5 = 13
[+] x_18_3 + x_19_3 = 9
[+] x_17_7 + x_17_8 + x_17_9 = 16
[+] x_18_6 + x_19_6 = 6
[+] x_18_1 + x_18_2 + x_18_3 + x_18_4 + x_18_5 + x_18_6 + x_18_7 + x_18_8 = 40
[+] x_19_2 + x_19_3 = 4
[+] x_19_5 + x_19_6 = 11
[+] x_11_12 + x_12_12 + x_13_12 + x_14_12 + x_15_12 = 22
[+] x_11_13 + x_12_13 = 16
[+] x_11_14 + x_12_14 = 7
[+] x_11_16 + x_12_16 + x_13_16 + x_14_16 = 10
[+] x_11_17 + x_12_17 = 11
[+] x_11_12 + x_11_13 + x_11_14 = 10
[+] x_11_16 + x_11_17 = 13
[+] x_12_15 + x_13_15 = 9
[+] x_12_18 + x_13_18 = 10
[+] x_12_19 + x_13_19 = 12
[+] x_12_12 + x_12_13 + x_12_14 + x_12_15 + x_12_16 + x_12_17 + x_12_18 + x_12_19 = 42
[+] x_13_11 + x_14_11 = 17
[+] x_13_11 + x_13_12 = 10
[+] x_13_15 + x_13_16 = 8
[+] x_13_18 + x_13_19 = 10
[+] x_14_17 + x_15_17 = 9
[+] x_14_11 + x_14_12 = 14
[+] x_15_13 + x_16_13 = 8
[+] x_14_16 + x_14_17 = 8
[+] x_15_18 + x_16_18 + x_17_18 + x_18_18 + x_19_18 = 26
[+] x_15_12 + x_15_13 = 8
[+] x_16_14 + x_17_14 + x_18_14 + x_19_14 = 14
[+] x_15_17 + x_15_18 = 9
[+] x_16_19 + x_17_19 = 6
[+] x_17_11 + x_18_11 = 15
[+] x_16_13 + x_16_14 = 7
[+] x_17_12 + x_18_12 = 8
[+] x_17_15 + x_18_15 = 14
[+] x_16_18 + x_16_19 = 6
[+] x_17_11 + x_17_12 = 13
[+] x_17_14 + x_17_15 = 15
[+] x_18_13 + x_19_13 = 9
[+] x_18_16 + x_19_16 = 3
[+] x_17_18 + x_17_19 = 13
[+] x_18_17 + x_19_17 = 10
[+] x_18_11 + x_18_12 + x_18_13 + x_18_14 + x_18_15 + x_18_16 + x_18_17 + x_18_18 = 36
[+] x_19_13 + x_19_14 = 8
[+] x_19_16 + x_19_17 + x_19_18 = 6
[+] Solving equations...
[+] There is a solution!
[+] Dumping array:
00000000000000000000
00000092000041000091
01703781920063804683
02961800810710983700
00890920000936091500
00081012000821602843
00003101200004980931
03700293410003792062
01928370120712801720
00019000000920000910
00000000000000000000
00001406500027104900
00418357920089641275
01250130730910053037
07607208600860002600
00948051200053000360
00570180860005200051
04804905380850690085
01786329400736152840
00310740000003501230
[+] Packed array:
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.nets import resnet_v1
import numpy as np
class MultiModal(object):
def __init__(self, mode, learning_rate=0.0001):
self.mode = mode
self.learning_rate = learning_rate
self.hidden_repr_size = 128
self.no_classes = 19
def modDrop(self, layer, is_training, p_mod=.9, keep_prob=.8):
'''
As in Neverova et al. 'ModDrop': std dropout + modality dropping on the input
'''
layer = slim.dropout(layer, keep_prob=keep_prob,
is_training=is_training)
on = tf.cast(tf.random_uniform([1]) - p_mod < 0, tf.float32)
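# Added explanatory comments: tf.random_uniform([1]) draws a sample in [0, 1), so `on` is
# 1.0 with probability p_mod and 0.0 otherwise. During training the whole modality is
# therefore zeroed out with probability 1 - p_mod (in addition to the element-wise dropout
# above); at test time the tf.cond branch returns the layer unchanged.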
return tf.cond(is_training, lambda: on * layer, lambda: layer)
def single_stream(self, images, modality, is_training, reuse=False):
with tf.variable_scope(modality, reuse=reuse):
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
_, end_points = resnet_v1.resnet_v1_50(
images, self.no_classes, is_training=is_training, reuse=reuse)
# last bottleneck before logits
net = end_points[modality + '/resnet_v1_50/block4']
if 'autoencoder' in self.mode:
return net
with tf.variable_scope(modality + '/resnet_v1_50', reuse=reuse):
bottleneck = slim.conv2d(net, self.hidden_repr_size, [
7, 7], padding='VALID', activation_fn=tf.nn.relu, scope='f_repr')
net = slim.conv2d(bottleneck, self.no_classes, [
1, 1], activation_fn=None, scope='_logits_')
if ('train_hallucination' in self.mode or 'test_disc' in self.mode or 'train_eccv' in self.mode):
return net, bottleneck
return net
def D(self, features, reuse=False):
with tf.variable_scope('discriminator', reuse=reuse):
with slim.arg_scope([slim.fully_connected], weights_initializer=tf.contrib.layers.xavier_initializer(), biases_initializer=tf.constant_initializer(0.0)):
net = slim.fully_connected(
features, 1024, activation_fn=tf.nn.relu, scope='disc_fc1')
# ~ if self.mode == 'train_hallucination_p2':
res = slim.fully_connected(
net, 1024, activation_fn=None, scope='disc_res1')
net = tf.nn.relu(res + net)
res = slim.fully_connected(
net, 1024, activation_fn=None, scope='disc_res2')
net = tf.nn.relu(res + net)
net = slim.fully_connected(
net, 2048, activation_fn=tf.nn.relu, scope='disc_fc2')
net = slim.fully_connected(
net, 3076, activation_fn=tf.nn.relu, scope='disc_fc3')
if self.mode == 'train_hallucination_p2':
net = slim.fully_connected(
net, self.no_classes + 1, activation_fn=None, scope='disc_prob')
elif self.mode == 'train_hallucination':
net = slim.fully_connected(
net, 1, activation_fn=tf.sigmoid, scope='disc_prob')
else:
print('Unrecognized mode')
return net
def decoder(self, features, is_training, reuse=False):
# input features from the resnet should be (batch_size, 7, 7, 2048)
with tf.variable_scope('decoder', reuse=reuse):
with slim.arg_scope([slim.conv2d_transpose], padding='SAME', activation_fn=None,
stride=2, weights_initializer=tf.contrib.layers.xavier_initializer()):
with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
activation_fn=tf.nn.relu, is_training=is_training):
# (batch_size, 14, 14, channels)
net = slim.conv2d_transpose(
features, 1024, [3, 3], scope='conv_transpose1')
net = slim.batch_norm(net, scope='bn1')
# (batch_size, 28, 28, channels)
net = slim.conv2d_transpose(
net, 512, [3, 3], scope='conv_transpose2')
net = slim.batch_norm(net, scope='bn2')
# (batch_size, 56, 56, channels)
net = slim.conv2d_transpose(
net, 256, [5, 5], scope='conv_transpose3')
net = slim.batch_norm(net, scope='bn3')
# (batch_size, 112, 112, channels)
net = slim.conv2d_transpose(
net, 128, [5, 5], scope='conv_transpose4')
net = slim.batch_norm(net, scope='bn4')
net = slim.conv2d_transpose(net, 3, [
5, 5], activation_fn=tf.nn.tanh, scope='conv_transpose_out') # (batch_size, 224, 224, 3)
# rescale the tanh output to pixel range and subtract the RGB mean (matching the mean-subtracted input convention)
RGB_MEAN = tf.constant([123.68, 116.779, 103.939],
dtype=tf.float32, name='rgb_mean')
net = 255 * net - RGB_MEAN
return net
def build_model(self):
if '_rgb' in self.mode or '_depth' in self.mode:
modality = self.mode.split('_')[-1]
self.images = tf.placeholder(
tf.float32, [None, 224, 224, 3], modality + '_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.logits = self.single_stream(
self.images, modality=modality, is_training=self.is_training)
self.pred = tf.argmax(tf.squeeze(self.logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
if 'train_' in self.mode:
# training stuff
t_vars = tf.trainable_variables()
train_vars = t_vars
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits, labels=tf.one_hot(self.labels, self.no_classes)))
gradients = tf.gradients(self.loss, train_vars)
gradients = list(zip(gradients, train_vars))
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
# ~ self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.train_op = self.optimizer.apply_gradients(
grads_and_vars=gradients)
# summary stuff
loss_summary = tf.summary.scalar(
'classification_loss', self.loss)
accuracy_summary = tf.summary.scalar('accuracy', self.accuracy)
self.summary_op = tf.summary.merge(
[loss_summary, accuracy_summary])
elif 'train_double_stream' in self.mode:
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
if self.mode == 'train_double_stream_moddrop':
self.depth_images = self.modDrop(
self.depth_images, is_training=self.is_training)
self.rgb_images = self.modDrop(
self.rgb_images, is_training=self.is_training)
self.depth_logits = self.single_stream(
self.depth_images, modality='depth', is_training=self.is_training)
self.rgb_logits = self.single_stream(
self.rgb_images, modality='rgb', is_training=self.is_training)
self.logits = (self.depth_logits + self.rgb_logits) / 2.
self.pred = tf.argmax(tf.squeeze(self.logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
# training stuff
t_vars = tf.trainable_variables()
train_vars = t_vars
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits, labels=tf.one_hot(self.labels, self.no_classes)))
gradients = tf.gradients(self.loss, train_vars)
gradients = list(zip(gradients, train_vars))
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
# ~ self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.train_op = self.optimizer.apply_gradients(
grads_and_vars=gradients)
# summary stuff
loss_summary = tf.summary.scalar('classification_loss', self.loss)
accuracy_summary = tf.summary.scalar('accuracy', self.accuracy)
self.summary_op = tf.summary.merge(
[loss_summary, accuracy_summary])
elif self.mode == 'test_ensemble_baseline':
# not used, just to recycle eval function
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.rgb1_logits = self.single_stream(
self.rgb_images, modality='rgb1', is_training=self.is_training)
self.rgb_logits = self.single_stream(
self.rgb_images, modality='rgb', is_training=self.is_training)
self.logits = (self.rgb1_logits + self.rgb_logits) / 2.
self.pred = tf.argmax(tf.squeeze(self.logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
elif 'train_hallucination' in self.mode:
# depth & hall streams
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.depth_logits, self.depth_features = self.single_stream(
self.depth_images, modality='depth', is_training=self.is_training)
self.hall_logits, self.hall_features = self.single_stream(
self.rgb_images, modality='hall', is_training=self.is_training)
# overall acc_hall
self.pred = tf.argmax(tf.squeeze(self.hall_logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
# ~ #hall_acc
# ~ self.hall_pred = tf.argmax(tf.squeeze(self.hall_logits), 1)
# ~ self.hall_correct_pred = tf.equal(self.hall_pred, self.labels)
# ~ self.hall_accuracy = tf.reduce_mean(tf.cast(self.hall_correct_pred, tf.float32))
# ~ #depth_acc
# ~ self.depth_pred = tf.argmax(tf.squeeze(self.depth_logits), 1)
# ~ self.depth_correct_pred = tf.equal(self.depth_pred, self.labels)
# ~ self.depth_accuracy = tf.reduce_mean(tf.cast(self.depth_correct_pred, tf.float32))
# discriminator
self.logits_real = self.D(self.depth_features, reuse=False)
self.logits_fake = self.D(self.hall_features, reuse=True)
# losses
if self.mode == 'train_hallucination':
self.d_loss_real = tf.reduce_mean(
tf.square(self.logits_real - tf.ones_like(self.logits_real)))
self.d_loss_fake = tf.reduce_mean(
tf.square(self.logits_fake - tf.zeros_like(self.logits_fake)))
self.d_loss = self.d_loss_real + self.d_loss_fake
self.g_loss = tf.reduce_mean(
tf.square(self.logits_fake - tf.ones_like(self.logits_fake)))
# ~ self.d_optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
# ~ self.g_optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
elif self.mode == 'train_hallucination_p2':
fake_labels = self.labels + self.no_classes - \
self.labels # a tensor of constant value no_classes (the extra 'fake' class), with the same shape as labels
self.d_loss_real = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits_real,
labels=tf.one_hot(self.labels, self.no_classes + 1)))
self.d_loss_fake = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits_fake,
labels=tf.one_hot(fake_labels, self.no_classes + 1)))
self.d_loss = self.d_loss_real + self.d_loss_fake
self.g_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits_fake,
labels=tf.one_hot(self.labels, self.no_classes + 1)))
else:
print('Error building model')
self.d_optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.g_optimizer = tf.train.AdamOptimizer(self.learning_rate)
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'discriminator' in var.name]
g_vars = [var for var in t_vars if 'hall' in var.name]
# train ops
with tf.variable_scope('train_op', reuse=False):
self.d_train_op = slim.learning.create_train_op(
self.d_loss, self.d_optimizer, variables_to_train=d_vars)
self.g_train_op = slim.learning.create_train_op(
self.g_loss, self.g_optimizer, variables_to_train=g_vars)
# summaries
d_loss_summary = tf.summary.scalar('d_loss', self.d_loss)
g_loss_summary = tf.summary.scalar('g_loss', self.g_loss)
# hall_acc_summary = tf.summary.scalar('hall_acc', self.accuracy)
self.summary_op = tf.summary.merge(
[d_loss_summary, g_loss_summary])
elif self.mode == 'finetune_hallucination':
# depth & hall streams
# not used, just to recycle eval function
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.rgb_logits = self.single_stream(
self.rgb_images, modality='rgb', is_training=self.is_training)
self.hall_logits = self.single_stream(
self.rgb_images, modality='hall', is_training=self.is_training)
self.logits = (self.rgb_logits + self.hall_logits) / 2.
# overall acc_hall
self.pred = tf.argmax(tf.squeeze(self.logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
# ~ #hall_acc
# ~ self.hall_pred = tf.argmax(tf.squeeze(self.hall_logits), 1)
# ~ self.hall_correct_pred = tf.equal(self.hall_pred, self.labels)
# ~ self.hall_accuracy = tf.reduce_mean(tf.cast(self.hall_correct_pred, tf.float32))
# ~ #rgb_acc
# ~ self.rgb_pred = tf.argmax(tf.squeeze(self.rgb_logits), 1)
# ~ self.rgb_correct_pred = tf.equal(self.rgb_pred, self.labels)
# ~ self.rgb_accuracy = tf.reduce_mean(tf.cast(self.rgb_correct_pred, tf.float32))
# training stuff
t_vars = tf.trainable_variables()
train_vars = t_vars
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits, labels=tf.one_hot(self.labels, self.no_classes)))
gradients = tf.gradients(self.loss, train_vars)
gradients = list(zip(gradients, train_vars))
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
# ~ self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.train_op = self.optimizer.apply_gradients(
grads_and_vars=gradients)
# summary stuff
loss_summary = tf.summary.scalar('classification_loss', self.loss)
accuracy_summary = tf.summary.scalar('accuracy', self.accuracy)
self.summary_op = tf.summary.merge(
[loss_summary, accuracy_summary])
elif self.mode == 'test_moddrop':
# rgb & blank depth streams
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
# bad trick to blank out depth....
self.blank_depth = self.depth_images - self.depth_images
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.rgb_logits = self.single_stream(
self.rgb_images, modality='rgb', is_training=self.is_training)
# swap between the two
self.depth_logits = self.single_stream(
self.depth_images, modality='depth', is_training=self.is_training)
# ~ self.depth_logits = self.single_stream(self.blank_depth, modality='depth', is_training=self.is_training)
# overall acc
# swap between the two
self.logits = (self.rgb_logits + self.depth_logits) / 2.
# ~ self.logits = self.rgb_logits
self.pred = tf.argmax(tf.squeeze(self.logits), 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_pred, tf.float32))
elif self.mode == 'test_hallucination':
# rgb & hall streams
# not used, just to recycle eval function
self.depth_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'depth_images')
self.rgb_images = tf.placeholder(
tf.float32, [None, 224, 224, 3], 'rgb_images')
self.labels = tf.placeholder(tf.int64, [None], 'labels')
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.rgb_logits
import numpy as np
from skimage.draw import line
from scipy.interpolate import splprep, splev
from skimage.morphology import skeletonize
from scipy import ndimage
from scipy.ndimage import map_coordinates
import random
def extractHRFrontsGen(filename, classes, lonOff, latOff):
fronts = [[] for _ in range(len(classes))]
currentClass = ''
for line in open(filename, 'r'):
content = line.split()
if(len(content)==0):
currentClass = ''
continue
if(content[0] == '48HR'):
break
# If we encounter a keyword of the format that is not a front class, reset currentClass and go to the next line
if(content[0] in ['$$','TROF', 'LOWS', 'HIGHS']):
currentClass = ''
continue
# if we encounter a front class keyword of the format, reset the currentClass and process the line
if(content[0] in ["WARM", "COLD", "OCFNT", "STNRY"]):
currentClass = ''
for idx, className in enumerate(classes):
if(content[0] == className):
currentClass = className
latCoords = np.zeros(len(content)-1)
lonCoords = np.zeros(len(content)-1)
# HR has no classification in intensity
# csb Latitude is in degrees north
# csv Longitude is in degrees west
for idx2, coord in enumerate(content[1:]):
lat = int(coord[:3])/10 - latOff
lon = -int(coord[3:])/10 - lonOff
latCoords[idx2] = lat#round((latRes)//2-(1/latStep)*(lat))%(latRes)
lonCoords[idx2] = lon#round((1/lonStep)*(lon))%(lonRes)
fronts[idx].append(latCoords)
fronts[idx].append(lonCoords)
# Old class continues
elif(currentClass == className):
latCoords = np.zeros(len(content)+1)
lonCoords = np.zeros(len(content)+1)
# set start at end of previous line to leave no gaps
latCoords[0] = fronts[idx][-2][-1]
lonCoords[0] = fronts[idx][-1][-1]
# HR has no classification in intensity
# csb Latitude is in degrees north
# csv Longitude is in degrees west
for idx2, coord in enumerate(content):
lat = int(coord[:3])/10 - latOff
lon = -int(coord[3:])/10 - lonOff
latCoords[idx2+1] = lat#round((latRes)//2-(1/latStep)*(lat))%latRes
lonCoords[idx2+1] = lon#round((1/lonStep)*(lon))%lonRes
fronts[idx].append(latCoords)
fronts[idx].append(lonCoords)
return fronts
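# Added structural note: each fronts[i] is a flat list that alternates per-front coordinate
# arrays, i.e. [latCoords_0, lonCoords_0, latCoords_1, lonCoords_1, ...], which is why the
# consumers below iterate over it with a stride of 2.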
def degToRegularGrid(fronts, res):
latRes = (np.abs(180/res[0])+1).astype(np.int32)
lonRes = int(360/res[1])
for type in fronts:
for frontidx in range(0,len(type),2):
for pairIdx in range(len(type[frontidx])):
lat = type[frontidx][pairIdx]
lon = type[frontidx+1][pairIdx]
type[frontidx][pairIdx] = round((latRes)//2+(1/res[0])*(lat))%latRes
type[frontidx+1][pairIdx] = round((1/res[1])*(lon))%lonRes
return fronts
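# Worked example (added, for orientation): with res = (0.25, 0.25) the grid has
# latRes = 721 rows and lonRes = 1440 columns; a point at lat = 45, lon = -10 maps to
# row round(721//2 + 4*45) % 721 = 540 and column round(4*(-10)) % 1440 = 1400,
# following the two formulas above.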
def extractFrontsSelfCreatedNoDuplicates(filename, classes, lonOff, latOff):
fronts = [[] for x in range(len(classes))]
for line in open(filename, 'r'):
content = line.split()
if(len(content)==0):
continue
for idx, className in enumerate(classes):
if(content[0] == className):
latCoords = []#np.zeros((len(content)-1)//2)
lonCoords = []#np.zeros((len(content)-1)//2)
# basis change such that lat ranges from 180 (bottom) to 0 (top)
# and lon ranges from 0 left to 360 right
lastLat = -1
lastLon = -1
for idx2 in range(1,len(content),2):
lat = float(content[idx2][1:-1]) - latOff
lon = float(content[idx2+1][:-1]) - lonOff
newLat = lat#round(latRes//2-(1/latStep)*(lat))%latRes
newLon = lon#round((1/lonStep)*(lon))%lonRes
# Only extract a point if it is different from the previous (do not generate duplicates)
if(newLat != lastLat or newLon != lastLon):
lastLat = newLat
lastLon = newLon
latCoords.append(lastLat)
lonCoords.append(lastLon)
fronts[idx].append(np.array(latCoords))
fronts[idx].append(np.array(lonCoords))
return fronts
def extractPolyLines(fronts, lonRes, latRes, thickness = 1):
pls = np.zeros((latRes, lonRes, len(fronts)+1))
pls[:,:,0] = np.ones((latRes,lonRes))
# for each type of front detected
for idx, ft in enumerate(fronts):
# for each individual front of the given type
for instance in range(0,len(ft),2):
latCoords = ft[instance]
lonCoords = ft[instance+1]
for idx2 in range(len(lonCoords)-1):
possWays = np.array([np.linalg.norm(lonCoords[idx2]-lonCoords[idx2+1]), np.linalg.norm(lonCoords[idx2]-(lonCoords[idx2+1]-lonRes)), np.linalg.norm(lonCoords[idx2]-lonRes-lonCoords[idx2+1])])
pos = np.argmin(possWays)
if(pos == 1):
lonCoords[idx2+1] -= lonRes
elif(pos == 2):
lonCoords[idx2] -= lonRes
rr, cc = line(int(latCoords[idx2]), int(lonCoords[idx2]), int(latCoords[idx2+1]), int(lonCoords[idx2+1]) )
for lt in range(-(thickness//2),thickness//2+1):
pls[rr%latRes,(cc+lt)%lonRes, idx+1] = 1
pls[(rr+lt)%latRes,cc%lonRes, idx+1] = 1
pls[rr%latRes,(cc+lt)%lonRes, 0] = 0
pls[(rr+lt)%latRes,cc%lonRes, 0] = 0
return pls
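# Added note on the output: pls has one channel per front type plus a leading background
# channel; channel 0 starts as all ones and is cleared wherever a front is rasterized, so
# the result is a per-pixel one-hot-style labelling. The possWays test picks the shortest
# of the three candidate paths across the periodic longitude axis before drawing a segment.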
def extractFlatPolyLines(fronts, lonRes, latRes, thickness = 1):
image = np.zeros((latRes, lonRes, 1))
# for each type of front detected
for idx, ft in enumerate(fronts):
# for each individual front of the given type
for instance in range(0,len(ft),2):
latCoords = ft[instance]
lonCoords = ft[instance+1]
# for each coordinate pair of an instance
for idx2 in range(len(lonCoords)-1):
possWays = np.array([np.linalg.norm(lonCoords[idx2]-lonCoords[idx2+1]), np.linalg.norm(lonCoords[idx2]-(lonCoords[idx2+1]-lonRes)), np.linalg.norm(lonCoords[idx2]-lonRes-lonCoords[idx2+1])])
pos = np.argmin(possWays)
if(pos == 1):
lonCoords[idx2+1] -= lonRes
elif(pos == 2):
lonCoords[idx2] -= lonRes
# extract line from [lat,lon] to [lat,lon]
rr, cc = line(int(latCoords[idx2]), int(lonCoords[idx2]), int(latCoords[idx2+1]), int(lonCoords[idx2+1]) )
# idx + 1 as the zero label is used to determine the background
sigma = 3
if(sigma > 0):
norm_fac = 1/(sigma*np.sqrt(2*np.pi))
sigma2 = sigma*sigma
for lt in range(-(thickness//2),1):
lt2 = lt*lt
value = norm_fac * np.exp(-0.5*lt2/sigma2)
print("image value is ", value)
image[rr,(cc+lt)%lonRes,0] = value
image[(rr+lt)%latRes,cc,0] = value
image[rr,(cc-lt)%lonRes,0] = value
image[(rr-lt)%latRes,cc,0] = value
else:
for lt in range(-(thickness//2),thickness//2+1):
image[rr,(cc+lt)%lonRes,0] = idx+1
image[(rr+lt)%latRes,cc,0] = idx+1
return image
def extractLines(fronts, lonRes, latRes):
myLines = []
# for each type of front detected
for idx, ft in enumerate(fronts):
myLines.append([])
# for each individual front of the given type
for instance in range(0,len(ft),2):
latCoords = ft[instance]
lonCoords = ft[instance+1]
# for each coordinate pair of an instance
for idx2 in range(len(lonCoords)-1):
possWays = np.array([np.linalg.norm(lonCoords[idx2]-lonCoords[idx2+1]), np.linalg.norm(lonCoords[idx2]-(lonCoords[idx2+1]-lonRes)), np.linalg.norm(lonCoords[idx2]-lonRes-lonCoords[idx2+1])])
pos = np.argmin(possWays)
if(pos == 1):
lonCoords[idx2+1] -= lonRes
elif(pos == 2):
lonCoords[idx2] -= lonRes
# extract line from [lat,lon] to [lat,lon]
rr, cc = line(int(latCoords[idx2]), int(lonCoords[idx2]), int(latCoords[idx2+1]), int(lonCoords[idx2+1]) )
myLines[idx].append((rr,cc))
return myLines
def drawOffsettedLines(image, line, value, thickness, offset , lonRes, latRes):
rr, cc = line
for lt in range(-(thickness//2),thickness//2+1):
image[(rr+offset[0])%latRes,(cc+lt+offset[1])%lonRes,0] = value
image[(rr+lt+offset[0])%latRes,((cc+offset[1])%lonRes),0] = value
def cropToRange(image, latRange, lonRange, res):
latRange = (90-np.arange(latRange[0], latRange[1], res[0]))/np.abs(res[0])
lonRange = np.arange(lonRange[0], lonRange[1], res[1])/np.abs(res[1])
image = image[latRange.astype(np.int32),:,:]
image = image[:,lonRange.astype(np.int32),:]
return image
class extractFlatPolyLinesInRange():
def __init__ (self, labelGrouping = None, thickness = 1, maxOff = (0,0)):
self.labelGrouping = labelGrouping
self.fieldToNum = {"w":1,"c":2,"o":3,"s":4}
if(self.labelGrouping is None):
self.labelGrouping = "wcos"
groupStrings = self.labelGrouping.split(',')
self.groups = [[self.fieldToNum[member] for member in group] for group in groupStrings]
#print("fpl",self.labelGrouping, self.groups)
self.thickness = thickness
self.maxOff = maxOff
def __call__(self,fronts, latRange, lonRange, res):
latRes = (np.abs(180/res[0])+1).astype(np.int32)
lonRes = int(360/res[1])
# Groupings of different frontal types
ftypes = len(self.groups)
image = np.zeros((latRes, lonRes, 1))
alllines = extractLines(fronts, lonRes, latRes)
# draw the lines
for idx, lines in enumerate(alllines,1):
for grpidx, group in enumerate(self.groups,1):
if idx in group:
tgtGrp = grpidx
for line in lines:
drawOffsettedLines(image, line, tgtGrp, self.thickness, self.maxOff, lonRes, latRes)
# crop the image
image = cropToRange(image, latRange, lonRange, res)
return image
class extractCoordsInRange():
def __init__(self, labelGrouping = None):
self.labelGrouping = labelGrouping
self.fieldToNum = {"w":1,"c":2,"o":3,"s":4}
if(self.labelGrouping is None):
self.labelGrouping = "wcos"
groupStrings = self.labelGrouping.split(',')
self.groups = [[self.fieldToNum[member] for member in group] for group in groupStrings]
self.thickness = 1
self.maxOff = (0,0)
def __call__(self, fronts, latRange, lonRange, res):
latRes = (np.abs(180/res[0])+1).astype(np.int32)
lonRes = int(360/res[1])
# Groupings of different frontal types
ftypes = len(self.groups)
allGroupedFronts = [[] for _ in range(ftypes)]
for grpidx, group in enumerate(self.groups):
for member in group:
allGroupedFronts[grpidx] += fronts[member-1]
# all fronts are now grouped
# Now: Merge connected Fronts of the same type
groupedFronts = [[] for _ in range(ftypes)]
closeDistance = 3
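# Added explanatory comments for the merging loop below: two fronts of the same group are
# concatenated when any pair of their endpoints lies within closeDistance pixels. The four
# branches handle start-start, start-end, end-start and end-end matches, with np.flip used
# where needed so the merged coordinate arrays keep a consistent orientation; the absorbed
# front is then marked invalid via validList.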
for grpIdx in range(len(self.groups)):
validList = [True for _ in range(len(allGroupedFronts[grpIdx]))]
for i in range(0,len(allGroupedFronts[grpIdx]),2):
istart = np.array(allGroupedFronts[grpIdx][i:i+2])[:,0]
iend = np.array(allGroupedFronts[grpIdx][i:i+2])[:,-1]
# empty ranges should be removed
if(np.all(istart == iend)):
validList[i] = False
validList[i+1] = False
continue
for j in range(i+2,len(allGroupedFronts[grpIdx]),2):
jstart = np.array(allGroupedFronts[grpIdx][j:j+2])[:,0]
jend = np.array(allGroupedFronts[grpIdx][j:j+2])[:,-1]
if(np.all(jstart == jend)):
continue
# connection type 1
if(np.linalg.norm(istart-jstart)<closeDistance):
allGroupedFronts[grpIdx][j] = np.concatenate((np.flip(allGroupedFronts[grpIdx][i], axis=0), allGroupedFronts[grpIdx][j]), axis = 0)
allGroupedFronts[grpIdx][j+1] = np.concatenate((np.flip(allGroupedFronts[grpIdx][i+1], axis=0), allGroupedFronts[grpIdx][j+1]), axis = 0)
validList[i] = False
validList[i+1] = False
break
elif(np.linalg.norm(istart - jend)< closeDistance):
allGroupedFronts[grpIdx][j] = np.concatenate((allGroupedFronts[grpIdx][j], allGroupedFronts[grpIdx][i]), axis = 0)
allGroupedFronts[grpIdx][j+1] = np.concatenate((allGroupedFronts[grpIdx][j+1], allGroupedFronts[grpIdx][i+1]), axis = 0)
validList[i] = False
validList[i+1] = False
break
elif(np.linalg.norm(iend - jstart)< closeDistance):
allGroupedFronts[grpIdx][j] = np.concatenate((allGroupedFronts[grpIdx][i], allGroupedFronts[grpIdx][j]), axis = 0)
allGroupedFronts[grpIdx][j+1] = np.concatenate((allGroupedFronts[grpIdx][i+1], allGroupedFronts[grpIdx][j+1]), axis = 0)
validList[i] = False
validList[i+1] = False
break
elif(np.linalg.norm(iend - jend)< closeDistance):
allGroupedFronts[grpIdx][j] = np.concatenate((allGroupedFronts[grpIdx][j], np.flip(allGroupedFronts[grpIdx][i], axis = 0)), axis = 0)
allGroupedFronts[grpIdx][j+1] = np.concatenate((allGroupedFronts[grpIdx][j+1], np.flip(allGroupedFronts[grpIdx][i+1], axis = 0)), axis = 0)
validList[i] = False
validList[i+1] = False
break
for i in range(len(validList)):
if(validList[i]):
groupedFronts[grpIdx].append(allGroupedFronts[grpIdx][i])
# groupedFronts now holds a concatenation of same-type fronts whose endpoints were in the same spot
# Now remove all lines outside the target range
# We define a line as outside if both vertices are outside the inspected window
# If only one vertex is outside, we move the vertex to the next border pixel along the line
allGroups = []
# transform from degree range into pixel range (relative to the whole grid)
latRange = (((90-latRange[0])/np.abs(res[0]))%latRes,((90-latRange[1])/np.abs(res[0]))%latRes)
lonOff = 0
if(lonRange[0]<0 and lonRange[1]>0):
lonOff = -180
lonRange = (((lonRange[0]-lonOff)/res[1])%lonRes, ((lonRange[1]-lonOff)/res[1])%lonRes)
for grpidx, frontgroup in enumerate(groupedFronts):
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Ellipse, Circle
from matplotlib.legend_handler import HandlerPatch
import matplotlib.colors as colors
from matplotlib.ticker import FormatStrFormatter
import os
import itertools
import pf_dynamic_cart as pfc
import pf_dynamic_sph as pfs
import pf_static_sph as pss
import Grid
import warnings
from scipy import interpolate
from scipy.optimize import curve_fit, OptimizeWarning, fsolve
from scipy.integrate import simps
import scipy.stats as ss
from timeit import default_timer as timer
from copy import copy
from matplotlib.ticker import NullFormatter
import colors as col
if __name__ == "__main__":
# # Initialization
# matplotlib.rcParams.update({'font.size': 12, 'text.usetex': True})
mpegWriter = animation.writers['ffmpeg'](fps=2, bitrate=1800)
# plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
# Writer = animation.writers['ffmpeg']
# mpegWriter = Writer(fps=20, metadata=dict(artist='Me'), bitrate=1800)
matplotlib.rcParams.update({'font.size': 16, 'font.family': 'Times New Roman', 'text.usetex': True, 'mathtext.fontset': 'dejavuserif'})
higherCutoff = False
cutoffRat = 1.0
betterResolution = False
resRat = 1.0
# ---- INITIALIZE GRIDS ----
(Lx, Ly, Lz) = (60, 60, 60)
(dx, dy, dz) = (0.25, 0.25, 0.25)
higherCutoff = False
cutoffRat = 1.5
betterResolution = True
resRat = 0.5
# (Lx, Ly, Lz) = (40, 40, 40)
# (dx, dy, dz) = (0.25, 0.25, 0.25)
# (Lx, Ly, Lz) = (21, 21, 21)
# (dx, dy, dz) = (0.375, 0.375, 0.375)
NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz)
# NGridPoints_cart = 1.37e5
k_max = ((2 * np.pi / dx)**3 / (4 * np.pi / 3))**(1 / 3)
linDimMajor = 0.99 * (k_max * np.sqrt(2) / 2)
linDimMinor = linDimMajor
massRat = 1.0
IRrat = 1
# git test
# Toggle parameters
toggleDict = {'Dynamics': 'real', 'Interaction': 'on', 'Grid': 'spherical', 'Coupling': 'twophonon', 'noCSAmp': True}
# ---- SET OUTPUT DATA FOLDER ----
datapath = '/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}'.format(NGridPoints_cart)
animpath = '/Users/kis/Dropbox/VariationalResearch/DataAnalysis/figs'
if higherCutoff is True:
datapath = datapath + '_cutoffRat_{:.2f}'.format(cutoffRat)
if betterResolution is True:
datapath = datapath + '_resRat_{:.2f}'.format(resRat)
datapath = datapath + '/massRatio={:.1f}'.format(massRat)
distdatapath = copy(datapath)
if toggleDict['noCSAmp'] is True:
datapath = datapath + '_noCSAmp'
innerdatapath = datapath + '/redyn_spherical'
distdatapath = distdatapath + '/redyn_spherical'
if toggleDict['Coupling'] == 'frohlich':
innerdatapath = innerdatapath + '_froh_new'
distdatapath = distdatapath + '_froh'
animpath = animpath + '/rdyn_frohlich'
else:
animpath = animpath + '/rdyn_twophonon'
# figdatapath = '/Users/kis/Dropbox/Apps/Overleaf/Quantum Cherenkov Transition in Bose Polaron Systems/figures/figdump'
figdatapath = '/Users/kis/Dropbox/Apps/Overleaf/Cherenkov Polaron Paper pt1/figures/figdump'
# # Analysis of Total Dataset
aIBi = -10
qds = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
qds_aIBi = qds
PVals = qds['P'].values
tVals = qds['t'].values
n0 = qds.attrs['n0']
gBB = qds.attrs['gBB']
mI = qds.attrs['mI']
mB = qds.attrs['mB']
nu = np.sqrt(n0 * gBB / mB)
mc = mI * nu
aBB = (mB / (4 * np.pi)) * gBB
xi = (8 * np.pi * n0 * aBB)**(-1 / 2)
tscale = xi / nu
Pnorm = PVals / mc
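# Added orientation comments (standard polaron/BEC conventions, stated here as an
# assumption about the intent of the variables above): nu is the condensate speed of sound
# sqrt(n0*gBB/mB), xi the healing length, tscale = xi/nu the corresponding time unit,
# mc = mI*nu the impurity momentum at the Landau critical velocity, and Pnorm the momenta
# in units of mc.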
kArray = qds.coords['k'].values
k0 = kArray[0]
kf = kArray[-1]
print(aIBi * xi)
print(mI / mB, IRrat)
IR_lengthscale = 1 / (k0 / (2 * np.pi)) / xi
UV_lengthscale = 1 / (kf / (2 * np.pi)) / xi
print(k0, 1 / IR_lengthscale, IR_lengthscale)
print(kf, 1 / UV_lengthscale, UV_lengthscale)
# aIBi_Vals = np.array([-10.0, -5.0, -2.0, -1.0, -0.75, -0.5])
aIBi_Vals = np.array([-10.0, -5.0, -2.0])
kgrid = Grid.Grid("SPHERICAL_2D")
kgrid.initArray_premade('k', qds.coords['k'].values)
kgrid.initArray_premade('th', qds.coords['th'].values)
kVals = kgrid.getArray('k')
wk_Vals = pfs.omegak(kVals, mB, n0, gBB)
bdiff = 100 * np.abs(wk_Vals - nu * kVals) / (nu * kVals)
kind = np.abs(bdiff - 1).argmin().astype(int)
klin = kVals[kind]
tlin = 2 * np.pi / (nu * kVals[kind])
tlin_norm = tlin / tscale
print(klin, tlin_norm)
print(90 / tscale, 100 / tscale)
print(kVals[-1], kVals[1] - kVals[0])
print(qds.attrs['k_mag_cutoff'] * xi)
print('Np: {0}'.format(qds.coords['k'].values.size * qds.coords['th'].values.size))
# # # # # # #############################################################################################################################
# # # # # # FIG 3 - S(t) CURVES - PRL
# # # # # #############################################################################################################################
# red = col.red.ashexstring()
# green = col.green.ashexstring()
# blue = col.blue.ashexstring()
# colorList = [red, green, blue]
# matplotlib.rcParams.update({'font.size': 12})
# tailFit = True
# logScale = True
# PimpData_roll = False; PimpData_rollwin = 2
# longTime = True
# # tau = 100; tfCutoff = 90; tfstart = 10
# tau = 300; tfCutoff = 200; tfstart = 10
# aIBi_weak = -10.0
# print(aIBi_weak * xi)
# if longTime:
# innerdatapath_longtime = datapath + '_longtime/redyn_spherical'
# qds_w = xr.open_dataset(innerdatapath_longtime + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi_weak))
# else:
# qds_w = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi_weak))
# tVals = qds_w['t'].values
# tsVals = tVals[tVals < tau]
# qds_aIBi_ts_w = qds_w.sel(t=tsVals)
# Pnorm_des = np.array([0.5, 2.2])
# Pinds = np.zeros(Pnorm_des.size, dtype=int)
# for Pn_ind, Pn in enumerate(Pnorm_des):
# Pinds[Pn_ind] = np.abs(Pnorm - Pn).argmin().astype(int)
# fig, ax = plt.subplots()
# for ip, indP in enumerate(Pinds):
# P = PVals[indP]
# DynOv_w = np.abs(qds_aIBi_ts_w.isel(P=indP)['Real_DynOv'].values + 1j * qds_aIBi_ts_w.isel(P=indP)['Imag_DynOv'].values).real.astype(float)
# Pph_ds_w = xr.DataArray(qds_aIBi_ts_w.isel(P=indP)['Pph'].values, coords=[tsVals], dims=['t'])
# if PimpData_roll:
# Pph_ds_w = Pph_ds_w.rolling(t=PimpData_rollwin, center=True).mean().dropna('t')
# vImp_Vals_w = (P - Pph_ds_w.values) / mI
# tvImp_Vals_w = Pph_ds_w['t'].values
# if tailFit is True:
# tfmask = tsVals > tfCutoff
# tfVals = tsVals[tfmask]
# tfLin = tsVals[tsVals > tfstart]
# zD = np.polyfit(np.log(tfVals), np.log(DynOv_w[tfmask]), deg=1)
# if longTime:
# tfLin_plot = tVals[tVals > tfstart]
# else:
# tfLin_plot = tfLin
# fLinD_plot = np.exp(zD[1]) * tfLin_plot**(zD[0])
# ax.plot(tfLin_plot / tscale, fLinD_plot, 'k--', label='')
# if longTime:
# DynOv_w_plot = np.abs(qds_w.isel(P=indP)['Real_DynOv'].values + 1j * qds_w.isel(P=indP)['Imag_DynOv'].values).real.astype(float)
# ax.plot(tVals / tscale, DynOv_w_plot, label='{:.2f}'.format(P / mc), lw=3, color=colorList[ip])
# else:
# ax.plot(tsVals / tscale, DynOv_w, label='{:.2f}'.format(P / mc))
# ax.set_ylabel(r'$|S(t)|$', fontsize=18)
# ax.set_xlabel(r'$t/(\xi c^{-1})$', fontsize=18)
# if logScale is True:
# ax.set_xscale('log')
# ax.set_yscale('log')
# ax.tick_params(which='both', direction='in', right=True, top=True)
# ax.tick_params(which='major', length=6, width=1)
# ax.tick_params(which='minor', length=3, width=1)
# ax.tick_params(axis='x', which='major', pad=10)
# ax.tick_params(axis='both', which='major', labelsize=17)
# ax.tick_params(axis='both', which='minor', labelsize=17)
# # ax.legend(title=r'$v_{\rm imp}(t_{0}) / c$')
# handles, labels = ax.get_legend_handles_labels()
# # fig.legend(handles, labels, title=r'$\langle v_{\rm imp}(t_{0})\rangle / c$', ncol=1, loc='center right', bbox_to_anchor=(0.11, 0.38)))
# fig.subplots_adjust(left=0.2, bottom=0.175, top=0.98, right=0.98)
# fig.legend(handles, labels, title=r'$v_{\rm imp}(t_{0}) / c$', loc=3, bbox_to_anchor=(0.25, 0.25), fontsize=18, title_fontsize=18)
# fig.set_size_inches(6, 3.9)
# filename = '/Fig3_PRL.pdf'
# fig.savefig(figdatapath + filename)
# # # # # # # #############################################################################################################################
# # # # # # # FIG SM3 - LETTER
# # # # # # #############################################################################################################################
# axl = matplotlib.rcParams['axes.linewidth']
# matplotlib.rcParams['axes.linewidth'] = 0.5 * axl
# matplotlib.rcParams.update({'font.size': 12})
# labelsize = 13
# legendsize = 12
# red = col.red.ashexstring()
# green = col.green.ashexstring()
# blue = col.blue.ashexstring()
# colorList = [green, red, blue]
# matplotlib.rcParams.update({'font.size': 12})
# # fig, ax = plt.subplots()
# fig = plt.figure(constrained_layout=False)
# gs = fig.add_gridspec(nrows=1, ncols=1, bottom=0.1, top=0.93, left=0.1, right=0.95)
# ax = fig.add_subplot(gs[0])
# qds = xr.open_dataset('/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_1.11E+08_resRat_0.50/massRatio=1.0_noCSAmp/redyn_spherical' + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# tVals = qds['t'].values
# DynOvExp_NegMask = False
# DynOvExp_Cut = False
# cut = 1e-4
# consecDetection = True
# consecSamples = 10
# def powerfunc(t, a, b):
# return b * t**(-1 * a)
# tmin = 90
# tmax = 100
# tfVals = tVals[(tVals <= tmax) * (tVals >= tmin)]
# rollwin = 1
# aIBi_des = np.array([-10.0, -5.0, -3.5, -2.5, -2.0, -1.75])
# massRat_des = np.array([1.0])
# datapath = '/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_1.11E+08_resRat_0.50/massRatio=1.0_noCSAmp'
# massRat_des = np.array([0.5, 1.0, 2.0])
# mdatapaths = []
# for mR in massRat_des:
# if toggleDict['noCSAmp'] is True:
# mdatapaths.append(datapath[0:-11] + '{:.1f}_noCSAmp'.format(mR))
# else:
# mdatapaths.append(datapath[0:-3] + '{:.1f}_noCSAmp'.format(mR))
# if toggleDict['Dynamics'] != 'real' or toggleDict['Grid'] != 'spherical' or toggleDict['Coupling'] != 'twophonon':
# print('SETTING ERROR')
# Pcrit_da = xr.DataArray(np.full((massRat_des.size, aIBi_des.size), np.nan, dtype=float), coords=[massRat_des, aIBi_des], dims=['mRatio', 'aIBi'])
# for inda, aIBi in enumerate(aIBi_des):
# for indm, mRat in enumerate(massRat_des):
# mds = xr.open_dataset(mdatapaths[indm] + '/redyn_spherical/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# Plen = mds.coords['P'].values.size
# Pstart_ind = 0
# PVals = mds.coords['P'].values[Pstart_ind:Plen]
# n0 = mds.attrs['n0']
# gBB = mds.attrs['gBB']
# mI = mds.attrs['mI']
# mB = mds.attrs['mB']
# nu = np.sqrt(n0 * gBB / mB)
# vI0_Vals = (PVals - mds.isel(t=0, P=np.arange(Pstart_ind, Plen))['Pph'].values) / mI
# mds_ts = mds.sel(t=tfVals)
# DynOv_Exponents = np.zeros(PVals.size)
# DynOv_Constants = np.zeros(PVals.size)
# for indP, P in enumerate(PVals):
# DynOv_raw = np.abs(mds_ts.isel(P=indP)['Real_DynOv'].values + 1j * mds_ts.isel(P=indP)['Imag_DynOv'].values).real.astype(float)
#
of priors.
Properly initialized, an object of type "FuFPrior"
is callable. On call, it expects a dictionary with
the names and values of the parameters as first
argument and the name of the parameter under consideration
as second argument. The return value should be the
(natural) logarithm of the associated prior probability
distribution.
Parameters
----------
lnp : string, {uniform, limuniform, jeffreyPS, gaussian}
uniform : improper uniform prior.
limuniform : proper uniform prior; 'lower' and 'upper' define the
lower and upper bounds of the interval.
jeffreyPS : Jeffreys prior for a Poisson scaling parameter.
gaussian : a Gaussian prior; the keywords 'mu' and 'sig' must be
specified to define the mean and standard deviation of
the Gaussian.
"""
def _uniform(self, **kwargs):
def uniform(ps, n, **rest):
return 0.0
return uniform
def _uniformLimit(self, **kwargs):
if kwargs["upper"] < kwargs["lower"]:
raise(PE.PyAValError("upper needs to be larger than lower",
where="FuFPrior (limited uniform distribution)",
solution="Adapt upper and lower."))
p = np.log(1.0 / (kwargs["upper"] - kwargs["lower"]))
def unilimit(ps, n, **rest):
if (ps[n] >= kwargs["lower"]) and (ps[n] <= kwargs["upper"]):
return p
else:
return -np.Inf
return unilimit
def _jeffreyPoissonScale(self, **kwargs):
def jps(ps, n, **rest):
return -0.5 * np.log(ps[n])
return jps
def _gaussian(self, **kwargs):
r = -0.5 * np.log(2.0 * np.pi * kwargs["sig"]**2)
def gaussianPrior(ps, n, **rest):
return r - (ps[n] - kwargs["mu"])**2 / (2.0 * kwargs["sig"]**2)
return gaussianPrior
def _callDelegator(self, *args, **kwargs):
""" Overwritten by the method to represent __call__ """
raise(PE.PyANotImplemented("_callDelegator is not implemented."))
def __call__(self, *args, **kwargs):
return self._callDelegator(*args, **kwargs)
def __init__(self, lnp, **kwargs):
if isinstance(lnp, six.string_types):
if lnp == "uniform":
self._callDelegator = self._uniform(**kwargs)
elif lnp == "limuniform":
self._callDelegator = self._uniformLimit(**kwargs)
elif lnp == "jeffreyPS":
self._callDelegator = self._jeffreyPoissonScale(**kwargs)
elif lnp == "gaussian":
self._callDelegator = self._gaussian(**kwargs)
else:
raise(PE.PyAValError("No prior defined for " + str(lnp),
where="FuFPrior",
solution="Use either of {uniform, limuniform, jeffreyPS, gaussian}"))
class OneDFit(_OndeDFitParBase, _PyMCSampler):
"""
The base class for fitting objects.
Parameters
----------
parList : list of strings
Contains the names of the properties
defining the model. By default, variables of the same name
are used to represent them.
Attributes
----------
model : array
Used by the `updateModel` method to store the
evaluated model for current parameter settings.
Holds the best-fit model after a call to a fit method.
penaltyFactor : float
The penalty factor used to apply penalties for
enforcing restrictions (default = 10**20).
Notes
-----
The purpose of the class
The purpose of this class is to provide a convenient interface
to various fitting algorithms.
It provides the functionality, which allows for parameter
fitting, but does not implement a particular model.
The class can be used to fit any kind of model, which has
to be implemented in a class, which inherits from
the *OneDFit* class.
Management of fitting parameters
The fitting parameters are managed by a *Params* class
instance, which provides a wealth of possibilities to
influence the behavior of the parameters during the
fitting process. This includes deciding whether a particular
parameter is supposed to be a free fitting parameter,
applying restrictions to limit the valid range for a
parameter, or the introduction of functional dependencies among
different parameters.
Properties versus variable names
Each model is described by a number of *properties*, such
as, for example, mass and radius. These may be represented
by arbitrarily named variables. Normally, it is convenient to
name the variables according to the properties they describe,
which is the default behavior. However, in some cases, for example
if a model consists of two equal subcomponents, such a naming
scheme leads to nonunique variable names, which has to be avoided.
Now it is necessary to distinguish between the *property* and
the describing variable. This class uses the `propMap`
dictionary, which maps property name to
variable name to manage these situations.
Combining fitting objects
Often, it can be convenient to combine a number of simple models
to form a new, more complex one. The *OneDFit* class allows you to
combine objects using the arithmetic operators +-\*/,
and the power (\*\*) operator.
Naming scheme for models
For simple models it is convenient to use a one-to-one mapping
between property and variable name. It may, however, become
necessary to deviate from this scheme, for example, to keep
variable names unique. This class supports the following naming scheme:
Each model has a "root name", which is supposed to be a concise
string describing the model (for instance, "Gaussian"). The root
name is attached to the property name using an underscore.
If a complex model consists of more than one component
with the same root name, a component counter, enclosed in
parentheses, is attached to the variable name.
A variable name could, for example, look like: "mu_Gaussian(1)".
Methods to be implemented in a model class
A valid model class inheriting this interface class must provide
the following methods
- **__init__()** - The constructor.
Defines the set of properties describing the model.
- **evaluate(x)** - An *evaluate* method.
This method takes a single
argument, x, which is an array of points at which
the model is to be evaluated. To access the
current model parameters, this method should use
the set/getitem methods. The return value is
an array holding the model evaluated at the
points given by `x`.
"""
def __init__(self, parList, **kwargs):
_OndeDFitParBase.__init__(self, parList, **kwargs)
# Left and right compo(nent) are necessary for combining models
self.leftCompo = None
self.rightCompo = None
self.penaltyFactor = 1e20
self.model = None
self._fufDS = None
self.fitResult = None
# Determines whether steppar can be used
self._stepparEnabled = False
def _compoWalk(self):
"""
TBD
"""
def walk(c, refs):
refs.append(c)
if c.leftCompo is not None:
walk(c.leftCompo, refs)
if c.rightCompo is not None:
walk(c.rightCompo, refs)
refs = []
walk(self, refs)
for c in refs:
yield c
def renameVariable(self, oldName, newName):
"""
Change name of variable.
Parameters
----------
oldName : string
Current variable name.
newName : string
New variable name.
Notes
-----
Variable names and properties are not the same.
"""
# First, walk down the left and right components (for combined models)
# and change the variable names.
if self.leftCompo is not None:
try:
self.leftCompo.renameVariable(oldName, newName)
except PE.PyAValError:
pass
if self.rightCompo is not None:
try:
self.rightCompo.renameVariable(oldName, newName)
except PE.PyAValError:
pass
# Now do the same for the "top" component
if newName == oldName:
# Ignore identical transformations
return
if newName in list(self.propMap.values()):
raise(PE.PyANameClash("A variable named " + newName +
" does already exist.", where="OneDFit::renameVariable"))
if newName in self.propMap:
if self.propMap[newName] != oldName:
raise(PE.PyANameClash("You may not assign a name to a variable, which corresponds to the name of another property.",
where="OneDFit::renameVariable"))
if not oldName in list(self.propMap.values()):
raise(PE.PyAValError("A variable named " + oldName +
" does not exist.", where="OneDFit::renameVariable"))
for k in six.iterkeys(self.propMap):
if self.propMap[k] == oldName:
self.propMap[k] = newName
break
# Tell the parameter class about the renaming (if this did not already happen)
if not newName in six.iterkeys(self.pars.parameters()):
self.pars.renameParameter(oldName, newName)
def _isComposed(self):
"""
Determines whether current model is "composed".
A model is composed, if there are left and right components.
Returns True if model is composed and False otherwise.
"""
return ((self.leftCompo is not None) and (self.rightCompo is not None))
def __combineRemapping(self, left, right):
"""
This member is essentially a renaming machine. When combining models
it can easily happen that two variables share the same name. If the
models are combined, unique variable names are needed. This method
uses the "root name" and "component counter" to assign new, unique
names to the variables.
Parameters:
- `left`, `right` - Two fitting objects (derived from OneDFit).
"""
def extendCoDat(coDat, c):
ident = c.naming.getRoot()
if not ident in coDat:
coDat[ident] = [c]
else:
coDat[ident].append(c)
return coDat
# Build up a dictionary assigning root to a list of corresponding components
coDat = {}
for c in left._compoWalk():
extendCoDat(coDat, c)
for c in right._compoWalk():
extendCoDat(coDat, c)
for k in six.iterkeys(coDat):
# Loop over all available root names
if len(coDat[k]) == 1:
# Only a single component with this root name
<reponame>Zadigo/zineb
import copy
import os
import re
from collections import OrderedDict
from functools import cached_property
from itertools import chain
from typing import Dict, Generator, List, NoReturn, Tuple, Union
from bs4 import BeautifulSoup
from bs4.element import ResultSet, Tag
from w3lib.html import safe_url_string
from w3lib.url import is_url, urljoin
from zineb.extractors._mixins import MultipleRowsMixin
from zineb.settings import settings as global_settings
from zineb.utils.characters import deep_clean
from zineb.utils.decoders import decode_email
from zineb.utils.iteration import drop_while, keep_while
from zineb.utils.paths import is_path
from zineb.utils.urls import replace_urls_suffix
class Extractor:
"""
Base class for every extractor class
"""
def __enter__(self):
raise NotImplementedError('__enter__ should be implemented by the subclasses')
def __exit__(self, exc_type, exc_val, exc_tb):
return False
@cached_property
def cached_items(self):
"""
Return the original list of extracted
elements on the page
"""
raise NotImplementedError(('Subclasses should provide a way'
' to return the original data'))
def _check_response(self, response):
from zineb.http.responses import HTMLResponse
if isinstance(response, HTMLResponse):
return response.html_page
return response
def resolve(self, soup: BeautifulSoup) -> NoReturn:
raise NotImplementedError(('Provide functionalities for quickly '
'extracting items from the HTML page'))
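# Hedged illustration (not part of the original module): a minimal Extractor
# subclass honouring the contract above. The class name and the idea of
# collecting "href" attributes are invented purely for illustration.
class _LinkExtractorSketch(Extractor):
    """Collect every href found on a page (illustration only)"""
    def __init__(self):
        self.links = []

    def __enter__(self):
        return self.links

    def resolve(self, soup: BeautifulSoup):
        # Gather the href attribute of every anchor tag on the page
        self.links = [tag.attrs.get('href') for tag in soup.find_all('a')]
        return self.links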
class TableExtractor(Extractor):
"""
Quickly extract a table from an HTML page.
By default this class retrieves the first table of the page if no
additional information is provided on which table to extract.
Parameters
----------
- class_or_id_name (str, Optional): the class or id name of the table. Defaults to None
- header_position (int, Optional): index of the header row, which get_values drops from the result. Defaults to None
- base_url (str, Optional): base url used to resolve relative links found in the table. Defaults to None
- processors (func, Optional): list of functions to process the final result. Defaults to None
Example
-------
extractor = TableExtractor()
extractor.resolve(BeautifulSoup Object)
[[a, b, c], [d, ...]]
By indicating the position of the header row, the header values,
which generally correspond to the first row, will be dropped
from the result of get_values.
Finally, you can also pass a set of processors that will modify the values
of each row according to the logic you would have implemented.
def drop_empty_values(value):
if value != '':
return value
extractor = TableExtractor(processors=[drop_empty_values])
extractor.resolve(BeautifulSoup Object)
"""
def __init__(self, class_or_id_name=None, header_position: int=None,
base_url: str=None, processors: List=[]):
self._table = None
self._raw_rows = []
self.values = []
# self.headers = None
self.class_or_id_name = class_or_id_name
self.attrs = None
self.header_position = header_position
self.base_url = base_url
self.processors = processors
def __enter__(self):
return self.get_values
def __iter__(self):
return iter(self.values)
def __repr__(self):
return f"{self.__class__.__name__}({self.values})"
def __call__(self, soup: BeautifulSoup, **kwargs):
"""
Resolve another table by calling the instance
Args:
soup (BeautifulSoup): [description]
Returns:
[type]: [description]
"""
self.__init__(**kwargs)
self.resolve(soup)
return self
def __getitem__(self, index):
return self.values[index]
def __len__(self):
return len(self.values)
# def __add__(self, table_instance):
# if not isinstance(table_instance, TableExtractor):
# raise TypeError("The table to add should be an instance of TableExtractor")
# return pandas.concat(
# [table_instance.get_values, self.get_values],
# axis=1
# )
@property
def first(self) -> Union[Tag, None]:
return self._raw_rows[0]
@property
def get_values(self):
import pandas
values = self.values.copy()
if self.header_position is not None:
values.pop(self.header_position)
instance = chain(*values)
return pandas.Series(data=list(instance))
@classmethod
def as_instance(cls, soup, **kwargs):
instance = cls(**kwargs)
instance.resolve(soup)
return instance
@staticmethod
def _get_rows(element: Tag):
return element.find_all('tr')
def _extract_values(self, elements: ResultSet, include_links: bool=False):
# if self._raw_rows is not None:
rows = []
for row in elements:
new_row = []
for column in row:
if column != '\n':
try:
new_row.append(deep_clean(column.text))
except:
# TODO: For whatever reason, on the
# table header values, column is directly
# the value of the row instead of <tr>
# which generates an error.
# column = 'A' instead of <tr>A</tr>
# if isinstance(column, str):
# new_row.append(column or None)
new_row.append(None)
# Find the first link in the column
# so that it can be included in the
# row -- This is useful in certain
# cases where the first row of a table
# sometimes has a link to go to a next
# page and it can be interesting to catch
# these kinds of links e.g. go to profile...
if include_links:
link = column.find('a')
if link or link is not None:
href = link.attrs.get('href')
# This is a problematic section especially when used
# in a Pipeline. When the link is not a link e.g. -1,
# this creates an error that is very difficult to resolve
# because the Pipe does not give the full stacktrace.
# Also, only append a link if something is detected.
if is_url(str(href)) or is_path(str(href)):
if self.base_url:
href = urljoin(self.base_url, href)
link = safe_url_string(href)
new_row.extend([href])
# else:
# # Sometimes, especially on badly coded websites,
# # the url/path in the link comes out in a very
# # odd manner e.g Players.asp?Tourn=WU202013&Team=CHN&No=133592
# # which does not allow us to collect
# # the url. If the user knows about this and has
# # provided a root url, we can use that in an
# # attempt to reconcile the path with url
# if self.base_url is not None:
# url = urljoin(self.base_url, href)
# if is_url(url):
# link = safe_url_string(url)
# new_row.extend([url])
rows.append(new_row)
# if self.header_position is not None:
# self.headers = rows.pop(self.header_position)
return rows
# else:
# return self._raw_rows
# def _run_processors(self, rows):
# if self.processors:
# processed_rows = []
# for row in rows:
# for processor in self.processors:
# if not callable(processor):
# raise TypeError(f"Processor should be a callable. Got {processor}")
# row = [processor(value, index=index) for index, value in enumerate(row)]
# processed_rows.append(row)
# return processed_rows
# return rows
def _run_processors(self, rows):
new_row = []
processed_rows = []
if self.processors:
for row in rows:
for processor in self.processors:
if not callable(processor):
raise TypeError(f"Processor should be a callable. Got {processor}.")
if not new_row:
new_row = processor(row)
else:
new_row = processor(new_row)
processed_rows.append(new_row)
new_row = []
return processed_rows
else:
return rows
def get_row(self, index) -> Tag:
try:
return self._raw_rows[index]
except IndexError:
return None
def resolve(self, soup: BeautifulSoup, include_links=False,
limit_to_columns: list=[]):
# Sometimes by accident the "soup" object
# could be None, for example when an object
# was not found on the page.
if soup is None:
raise ValueError(("The BeautifulSoup object is None certainly "
"because the table you were looking for does not exist on the HTML page. "
"Inspect the page and ensure the object exists."))
if self.attrs is None:
# There might be a case where the user
# does not pass the whole HTML page but just
# the section that was parsed beforehand (e.g. the table HTML object)
# directly and doing a find on that soup object
# returns None. In that case, we should just test
# if the name equals "table" and continue from there
if soup.name == 'table':
self._table = soup
else:
self._table = soup.find('table')
if self._table is None:
# In case the user passes the table itself
# as opposed to the whole HTML page, check
# the elements tag and assign it
if soup.name == 'table':
self._table = soup
else:
return self._raw_rows
self.attrs = self._table.attrs
if self.class_or_id_name is not None and self.attrs:
# table_class = self.attrs.get('class', [])
table_class = self._table.get_attribute_list('class', [])
table_class.extend(self._table.get_attribute_list('id', []))
if self.class_or_id_name not in table_class:
self._table = self._table.find_next('table')
if self._table is None:
return self._raw_rows
self.resolve(self._table)
# If no table, just return
# an empty array instead of
# raising an error or showing
# an error
if self._table is None:
return self.values
if not self._table.is_empty_element:
# Option: 1
tbody = self._table.find('tbody')
if tbody is None:
self._raw_rows = self._get_rows(self._table)
else:
if tbody.is_empty_element:
self._raw_rows = self._get_rows(self._table)
else:
self._raw_rows = self._get_rows(tbody)
extracted_values = self._extract_values(
self._raw_rows,
include_links=include_links
)
self.values = self._run_processors(extracted_values)
# Option: 2
# recomposed_table = []
# thead = self._table.find('thead')
# raw_headers = thead.find_all('th')
# self._raw_rows = self._get_rows(self._table.find('tbody'))
# theader_values = self._extract_values(raw_headers)
# tbody_values = self._extract_values(self._raw_rows, include_links=include_links)
# recomposed_table.extend(theader_values)
# recomposed_table.extend(tbody_values)
# self.values = self._run_processors(recomposed_table)
return self.values
def resolve_to_dataframe(self, soup: BeautifulSoup=None, columns: list=[]):
import pandas
if soup is not None:
self.resolve(soup)
if columns:
return pandas.DataFrame(data=self.values, columns=columns)
return pandas.DataFrame(data=self.values)
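# Hedged usage sketch (not part of the original module): a self-contained run
# of TableExtractor on an in-memory HTML table. The HTML string, the 'scores'
# class name and the to_int processor are invented for illustration, and it is
# assumed that deep_clean passes already clean cell text through unchanged.
def _table_extractor_usage_sketch():
    html = (
        "<table class='scores'>"
        "<tr><td>Name</td><td>Points</td></tr>"
        "<tr><td>Alice</td><td>10</td></tr>"
        "<tr><td>Bob</td><td>7</td></tr>"
        "</table>"
    )
    soup = BeautifulSoup(html, "html.parser")

    def to_int(row):
        # Row-level processor: cast purely numeric cells to int
        return [int(v) if isinstance(v, str) and v.isdigit() else v for v in row]

    extractor = TableExtractor(class_or_id_name="scores", header_position=0,
                               processors=[to_int])
    rows = extractor.resolve(soup)
    # rows should come out roughly as [['Name', 'Points'], ['Alice', 10], ['Bob', 7]];
    # header_position only matters when calling get_values, which drops that row.
    return rows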
class MultiTablesExtractor(Extractor):
"""
Extract all the tables on a given page at once
"""
def __init__(self, with_attrs: list = [], header_position: int = None):
self.with_attrs = with_attrs
self.tables_list = OrderedDict()
self._raw_tables = None
def | |
<gh_stars>0
from .connections import connections
from .search import Search
from .exceptions import IllegalOperation
class Index(object):
def __init__(self, name, using='default'):
"""
:arg name: name of the index
:arg using: connection alias to use, defaults to ``'default'``
"""
self._name = name
self._doc_types = {}
self._mappings = {}
self._using = using
self._settings = {}
self._aliases = {}
self._analysis = {}
def clone(self, name, using=None):
"""
Create a copy of the instance with another name or connection alias.
Useful for creating multiple indices with shared configuration::
i = Index('base-index')
i.settings(number_of_shards=1)
i.create()
i2 = i.clone('other-index')
i2.create()
:arg name: name of the index
:arg using: connection alias to use, defaults to ``'default'``
"""
i = Index(name, using=using or self._using)
for attr in ('_doc_types', '_mappings', '_settings', '_aliases',
'_analysis'):
setattr(i, attr, getattr(self, attr).copy())
return i
def _get_connection(self):
return connections.get_connection(self._using)
connection = property(_get_connection)
def mapping(self, mapping):
"""
Associate a mapping (an instance of
:class:`~elasticsearch_dsl.Mapping`) with this index.
This means that, when this index is created, it will contain the
mappings for the document type defined by those mappings.
"""
self._mappings[mapping.doc_type] = mapping
def doc_type(self, doc_type):
"""
Associate a :class:`~elasticsearch_dsl.DocType` subclass with an index.
This means that, when this index is created, it will contain the
mappings for the ``DocType``. If the ``DocType`` class doesn't have a
default index yet, name of the ``Index`` instance will be used. Can be
used as a decorator::
i = Index('blog')
@i.doc_type
class Post(DocType):
title = Text()
# create the index, including Post mappings
i.create()
# .search() will now return a Search object that will return
# properly deserialized Post instances
s = i.search()
"""
name = doc_type._doc_type.name
self._doc_types[name] = doc_type
self._mappings[name] = doc_type._doc_type.mapping
if not doc_type._doc_type.index:
doc_type._doc_type.index = self._name
return doc_type # to use as decorator???
def settings(self, **kwargs):
"""
Add settings to the index::
i = Index('i')
i.settings(number_of_shards=1, number_of_replicas=0)
Multiple calls to ``settings`` will merge the keys, later overriding
the earlier.
"""
self._settings.update(kwargs)
return self
def aliases(self, **kwargs):
"""
Add aliases to the index definition::
i = Index('blog-v2')
i.aliases(blog={}, published={'filter': Q('term', published=True)})
"""
self._aliases.update(kwargs)
return self
def analyzer(self, analyzer):
"""
Explicitly add an analyzer to an index. Note that all custom analyzers
defined in mappings will also be created. This is useful for search analyzers.
Example::
from elasticsearch_dsl import analyzer, tokenizer
my_analyzer = analyzer('my_analyzer',
tokenizer=tokenizer('trigram', 'nGram', min_gram=3, max_gram=3),
filter=['lowercase']
)
i = Index('blog')
i.analyzer(my_analyzer)
"""
d = analyzer.get_analysis_definition()
# empty custom analyzer, probably already defined out of our control
if not d:
return
# merge the definition
# TODO: conflict detection/resolution
for key in d:
self._analysis.setdefault(key, {}).update(d[key])
def search(self):
"""
Return a :class:`~elasticsearch_dsl.Search` object searching over this
index and its ``DocType``\s.
"""
return Search(
using=self._using,
index=self._name,
doc_type=[self._doc_types.get(k, k) for k in self._mappings]
)
def _get_mappings(self):
analysis, mappings = {}, {}
for mapping in self._mappings.values():
mappings.update(mapping.to_dict())
a = mapping._collect_analysis()
# merge the definition
# TODO: conflict detection/resolution
for key in a:
analysis.setdefault(key, {}).update(a[key])
return mappings, analysis
def to_dict(self):
out = {}
if self._settings:
out['settings'] = self._settings
if self._aliases:
out['aliases'] = self._aliases
mappings, analysis = self._get_mappings()
if mappings:
out['mappings'] = mappings
if analysis or self._analysis:
for key in self._analysis:
analysis.setdefault(key, {}).update(self._analysis[key])
out.setdefault('settings', {})['analysis'] = analysis
return out
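# Hedged illustration (not part of the original module): for an index defined
# as in the comment below, to_dict() would produce roughly the body shown.
# The index name, setting and alias are invented for illustration.
#
#     i = Index('blog-v2')
#     i.settings(number_of_shards=1)
#     i.aliases(blog={})
#     i.to_dict()
#     # -> {'settings': {'number_of_shards': 1}, 'aliases': {'blog': {}}}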
def create(self, **kwargs):
"""
Creates the index in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.create`` unchanged.
"""
self.connection.indices.create(index=self._name, body=self.to_dict(), **kwargs)
def is_closed(self):
state = self.connection.cluster.state(index=self._name, metric='metadata')
return state['metadata']['indices'][self._name]['state'] == 'close'
def save(self):
"""
Sync the index definition with elasticsearch, creating the index if it
doesn't exist and updating its settings and mappings if it does.
Note some settings and mapping changes cannot be done on an open
index (or at all on an existing index) and for those this method will
fail with the underlying exception.
"""
if not self.exists():
return self.create()
body = self.to_dict()
settings = body.pop('settings', {})
analysis = settings.pop('analysis', None)
if analysis:
if self.is_closed():
# closed index, update away
settings['analysis'] = analysis
else:
# compare analysis definition, if all analysis objects are
# already defined as requested, skip analysis update and
# proceed, otherwise raise IllegalOperation
existing_analysis = self.get_settings()[self._name]['settings']['index'].get('analysis', {})
if any(
existing_analysis.get(section, {}).get(k, None) != analysis[section][k]
for section in analysis
for k in analysis[section]
):
raise IllegalOperation(
'You cannot update analysis configuration on an open index, you need to close index %s first.' % self._name)
# try and update the settings
if settings:
self.put_settings(body=settings)
# update the mappings, any conflict in the mappings will result in an
# exception
mappings = body.pop('mappings', {})
if mappings:
for doc_type in mappings:
self.put_mapping(doc_type=doc_type, body=mappings[doc_type])
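# Hedged illustration (not part of the original module): the intended save()
# workflow; the index name 'articles' and the settings are invented.
#
#     i = Index('articles')
#     i.settings(number_of_replicas=0)
#     i.save()                        # creates 'articles' if it does not exist yet
#     i.settings(number_of_replicas=1)
#     i.save()                        # pushes the changed setting to the live index
#     # Changing analysis on an open index raises IllegalOperation; close the
#     # index first, save, then reopen it.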
def analyze(self, **kwargs):
"""
Perform the analysis process on a text and return the tokens breakdown
of the text.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.analyze`` unchanged.
"""
return self.connection.indices.analyze(index=self._name, **kwargs)
def refresh(self, **kwargs):
"""
Performs a refresh operation on the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.refresh`` unchanged.
"""
return self.connection.indices.refresh(index=self._name, **kwargs)
def flush(self, **kwargs):
"""
Performs a flush operation on the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.flush`` unchanged.
"""
return self.connection.indices.flush(index=self._name, **kwargs)
def get(self, **kwargs):
"""
The get index API allows to retrieve information about the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.get`` unchanged.
"""
return self.connection.indices.get(index=self._name, **kwargs)
def open(self, **kwargs):
"""
Opens the index in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.open`` unchanged.
"""
return self.connection.indices.open(index=self._name, **kwargs)
def close(self, **kwargs):
"""
Closes the index in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.close`` unchanged.
"""
return self.connection.indices.close(index=self._name, **kwargs)
def delete(self, **kwargs):
"""
Deletes the index in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.delete`` unchanged.
"""
return self.connection.indices.delete(index=self._name, **kwargs)
def exists(self, **kwargs):
"""
Returns ``True`` if the index already exists in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.exists`` unchanged.
"""
return self.connection.indices.exists(index=self._name, **kwargs)
def exists_type(self, **kwargs):
"""
Check if a type/types exists in the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.exists_type`` unchanged.
"""
return self.connection.indices.exists_type(index=self._name, **kwargs)
def put_mapping(self, **kwargs):
"""
Register specific mapping definition for a specific type.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.put_mapping`` unchanged.
"""
return self.connection.indices.put_mapping(index=self._name, **kwargs)
def get_mapping(self, **kwargs):
"""
Retrieve specific mapping definition for a specific type.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.get_mapping`` unchanged.
"""
return self.connection.indices.get_mapping(index=self._name, **kwargs)
def get_field_mapping(self, **kwargs):
"""
Retrieve mapping definition of a specific field.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.get_field_mapping`` unchanged.
"""
return self.connection.indices.get_field_mapping(index=self._name, **kwargs)
def put_alias(self, **kwargs):
"""
Create an alias for the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.put_alias`` unchanged.
"""
return self.connection.indices.put_alias(index=self._name, **kwargs)
def exists_alias(self, **kwargs):
"""
Return a boolean indicating whether given alias exists for this index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.exists_alias`` unchanged.
"""
return self.connection.indices.exists_alias(index=self._name, **kwargs)
def get_alias(self, **kwargs):
"""
Retrieve a specified alias.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.get_alias`` unchanged.
"""
return self.connection.indices.get_alias(index=self._name, **kwargs)
def delete_alias(self, **kwargs):
"""
Delete specific alias.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.delete_alias`` unchanged.
"""
return self.connection.indices.delete_alias(index=self._name, **kwargs)
def get_settings(self, **kwargs):
"""
Retrieve settings for the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.get_settings`` unchanged.
"""
return self.connection.indices.get_settings(index=self._name, **kwargs)
def put_settings(self, **kwargs):
"""
Change specific index level settings in real time.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.put_settings`` unchanged.
"""
return self.connection.indices.put_settings(index=self._name, **kwargs)
def stats(self, **kwargs):
"""
Retrieve statistics on different operations happening on the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.stats`` unchanged.
"""
return self.connection.indices.stats(index=self._name, **kwargs)
def segments(self, **kwargs):
"""
Provide low level segments information that a Lucene index (shard
level) is built with.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.segments`` unchanged.
"""
return self.connection.indices.segments(index=self._name, **kwargs)
def validate_query(self, **kwargs):
"""
Validate a potentially expensive query without executing it.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.validate_query`` unchanged.
"""
return self.connection.indices.validate_query(index=self._name, **kwargs)
def clear_cache(self, **kwargs):
"""
Clear all caches, or specific caches associated with the index.
<reponame>jayvdb/pypi_librarian
# coding=utf-8
"""
Build tasks
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
import json
import os
import subprocess
import sys
from pynt import task
from pyntcontrib import execute, safe_cd
from semantic_version import Version
PROJECT_NAME = "pypi_librarian"
SRC = '.'
# for multitargeting
PYTHON = "python"
IS_DJANGO = False
IS_TRAVIS = 'TRAVIS' in os.environ
if IS_TRAVIS:
PIPENV = ""
else:
PIPENV = "pipenv run"
GEM_FURY = ""
CURRENT_HASH = None
MAC_LIBS = ":"
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
from build_utils import check_is_aws, skip_if_no_change, execute_with_environment, get_versions, execute_get_text, \
run_gitleaks, timed
# try to stop the "you are already in a pipenv shell noise.
os.environ["PIPENV_VERBOSITY"] = "-1"
@task()
@skip_if_no_change("git_leaks")
@timed()
def git_leaks():
run_gitleaks()
@task()
@skip_if_no_change("git_secrets")
@timed()
def git_secrets():
"""
Install git secrets if possible.
"""
if check_is_aws():
# no easy way to install git secrets on ubuntu.
return
if IS_TRAVIS:
# nothing is edited on travis
return
try:
commands = ["git secrets --install", "git secrets --register-aws"]
for command in commands:
cp = subprocess.run(command.split(" "),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False, check=True)
for stream in [cp.stdout, cp.stderr]:
if stream:
for line in stream.decode().split("\n"):
print("*" + line)
except subprocess.CalledProcessError as cpe:
print(cpe)
installed = False
for stream in [cpe.stdout, cpe.stderr]:
if stream:
for line in stream.decode().split("\n"):
print("-" + line)
if "commit-msg already exists" in line:
print("git secrets installed.")
installed = True
break
if not installed:
raise
execute(*("git secrets --scan".strip().split(" ")))
@task()
@timed()
def clean():
"""
Delete all outputs. Blank until I think of a better way to do this.
"""
return
@task()
@skip_if_no_change("formatting")
@timed()
def formatting():
with safe_cd(SRC):
if sys.version_info < (3, 6):
print("Black doesn't work on python 2")
return
command = "{0} black {1}".format(PIPENV, PROJECT_NAME).strip()
print(command)
result = execute_get_text(command)
assert result
changed = []
for line in result.split("\n"):
if "reformatted " in line:
file = line[len("reformatted "):].strip()
changed.append(file)
for change in changed:
command = "git add {0}".format(change)
print(command)
execute(*(command.split(" ")))
@task()
@skip_if_no_change("compile_py")
@timed()
def compile_py():
"""
Catch the worst syntax errors
"""
with safe_cd(SRC):
execute(PYTHON, "-m", "compileall", PROJECT_NAME)
@task(formatting, compile_py)
@skip_if_no_change("prospector")
@timed()
def prospector():
"""
Catch a few things with a non-strict prospector run
"""
with safe_cd(SRC):
command = "{0} prospector {1} --profile {1}_style --pylint-config-file=pylintrc.ini --profile-path=.prospector".format(
PIPENV, PROJECT_NAME).strip().replace(" ", " ")
print(command)
execute(*(command
.split(" ")))
@task()
@skip_if_no_change("detect_secrets")
@timed()
def detect_secrets():
"""
Call detect-secrets tool
"""
# use
# blah blah = "foo" # pragma: whitelist secret
# to ignore a false positive
errors_file = "detect-secrets-results.txt"
print(execute_get_text("pwd"))
command = "{0} detect-secrets --scan --base64-limit 4 --exclude .idea|.js|.min.js|.html|.xsd|" \
"lock.json|synced_folders|.scss|Pipfile.lock|" \
"lint.txt|{1}".format(PIPENV, errors_file).strip()
print(command)
bash_process = subprocess.Popen(command.split(" "),
# shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
foo = bash_process.wait()
out, err = bash_process.communicate() # wait
with open(errors_file, "w+") as file_handle:
if len(out) == 0:
print("Warning- no output from detect secrets. Happens with git hook, but not from ordinary command line.")
return
file_handle.write(out.decode())
with open(errors_file) as f:
try:
data = json.load(f)
except Exception:
print("Can't read json")
exit(-1)
return
if data["results"]:
for result in data["results"]:
print(result)
print("detect-secrets has discovered high entropy strings, possibly passwords?")
exit(-1)
@task(compile_py, formatting, prospector)
@skip_if_no_change("lint")
@timed()
def lint():
"""
Lint
"""
with safe_cd(SRC):
if os.path.isfile("lint.txt"):
execute("rm", "lint.txt")
with safe_cd(SRC):
if IS_DJANGO:
django_bits = "--load-plugins pylint_django "
else:
django_bits = ""
# command += "{0}--rcfile=pylintrc.ini {1}".format(django_bits, PROJECT_NAME).split(" ")
command = "{0} pylint {1} --rcfile=pylintrc.ini {2}".format(PIPENV, django_bits, PROJECT_NAME) \
.strip() \
.replace(" ", " ")
print(command)
command = command.split(" ")
# keep out of src tree, causes extraneous change detections
lint_output_file_name = "lint.txt"
with open(lint_output_file_name, "w") as outfile:
env = config_pythonpath()
subprocess.call(command, stdout=outfile, env=env)
fatal_errors = sum(1 for line in open(lint_output_file_name)
if "no-member" in line or \
"no-name-in-module" in line or \
"import-error" in line)
if fatal_errors > 0:
for line in open(lint_output_file_name):
if "no-member" in line or \
"no-name-in-module" in line or \
"import-error" in line:
print(line)
print("Fatal lint errors : {0}".format(fatal_errors))
exit(-1)
cutoff = 100
num_lines = sum(1 for line in open(lint_output_file_name)
if "*************" not in line
and "---------------------" not in line
and "Your code has been rated at" not in line)
if num_lines > cutoff:
raise TypeError("Too many lines of lint : {0}, max {1}".format(num_lines, cutoff))
@task(lint)
@skip_if_no_change("nose_tests")
@timed()
def nose_tests():
"""
Nose tests
"""
# with safe_cd(SRC):
if IS_DJANGO:
command = "{0} manage.py test -v 2".format(PYTHON)
# We'd expect this to be MAC or a build server.
my_env = config_pythonpath()
execute_with_environment(command, env=my_env)
else:
my_env = config_pythonpath()
if IS_TRAVIS:
command = "{0} -m nose {1}".format(PYTHON, "test").strip()
else:
command = "{0} {1} -m nose {2}".format(PIPENV, PYTHON, "test").strip()
print(command)
execute_with_environment(command, env=my_env)
def config_pythonpath():
"""
Add to PYTHONPATH
"""
if check_is_aws():
env = "DEV"
else:
env = "MAC"
my_env = {'ENV': env,
"PIPENV_VERBOSITY": "-1"}
for key, value in os.environ.items():
my_env[key] = value
my_env["PYTHONPATH"] = my_env.get("PYTHONPATH",
"") + MAC_LIBS
print(my_env["PYTHONPATH"])
return my_env
@task()
@timed()
def coverage():
"""
Coverage, which is a bit redundant with nose test
"""
print("Coverage tests always re-run")
with safe_cd(SRC):
my_env = config_pythonpath()
command = "{0} py.test {1} --cov={2} --cov-report html:coverage --cov-fail-under 40 --verbose".format(
PIPENV,
"test", PROJECT_NAME)
execute_with_environment(command, my_env)
@task()
@skip_if_no_change("docs")
@timed()
def docs():
"""
Docs
"""
with safe_cd(SRC):
with safe_cd("docs"):
my_env = config_pythonpath()
command = "{0} make html".format(PIPENV).strip()
print(command)
execute_with_environment(command, env=my_env)
@task()
@timed()
def pip_check():
"""
Are packages ok?
"""
execute("pip", "check")
execute("twine", "check")
if PIPENV and not IS_TRAVIS:
execute("pipenv", "check")
execute("safety", "check", "-r", "requirements_dev.txt")
@task()
@timed()
def compile_mark_down():
"""
Convert MD to RST
"""
# print("Not compiling README.md because moderately complex MD makes pypi rst parser puke.")
with safe_cd(SRC):
if IS_TRAVIS:
command = "pandoc --from=markdown --to=rst --output=README.rst README.md".strip().split(
" ")
else:
command = "{0} pandoc --from=markdown --to=rst --output=README.rst README.md".format(PIPENV).strip().split(
" ")
execute(*(command))
@task()
@skip_if_no_change("mypy")
@timed()
def mypy():
"""
Are types ok?
"""
if sys.version_info < (3, 4):
print("Mypy doesn't work on python < 3.4")
return
if IS_TRAVIS:
command = "{0} -m mypy {1} --ignore-missing-imports --strict".format(PYTHON, PROJECT_NAME).strip()
else:
command = "{0} mypy {1} --ignore-missing-imports --strict".format(PIPENV, PROJECT_NAME).strip()
bash_process = subprocess.Popen(command.split(" "),
# shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
out, err = bash_process.communicate() # wait
mypy_file = "mypy_errors.txt"
with open(mypy_file, "w+") as lint_file:
lines = out.decode().split("\n")
for line in lines:
if "build_utils.py" in line:
continue
if "test.py" in line:
continue
if "tests.py" in line:
continue
if "/test_" in line:
continue
if "/tests_" in line:
continue
else:
lint_file.writelines([line + "\n"])
num_lines = sum(1 for line in open(mypy_file) if line and line.strip(" \n"))
max_lines = 25
if num_lines > max_lines:
raise TypeError("Too many lines of mypy : {0}, max {1}".format(num_lines, max_lines))
@task()
@timed()
def pin_dependencies():
"""
Create requirement*.txt
"""
with safe_cd(SRC):
execute(*("{0} pipenv_to_requirements".format(PIPENV).strip().split(" ")))
@task()
@timed()
def jiggle_version():
with safe_cd(SRC):
command = "{0} jiggle_version here --module={1}".format(PIPENV, PROJECT_NAME).strip()
execute(*(command.split(" ")))
@task()
@timed()
def check_setup_py():
# deprecated in favor of twine check.
return
# if
# ValueError: ZIP does not support timestamps before 1980
# then run this to ID
# find . -mtime +13700 -ls
with safe_cd(SRC):
if IS_TRAVIS:
execute(PYTHON, *("setup.py check -r -s".split(" ")))
else:
execute(*("{0} {1} setup.py check -r -s".format(PIPENV, PYTHON).strip().split(" ")))
@task()
@skip_if_no_change("vulture", expect_files="dead_code.txt")
@timed()
def dead_code():
"""
This also finds code you are working on today!
"""
with safe_cd(SRC):
if IS_TRAVIS:
command = "{0} vulture {1}".format(PYTHON, PROJECT_NAME).strip().split()
else:
command = "{0} vulture {1}".format(PIPENV, PROJECT_NAME).strip().split()
output_file_name = "dead_code.txt"
with open(output_file_name, "w") as outfile:
env = config_pythonpath()
subprocess.call(command, stdout=outfile, env=env)
cutoff = 1000
print("High cutt off for dead code because not even out of beta")
num_lines = sum(1 for line in open(output_file_name) if line)
if num_lines > cutoff:
print("Too many lines of dead code : {0}, max {1}".format(num_lines, cutoff))
exit(-1)
@task(compile_mark_down, formatting, mypy, detect_secrets, git_secrets, dead_code, nose_tests, coverage, compile_py,
lint,
check_setup_py, pin_dependencies, jiggle_version) # docs ... later
@skip_if_no_change("package")
@timed()
def package():
"""
package, but don't upload
"""
with safe_cd(SRC):
for folder in ["build", "dist", PROJECT_NAME + ".egg-info"]:
execute("rm", "-rf", folder)
with safe_cd(SRC):
execute(PYTHON, "setup.py", "sdist", "--formats=gztar,zip")
with safe_cd(SRC):
execute("twine", "check", "dist/*.gz")
@task(package)
@timed()
def gemfury():
"""
Push to gem fury, a repo with private options
"""
# fury login
# fury push dist/*.gz --as=YOUR_ACCT
# fury push dist/*.whl --as=YOUR_ACCT
cp = subprocess.run(("fury login --as={0}".format(GEM_FURY).split(" ")),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False, check=True)
print(cp.stdout)
about = {}
with open(os.path.join(SRC, PROJECT_NAME, "__version__.py")) as f:
exec(f.read(), about)
version = Version(about["__version__"])
print("Have version : " + str(version))
print("Preparing to upload")
if version not in get_versions():
for kind in ["gz", "whl"]:
try:
files = glob.glob("{0}dist/*.{1}".format(SRC.replace(".", ""), kind))
for file_name in files:
cp = subprocess.run(("fury push {0} --as={1}".format(file_name, GEM_FURY).split(" ")),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False, check=True)
print("result of fury push")
for stream in [cp.stdout, cp.stderr]:
if stream:
for line in stream.decode().split("\n"):
print(line)
except subprocess.CalledProcessError as cpe:
print("result of fury push- got error")
for stream in [cp.stdout, cp.stderr]:
if stream:
for line in stream.decode().split("\n"):
print(line)
<reponame>Bingyan-Liu/DDSC-NET<filename>evaluation_metrics_for_segmentation.py
import numpy as np
from scipy import misc
from os import path, makedirs
from utils.file_management import get_filenames, save_csv_mean_segmentation_performance, save_csv_segmentation_table
EPS = 1e-7
def dice_coefficient_1(binary_segmentation, binary_gt_label,binary_segmentation_not,binary_gt_label_not):
'''
Compute the Dice coefficient between two binary segmentation.
Dice coefficient is defined as here: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
Input:
binary_segmentation: binary 2D numpy array representing the region of interest as segmented by the algorithm
binary_gt_label: binary 2D numpy array representing the region of interest as provided in the database
binary_segmentation_not: logical negation of binary_segmentation, used to compute TN
binary_gt_label_not: logical negation of binary_gt_label, used to compute TN
Output:
dice_value: Dice coefficient between the segmentation and the ground truth
'''
# turn all variables to booleans, just in case
binary_segmentation = np.asarray(binary_segmentation, dtype=np.bool)
binary_gt_label = np.asarray(binary_gt_label, dtype=np.bool)
# compute the intersection
intersection = np.logical_and(binary_segmentation, binary_gt_label)
# compute the TP+FP+FN
OR_logical = np.logical_or(binary_segmentation, binary_gt_label)
#compute the TN
TN = np.logical_and(binary_segmentation_not, binary_gt_label_not)
# count the number of True pixels in the binary segmentation
segmentation_pixels = float(np.sum(binary_segmentation.flatten()))
# same for the ground truth
gt_label_pixels = float(np.sum(binary_gt_label.flatten()))
# same for the intersection
intersection = float(np.sum(intersection.flatten()))
#same for the TN
TN = float(np.sum(TN.flatten()))
# same for the tp+fp+fn
OR_logical = float(np.sum(OR_logical.flatten()))
# count the number of TN+FP pixels in the gt_label
gt_label_not_pixels = float(np.sum(binary_gt_label_not.flatten()))
# compute the Dice coefficient
dice_value = 2 * intersection / (segmentation_pixels + gt_label_pixels)
# compute the JACCARD
JACCARD = intersection / OR_logical
# compute the accuracy
Accuracy = (intersection + TN) / (TN + OR_logical)
# compute the Sensitivity
Sen = intersection / gt_label_pixels
# compute the SPC
Spc = TN / gt_label_not_pixels
# compute the precision
Pre = intersection / segmentation_pixels
# return it
return dice_value,JACCARD,Accuracy,Sen,Pre
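# Hedged worked example (not part of the original script): dice_coefficient_1
# needs both the masks and their logical negations. The 2x2 arrays below are
# invented purely for illustration, and a numpy version where np.bool (used
# above) is still available is assumed.
def _dice_coefficient_1_sketch():
    seg = np.array([[True, True], [False, False]])
    gt = np.array([[True, False], [False, False]])
    dice, jac, acc, sen, pre = dice_coefficient_1(seg, gt, ~seg, ~gt)
    # TP=1, FP=1, FN=0, TN=2 ->
    # Dice=2/3, Jaccard=1/2, Accuracy=3/4, Sensitivity=1/1, Precision=1/2
    assert abs(dice - 2.0 / 3.0) < 1e-6 and abs(acc - 0.75) < 1e-6
    return dice, jac, acc, sen, pre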
def evaluate_binary_segmentation_1(segmentation, gt_label):
'''
Compute the evaluation metrics of the REFUGE challenge by comparing the segmentation with the ground truth
Input:
segmentation: binary 2D numpy array representing the segmentation, with 0: optic cup, 128: optic disc, 255: elsewhere.
gt_label: binary 2D numpy array representing the ground truth annotation, with the same format
Output:
cup_dice, cup_jac, cup_acc, cup_sen, cup_pre: Dice, Jaccard, accuracy, sensitivity and precision for the optic cup
disc_dice, disc_jac, disc_acc, disc_sen, disc_pre: the same metrics for the optic disc
cdr: absolute error between the vertical cup to disc ratio as estimated from the segmentation vs. the gt_label, in pixels
'''
# compute the Dice coefficient for the optic cup
cup_dice,cup_jac,cup_acc,cup_sen,cup_pre = dice_coefficient_1(segmentation==0, gt_label==0,segmentation>0,gt_label>0)
# compute the Dice coefficient for the optic disc
disc_dice,disc_jac,disc_acc,disc_sen,disc_pre = dice_coefficient_1(segmentation<255, gt_label<255,segmentation==255,gt_label==255)
# compute the absolute error between the cup to disc ratio estimated from the segmentation vs. the gt label
cdr = absolute_error(vertical_cup_to_disc_ratio(segmentation), vertical_cup_to_disc_ratio(gt_label))
return cup_dice,cup_jac,cup_acc,cup_sen,cup_pre,disc_dice,disc_jac,disc_acc,disc_sen,disc_pre,cdr
def dice_coefficient(binary_segmentation, binary_gt_label):
'''
Compute the Dice coefficient between two binary segmentation.
Dice coefficient is defined as here: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
Input:
binary_segmentation: binary 2D numpy array representing the region of interest as segmented by the algorithm
binary_gt_label: binary 2D numpy array representing the region of interest as provided in the database
Output:
dice_value: Dice coefficient between the segmentation and the ground truth
'''
# turn all variables to booleans, just in case
binary_segmentation = np.asarray(binary_segmentation, dtype=np.bool)
binary_gt_label = np.asarray(binary_gt_label, dtype=np.bool)
# compute the intersection
intersection = np.logical_and(binary_segmentation, binary_gt_label)
# count the number of True pixels in the binary segmentation
segmentation_pixels = float(np.sum(binary_segmentation.flatten()))
# same for the ground truth
gt_label_pixels = float(np.sum(binary_gt_label.flatten()))
# same for the intersection
intersection = float(np.sum(intersection.flatten()))
# compute the Dice coefficient
dice_value = 2 * intersection / (segmentation_pixels + gt_label_pixels)
# return it
return dice_value
def vertical_diameter(binary_segmentation):
'''
Get the vertical diameter from a binary segmentation.
The vertical diameter is defined as the "fattest" area of the binary_segmentation parameter.
Input:
binary_segmentation: a boolean 2D numpy array representing a region of interest.
Output:
diameter: the vertical diameter of the structure, defined as the largest diameter between the upper and the lower interfaces
'''
# turn the variable to boolean, just in case
binary_segmentation = np.asarray(binary_segmentation, dtype=np.bool)
# get the sum of the pixels in the vertical axis
vertical_axis_diameter = np.sum(binary_segmentation, axis=0)
# pick the maximum value
diameter = np.max(vertical_axis_diameter)
# return it
return float(diameter)
def vertical_cup_to_disc_ratio(segmentation):
'''
Compute the vertical cup-to-disc ratio from a given labelling map.
The vertical cup to disc ratio is defined as here: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1722393/pdf/v082p01118.pdf
Input:
segmentation: binary 2D numpy array representing a segmentation, with 0: optic cup, 128: optic disc, 255: elsewhere.
Output:
cdr: vertical cup to disc ratio
'''
# compute the cup diameter
cup_diameter = vertical_diameter(segmentation==0)
# compute the disc diameter
disc_diameter = vertical_diameter(segmentation<255)
return cup_diameter / (disc_diameter + EPS)
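# Hedged worked example (not part of the original script): the vertical
# cup-to-disc ratio on a tiny synthetic label map using the 0/128/255
# convention described above. The 6x3 map is invented for illustration, and a
# numpy version where np.bool (used above) is still available is assumed.
def _vertical_cdr_sketch():
    toy = np.full((6, 3), 255)
    toy[1:5, 1] = 128   # optic disc: 4 pixels tall in column 1
    toy[2:4, 1] = 0     # optic cup: 2 pixels tall inside the disc
    # cup diameter = 2, disc diameter = 4, so the CDR is approximately 0.5
    cdr = vertical_cup_to_disc_ratio(toy)
    assert abs(cdr - 0.5) < 1e-3
    return cdr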
def absolute_error(predicted, reference):
'''
Compute the absolute error between a predicted and a reference outcomes.
Input:
predicted: a float value representing a predicted outcome
reference: a float value representing the reference outcome
Output:
abs_err: the absolute difference between predicted and reference
'''
return abs(predicted - reference)
def evaluate_binary_segmentation(segmentation, gt_label):
'''
Compute the evaluation metrics of the REFUGE challenge by comparing the segmentation with the ground truth
Input:
segmentation: binary 2D numpy array representing the segmentation, with 0: optic cup, 128: optic disc, 255: elsewhere.
gt_label: binary 2D numpy array representing the ground truth annotation, with the same format
Output:
cup_dice: Dice coefficient for the optic cup
disc_dice: Dice coefficient for the optic disc
cdr: absolute error between the vertical cup to disc ratio as estimated from the segmentation vs. the gt_label, in pixels
'''
# compute the Dice coefficient for the optic cup
cup_dice = dice_coefficient(segmentation==0, gt_label==0)
# compute the Dice coefficient for the optic disc
disc_dice = dice_coefficient(segmentation<255, gt_label<255)
# compute the absolute error between the cup to disc ratio estimated from the segmentation vs. the gt label
cdr = absolute_error(vertical_cup_to_disc_ratio(segmentation), vertical_cup_to_disc_ratio(gt_label))
return cup_dice, disc_dice, cdr
def generate_table_of_results(image_filenames, segmentation_folder, gt_folder, is_training=False):
'''
Generates a table with image_filename, cup_dice, disc_dice and cdr values
Input:
image_filenames: a list of strings with the names of the images.
segmentation_folder: a string representing the full path to the folder where the segmentation files are
gt_folder: a string representing the full path to the folder where the ground truth annotation files are
is_training: a boolean value indicating if the evaluation is performed on training data or not
Output:
image_filenames: same as the input parameter
cup_dices: a numpy array with the same length as the image_filenames list, with the Dice coefficient for each optic cup
disc_dices: a numpy array with the same length as the image_filenames list, with the Dice coefficient for each optic disc
ae_cdrs: a numpy array with the same length as the image_filenames list, with the absolute error of the vertical cup to disc ratio
'''
# initialize an array for the Dice coefficients of the optic cups
cup_dices = np.zeros(len(image_filenames), dtype=np.float)
# initialize an array for the Dice coefficients of the optic discs
disc_dices = np.zeros(len(image_filenames), dtype=np.float)
# initialize an array for the absolute errors of the vertical cup to disc ratios
ae_cdrs = np.zeros(len(image_filenames), dtype=np.float)
# iterate for each image filename
for i in range(len(image_filenames)):
# read the segmentation
segmentation = misc.imread(path.join(segmentation_folder, image_filenames[i]))
if len(segmentation.shape) > 2:
segmentation = segmentation[:,:,0]
# read the gt
if is_training:
gt_filename = path.join(gt_folder, 'Glaucoma', image_filenames[i])
if path.exists(gt_filename):
gt_label = misc.imread(gt_filename)
else:
gt_filename = path.join(gt_folder, 'Non-Glaucoma', image_filenames[i])
if path.exists(gt_filename):
gt_label = misc.imread(gt_filename)
else:
raise ValueError('Unable to find {} in your training folder. Make sure that you have the folder organized as provided in our website.'.format(image_filenames[i]))
else:
gt_filename = path.join(gt_folder, image_filenames[i])
if path.exists(gt_filename):
gt_label = misc.imread(gt_filename)
else:
raise ValueError('Unable to find {} in your ground truth folder. If you are using training data, make sure to use the parameter is_training in True.'.format(image_filenames[i]))
# evaluate the results and assign to the corresponding row in the table
cup_dices[i], disc_dices[i], ae_cdrs[i] = evaluate_binary_segmentation(segmentation, gt_label)
# return the columns of the table
return image_filenames, cup_dices, disc_dices, ae_cdrs
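# Hedged usage sketch (not part of the original script): how the table of
# per-image results might be generated and summarised. The folder paths and
# file names below are placeholders, not real locations, and the scipy.misc
# imread stack the original script targets is assumed to be available.
def _evaluation_driver_sketch():
    segmentation_folder = "/path/to/segmentation/results"    # hypothetical path
    gt_folder = "/path/to/ground/truth/labels"               # hypothetical path
    image_filenames = ["T0001.bmp", "T0002.bmp"]             # hypothetical names
    names, cup_dices, disc_dices, ae_cdrs = generate_table_of_results(
        image_filenames, segmentation_folder, gt_folder, is_training=False)
    return get_mean_values_from_table(cup_dices, disc_dices, ae_cdrs)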
def get_mean_values_from_table(cup_dices, disc_dices, ae_cdrs):
'''
Compute the mean evaluation metrics for the segmentation task.
Input:
cup_dices: a numpy array with the same length as the image_filenames list, with the Dice coefficient for each optic cup
disc_dices: a numpy array with the same length as the image_filenames list, with the Dice coefficient for each optic disc
ae_cdrs: a numpy array with the same length as the image_filenames list, with the absolute error of the vertical cup to disc ratio
a = ht.ones((m), split=0)
b = ht.ones((j, k), split=1)
b[0] = ht.arange(1, k + 1)
b[:, 0] = ht.arange(1, j + 1)
ret00 = ht.matmul(a, b)
ret_comp = ht.array(a_torch @ b_torch, split=None)
self.assertTrue(ht.equal(ret00, ret_comp))
self.assertIsInstance(ret00, ht.DNDarray)
self.assertEqual(ret00.shape, (k,))
self.assertEqual(ret00.dtype, ht.float)
self.assertEqual(ret00.split, 0)
# b -> vector
a_torch = torch.ones((n, m), device=self.device.torch_device)
a_torch[0] = torch.arange(1, m + 1, device=self.device.torch_device)
a_torch[:, -1] = torch.arange(1, n + 1, device=self.device.torch_device)
b_torch = torch.ones((j), device=self.device.torch_device)
# splits None None
a = ht.ones((n, m), split=None)
b = ht.ones((j), split=None)
a[0] = ht.arange(1, m + 1)
a[:, -1] = ht.arange(1, n + 1)
ret00 = ht.matmul(a, b)
ret_comp = ht.array(a_torch @ b_torch, split=None)
self.assertTrue(ht.equal(ret00, ret_comp))
self.assertIsInstance(ret00, ht.DNDarray)
self.assertEqual(ret00.shape, (n,))
self.assertEqual(ret00.dtype, ht.float)
self.assertEqual(ret00.split, None)
a = ht.ones((n, m), split=None, dtype=ht.int64)
b = ht.ones((j), split=None, dtype=ht.int64)
a[0] = ht.arange(1, m + 1, dtype=ht.int64)
a[:, -1] = ht.arange(1, n + 1, dtype=ht.int64)
ret00 = ht.matmul(a, b)
ret_comp = ht.array((a_torch @ b_torch), split=None)
self.assertTrue(ht.equal(ret00, ret_comp))
self.assertIsInstance(ret00, ht.DNDarray)
self.assertEqual(ret00.shape, (n,))
self.assertEqual(ret00.dtype, ht.int64)
self.assertEqual(ret00.split, None)
# splits 0 None
a = ht.ones((n, m), split=0)
b = ht.ones((j), split=None)
a[0] = ht.arange(1, m + 1)
a[:, -1] = ht.arange(1, n + 1)
ret00 = ht.matmul(a, b)
ret_comp = ht.array((a_torch @ b_torch), split=None)
self.assertTrue(ht.equal(ret00, ret_comp))
self.assertIsInstance(ret00, ht.DNDarray)
self.assertEqual(ret00.shape, (n,))
self.assertEqual(ret00.dtype, ht.float)
self.assertEqual(ret00.split, 0)
a = ht.ones((n, m), split=0, dtype=ht.int64)
b = ht.ones((j), split=None, dtype=ht.int64)
a[0] = ht.arange(1, m + 1, dtype=ht.int64)
a[:, -1] = ht.arange(1, n + 1, dtype=ht.int64)
ret00 = ht.matmul(a, b)
ret_comp = ht.array((a_torch @ b_torch), split=None)
self.assertTrue(ht.equal(ret00, ret_comp))
self.assertIsInstance(ret00, ht.DNDarray)
self.assertEqual(ret00.shape, (n,))
self.assertEqual(ret00.dtype, ht.int64)
self.assertEqual(ret00.split, 0)
# splits 1 None
a = ht.ones((n, m), split=1)
b = ht.ones((j), split=None)
a[0] = ht.arange(1, m + 1)
a[:, -1] = ht.arange(1, n + 1)
ret00 = ht.matmul(a, b)
ret_comp = ht.array((a_torch @ b_torch), split=None)
self.assertTrue(ht.equal(ret00, ret_comp))
self.assertIsInstance(ret00, ht.DNDarray)
self.assertEqual(ret00.shape, (n,))
self.assertEqual(ret00.dtype, ht.float)
self.assertEqual(ret00.split, 0)
a = ht.ones((n, m), split=1, dtype=ht.int64)
b = ht.ones((j), split=None, dtype=ht.int64)
a[0] = ht.arange(1, m + 1, dtype=ht.int64)
a[:, -1] = ht.arange(1, n + 1, dtype=ht.int64)
ret00 = ht.matmul(a, b)
ret_comp = ht.array((a_torch @ b_torch), split=None)
self.assertTrue(ht.equal(ret00, ret_comp))
self.assertIsInstance(ret00, ht.DNDarray)
self.assertEqual(ret00.shape, (n,))
self.assertEqual(ret00.dtype, ht.int64)
self.assertEqual(ret00.split, 0)
# splits None 0
a = ht.ones((n, m), split=None)
b = ht.ones((j), split=0)
a[0] = ht.arange(1, m + 1)
a[:, -1] = ht.arange(1, n + 1)
ret00 = ht.matmul(a, b)
ret_comp = ht.array((a_torch @ b_torch), split=None)
self.assertTrue(ht.equal(ret00, ret_comp))
self.assertIsInstance(ret00, ht.DNDarray)
self.assertEqual(ret00.shape, (n,))
self.assertEqual(ret00.dtype, ht.float)
self.assertEqual(ret00.split, 0)
a = ht.ones((n, m), split=None, dtype=ht.int64)
b = ht.ones((j), split=0, dtype=ht.int64)
a[0] = ht.arange(1, m + 1, dtype=ht.int64)
a[:, -1] = ht.arange(1, n + 1, dtype=ht.int64)
ret00 = ht.matmul(a, b)
ret_comp = ht.array((a_torch @ b_torch), split=None)
self.assertTrue(ht.equal(ret00, ret_comp))
self.assertIsInstance(ret00, ht.DNDarray)
self.assertEqual(ret00.shape, (n,))
self.assertEqual(ret00.dtype, ht.int64)
self.assertEqual(ret00.split, 0)
# splits 0 0
a = ht.ones((n, m), split=0)
b = ht.ones((j), split=0)
a[0] = ht.arange(1, m + 1)
a[:, -1] = ht.arange(1, n + 1)
ret00 = ht.matmul(a, b)
ret_comp = ht.array((a_torch @ b_torch), split=None)
self.assertTrue(ht.equal(ret00, ret_comp))
self.assertIsInstance(ret00, ht.DNDarray)
self.assertEqual(ret00.shape, (n,))
self.assertEqual(ret00.dtype, ht.float)
self.assertEqual(ret00.split, 0)
a = ht.ones((n, m), split=0, dtype=ht.int64)
b = ht.ones((j), split=0, dtype=ht.int64)
a[0] = ht.arange(1, m + 1, dtype=ht.int64)
a[:, -1] = ht.arange(1, n + 1, dtype=ht.int64)
ret00 = ht.matmul(a, b)
ret_comp = ht.array((a_torch @ b_torch), split=None)
self.assertTrue(ht.equal(ret00, ret_comp))
self.assertIsInstance(ret00, ht.DNDarray)
self.assertEqual(ret00.shape, (n,))
self.assertEqual(ret00.dtype, ht.int64)
self.assertEqual(ret00.split, 0)
# splits 1 0
a = ht.ones((n, m), split=1)
b = ht.ones((j), split=0)
a[0] = ht.arange(1, m + 1)
a[:, -1] = ht.arange(1, n + 1)
ret00 = ht.matmul(a, b)
ret_comp = ht.array((a_torch @ b_torch), split=None)
self.assertTrue(ht.equal(ret00, ret_comp))
self.assertIsInstance(ret00, ht.DNDarray)
self.assertEqual(ret00.shape, (n,))
self.assertEqual(ret00.dtype, ht.float)
self.assertEqual(ret00.split, 0)
a = ht.ones((n, m), split=1, dtype=ht.int64)
b = ht.ones((j), split=0, dtype=ht.int64)
a[0] = ht.arange(1, m + 1, dtype=ht.int64)
a[:, -1] = ht.arange(1, n + 1, dtype=ht.int64)
ret00 = ht.matmul(a, b)
ret_comp = ht.array((a_torch @ b_torch), split=None)
self.assertTrue(ht.equal(ret00, ret_comp))
self.assertIsInstance(ret00, ht.DNDarray)
self.assertEqual(ret00.shape, (n,))
self.assertEqual(ret00.dtype, ht.int64)
self.assertEqual(ret00.split, 0)
with self.assertRaises(NotImplementedError):
a = ht.zeros((3, 3, 3), split=2)
b = a.copy()
a @ b
def test_norm(self):
a = ht.arange(9, dtype=ht.float32, split=0) - 4
self.assertTrue(
ht.allclose(ht.linalg.norm(a), ht.float32(np.linalg.norm(a.numpy())).item(), atol=1e-5)
)
a.resplit_(axis=None)
self.assertTrue(
ht.allclose(ht.linalg.norm(a), ht.float32(np.linalg.norm(a.numpy())).item(), atol=1e-5)
)
b = ht.array([[-4.0, -3.0, -2.0], [-1.0, 0.0, 1.0], [2.0, 3.0, 4.0]], split=0)
self.assertTrue(
ht.allclose(ht.linalg.norm(b), ht.float32(np.linalg.norm(b.numpy())).item(), atol=1e-5)
)
b.resplit_(axis=1)
self.assertTrue(
ht.allclose(ht.linalg.norm(b), ht.float32(np.linalg.norm(b.numpy())).item(), atol=1e-5)
)
with self.assertRaises(TypeError):
c = np.arange(9) - 4
ht.linalg.norm(c)
def test_outer(self):
# test outer, a and b local, different dtypes
a = ht.arange(3, dtype=ht.int32)
b = ht.arange(8, dtype=ht.float32)
ht_outer = ht.outer(a, b, split=None)
np_outer = np.outer(a.numpy(), b.numpy())
t_outer = torch.einsum("i,j->ij", a.larray, b.larray)
self.assertTrue((ht_outer.numpy() == np_outer).all())
self.assertTrue(ht_outer.larray.dtype is t_outer.dtype)
# test outer, a and b distributed, no data on some ranks
a_split = ht.arange(3, dtype=ht.float32, split=0)
b_split = ht.arange(8, dtype=ht.float32, split=0)
ht_outer_split = ht.outer(a_split, b_split, split=None)
# a and b split 0, outer split 1
ht_outer_split = ht.outer(a_split, b_split, split=1)
self.assertTrue(ht_outer_split.split == 1)
self.assertTrue((ht_outer_split.numpy() == np_outer).all())
# a and b distributed, outer split unspecified
ht_outer_split = ht.outer(a_split, b_split, split=None)
self.assertTrue(ht_outer_split.split == 0)
self.assertTrue((ht_outer_split.numpy() == np_outer).all())
# a not distributed, outer.split = 1
ht_outer_split = ht.outer(a, b_split, split=1)
self.assertTrue(ht_outer_split.split == 1)
self.assertTrue((ht_outer_split.numpy() == np_outer).all())
# b not distributed, outer.split = 0
ht_outer_split = ht.outer(a_split, b, split=0)
self.assertTrue(ht_outer_split.split == 0)
self.assertTrue((ht_outer_split.numpy() == np_outer).all())
# a_split.ndim > 1 and a.split != 0
a_split_3d = ht.random.randn(3, 3, 3, dtype=ht.float64, split=2)
ht_outer_split = ht.outer(a_split_3d, b_split)
np_outer_3d = np.outer(a_split_3d.numpy(), b_split.numpy())
self.assertTrue(ht_outer_split.split == 0)
self.assertTrue((ht_outer_split.numpy() == np_outer_3d).all())
# write to out buffer
ht_out = ht.empty((a.gshape[0], b.gshape[0]), dtype=ht.float32)
ht.outer(a, b, out=ht_out)
self.assertTrue((ht_out.numpy() == np_outer).all())
ht_out_split = ht.empty((a_split.gshape[0], b_split.gshape[0]), dtype=ht.float32, split=1)
ht.outer(a_split, b_split, out=ht_out_split, split=1)
self.assertTrue((ht_out_split.numpy() == np_outer).all())
# test exceptions
t_a = torch.arange(3)
with self.assertRaises(TypeError):
ht.outer(t_a, b)
np_b = np.arange(8)
with self.assertRaises(TypeError):
ht.outer(a, np_b)
a_0d = ht.array(2.3)
with self.assertRaises(RuntimeError):
ht.outer(a_0d, b)
t_out = torch.empty((a.gshape[0], b.gshape[0]), dtype=torch.float32)
with self.assertRaises(TypeError):
ht.outer(a, b, out=t_out)
ht_out_wrong_shape = ht.empty((7, b.gshape[0]), dtype=ht.float32)
with self.assertRaises(ValueError):
ht.outer(a, b, out=ht_out_wrong_shape)
ht_out_wrong_split = ht.empty(
(a_split.gshape[0], b_split.gshape[0]), dtype=ht.float32, split=1
)
with self.assertRaises(ValueError):
ht.outer(a_split, b_split, out=ht_out_wrong_split, split=0)
def test_projection(self):
a = ht.arange(1, 4, dtype=ht.float32, split=None)
e1 = ht.array([1, 0, 0], dtype=ht.float32, split=None)
self.assertTrue(ht.equal(ht.linalg.projection(a, e1), e1))
a.resplit_(axis=0)
self.assertTrue(ht.equal(ht.linalg.projection(a, e1), e1))
e2 = ht.array([0, 1, 0], dtype=ht.float32, split=0)
self.assertTrue(ht.equal(ht.linalg.projection(a, e2), e2 * 2))
a = ht.arange(1, 4, dtype=ht.float32, split=None)
e3 = ht.array([0, 0, 1], dtype=ht.float32, split=0)
self.assertTrue(ht.equal(ht.linalg.projection(a, e3), e3 * 3))
a = np.arange(1, 4)
with self.assertRaises(TypeError):
ht.linalg.projection(a, e1)
a = ht.array([[1], [2], [3]], dtype=ht.float32, split=None)
with self.assertRaises(RuntimeError):
ht.linalg.projection(a, e1)
def test_trace(self):
# ------------------------------------------------
# UNDISTRIBUTED CASE
# ------------------------------------------------
# CASE 2-D
# ------------------------------------------------
x = ht.arange(24).reshape((6, 4))
x_np = x.numpy()
dtype = ht.float32
result = ht.trace(x)
result_np = np.trace(x_np)
self.assertIsInstance(result, int)
self.assertEqual(result, result_np)
# direct call
result = x.trace()
self.assertIsInstance(result, int)
self.assertEqual(result, result_np)
# input = array_like (other than DNDarray)
result = ht.trace(x.tolist())
self.assertIsInstance(result, int)
self.assertEqual(result, result_np)
# dtype
result = ht.trace(x, dtype=dtype)
result_np = np.trace(x_np, dtype=np.float32)
self.assertIsInstance(result, float)
self.assertEqual(result, result_np)
# offset != 0
# negative offset
o = -(x.gshape[0] - 1)
result = ht.trace(x, offset=o)
result_np = np.trace(x_np, offset=o)
self.assertIsInstance(result, int)
self.assertEqual(result, result_np)
# positive offset
o = x.gshape[1] - 1
result = ht.trace(x, offset=o)
result_np = np.trace(x_np, offset=o)
self.assertIsInstance(result, int)
self.assertEqual(result, result_np)
# offset resulting into empty array
# negative
o = -x.gshape[0]
result = ht.trace(x, offset=o)
result_np = np.trace(x_np, offset=o)
self.assertIsInstance(result, int)
self.assertEqual(result, 0)
self.assertEqual(result, result_np)
# positive
o = x.gshape[1]
result = ht.trace(x, offset=o)
result_np = np.trace(x_np, offset=o)
self.assertIsInstance(result, int)
self.assertEqual(result, 0)
self.assertEqual(result, result_np)
# Exceptions
with self.assertRaises(TypeError):
x = "[[1, 2], [3, 4]]"
ht.trace(x)
with self.assertRaises(ValueError):
x = ht.arange(24)
ht.trace(x)
with self.assertRaises(TypeError):
x = ht.arange(24).reshape((6, 4))
ht.trace(x, axis1=0.2)
with self.assertRaises(TypeError):
ht.trace(x, axis2=1.4)
with self.assertRaises(ValueError):
ht.trace(x, axis1=2)
with self.assertRaises(ValueError):
ht.trace(x, axis2=2)
with self.assertRaises(TypeError):
ht.trace(x, offset=1.2)
with self.assertRaises(ValueError):
ht.trace(x, axis1=1, axis2=1)
with self.assertRaises(ValueError):
ht.trace(x, dtype="ht.int64")
with self.assertRaises(TypeError):
ht.trace(x, out=[])
with self.assertRaises(ValueError):
# As result is scalar
out = ht.array([])
ht.trace(x, out=out)
with self.assertRaises(ValueError):
ht.trace(x, dtype="ht.float32")
# ------------------------------------------------
# CASE > 2-D (4D)
# ------------------------------------------------
x = ht.arange(24).reshape((1, 2, 3, 4))
x_np = x.numpy()
out = ht.empty((3, 4))
axis1 = 1
axis2 = 3
result = ht.trace(x)
result_np = np.trace(x_np)
self.assertIsInstance(result, ht.DNDarray)
#######################################################################
# This file is part of Pyblosxom.
#
# Copyright (C) 2003-2011 by the Pyblosxom team. See AUTHORS.
#
# Pyblosxom is distributed under the MIT license. See the file
# LICENSE for distribution details.
#######################################################################
"""
This module contains the base class for all the Entry classes. The
EntryBase class is essentially the API for entries in Pyblosxom. Reading
through the comments for this class will walk you through building your
own EntryBase derivatives.
This module also holds a generic generate_entry function which will generate
a BaseEntry with data that you provide for it.
"""
import time
import locale
from Pyblosxom import tools
BIGNUM = 2000000000
CONTENT_KEY = "body"
DOESNOTEXIST = "THISKEYDOESNOTEXIST"
DOESNOTEXIST2 = "THISKEYDOESNOTEXIST2"
class EntryBase:
"""
EntryBase is the base class for all the Entry classes. Each
instance of an Entry class represents a single entry in the
weblog, whether it came from a file, or a database, or even
somewhere off the InterWeeb.
EntryBase derivatives are dict-like except for one key difference:
when doing ``__getitem__`` on a nonexistent key, it returns None by
default. For example:
>>> entry = EntryBase('some fake request')
>>> None == entry["some_nonexistent_key"]
True
"""
def __init__(self, request):
self._data = ""
self._metadata = dict(tools.STANDARD_FILTERS)
self._id = ""
self._mtime = BIGNUM
self._request = request
def __repr__(self):
"""
Returns a friendly, debuggable representation of self. Useful
for identifying which entry Pyblosxom failed on (though that is unlikely).
:returns: Identifiable representation of object
"""
return "<Entry instance: %s>\n" % self.getId()
def get_id(self):
"""
This should return an id that's unique enough for caching
purposes.
Override this.
:returns: string id
"""
return self._id
getId = tools.deprecated_function(get_id)
def get_data(self):
"""
Returns the data string. This method should be overridden to
pull the data from other places.
Override this.
:returns: the data as a string
"""
return str(self._data)
getData = tools.deprecated_function(get_data)
def set_data(self, data):
"""
Sets the data content for this entry. If you are not creating
the entry, then you have no right to set the data of the
entry. Doing so could be hazardous depending on what
EntryBase subclass you're dealing with.
Override this.
:param data: the data
"""
self._data = data
setData = tools.deprecated_function(set_data)
def get_metadata(self, key, default=None):
"""
Returns a given piece of metadata.
Override this.
:param key: the key being sought
:param default: the default to return if the key does not
exist
:return: either the default (if the key did not exist) or the
value of the key in the metadata dict
"""
return self._metadata.get(key, default)
getMetadata = tools.deprecated_function(get_metadata)
def set_metadata(self, key, value):
"""
Sets a key/value pair in the metadata dict.
Override this.
:param key: the key string
:param value: the value string
"""
self._metadata[key] = value
setMetadata = tools.deprecated_function(set_metadata)
def get_metadata_keys(self):
"""
Returns the list of keys for which we have values in our
stored metadata.
.. Note::
This list gets modified later downstream. If you cache
your list of metadata keys, then this method should return
a copy of that list and not the list itself lest it get
adjusted.
Override this.
:returns: list of metadata keys
"""
return self._metadata.keys()
getMetadataKeys = tools.deprecated_function(get_metadata_keys)
def get_from_cache(self, entryid):
"""
Retrieves information from the cache that pertains to this
specific entryid.
This is a helper method--call this to get data from the cache.
Do not override it.
:param entryid: a unique key for the information you're retrieving
:returns: dict with the values or None if there's nothing for that
entryid
"""
cache = tools.get_cache(self._request)
# cache.__getitem__ returns None if the id isn't there
if cache.has_key(entryid):
return cache[entryid]
return None
getFromCache = tools.deprecated_function(get_from_cache)
def add_to_cache(self, entryid, data):
"""
Over-writes the cached dict for key entryid with the data
dict.
This is a helper method--call this to add data to the cache.
Do not override it.
:param entryid: a unique key for the information you're
storing
:param data: the data to store--this should probably be a dict
"""
mycache = tools.get_cache(self._request)
if mycache:
# This could be extended to cover all keys used by
# set_time(), but this is the key most likely to turn
# up in metadata. If #date is not blocked from caching
# here, the templates will use the raw string value
# from the user metadata, rather than the value
# derived from mtime.
if data.has_key('date'):
data.pop('date')
mycache[entryid] = data
addToCache = tools.deprecated_function(add_to_cache)
def set_time(self, timetuple):
"""
This takes in a given time tuple and sets all the magic
metadata variables we have according to the items in the time
tuple.
:param timetuple: the timetuple to use to set the data
with--this is the same thing as the
mtime/atime portions of an os.stat. This
time is expected to be local time, not UTC.
"""
self['timetuple'] = timetuple
self._mtime = time.mktime(timetuple)
gmtimetuple = time.gmtime(self._mtime)
self['mtime'] = self._mtime
self['ti'] = time.strftime('%H:%M', timetuple)
self['mo'] = time.strftime('%b', timetuple)
self['mo_num'] = time.strftime('%m', timetuple)
self['da'] = time.strftime('%d', timetuple)
self['dw'] = time.strftime('%A', timetuple)
self['yr'] = time.strftime('%Y', timetuple)
self['fulltime'] = time.strftime('%Y%m%d%H%M%S', timetuple)
self['date'] = time.strftime('%a, %d %b %Y', timetuple)
# YYYY-MM-DDThh:mm:ssZ
self['w3cdate'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', gmtimetuple)
# Temporarily disable the set locale, so RFC-compliant date is
# really RFC-compliant: directives %a and %b are locale
# dependent. Technically, we're after english locale, but
# only 'C' locale is guaranteed to exist.
loc = locale.getlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'C')
self['rfc822date'] = time.strftime('%a, %d %b %Y %H:%M GMT', \
gmtimetuple)
# set the locale back
locale.setlocale(locale.LC_ALL, loc)
setTime = tools.deprecated_function(set_time)
# everything below this point involves convenience functions
# that work with the above functions.
def __getitem__(self, key, default=None):
"""
Retrieves an item from this dict based on the key given. If
the item does not exist, then we return the default.
If the item is ``CONTENT_KEY``, it calls ``get_data``,
otherwise it calls ``get_metadata``. Don't override this.
.. Warning::
There's no reason to override this--override ``get_data``
and ``get_metadata`` instead.
:param key: the key being sought
:param default: the default to return if the key does not
exist
:returns: the value of ``get_metadata`` or ``get_data``
"""
if key == CONTENT_KEY:
return self.get_data()
return self.get_metadata(key, default)
def get(self, key, default=None):
"""
Retrieves an item from the internal dict based on the key
given.
All this does is turn around and call ``__getitem__``.
.. Warning::
There's no reason to override this--override ``get_data``
and ``get_metadata`` instead.
:param key: the key being sought
:param default: the default to return if the key does not
exist
:returns: the value of ``get_metadata`` or ``get_data``
(through ``__getitem__``)
"""
return self.__getitem__(key, default)
def __setitem__(self, key, value):
"""
Sets the metadata[key] to the given value.
This uses ``set_data`` and ``set_metadata``. Don't override
this.
:param key: the given key name
:param value: the given value
"""
if key == CONTENT_KEY:
self.set_data(value)
else:
self.set_metadata(key, value)
def update(self, newdict):
"""
Updates the contents in this entry with the contents in the
dict. It does so by calling ``set_data`` and
``set_metadata``.
.. Warning::
There's no reason to override this--override ``set_data``
and ``set_metadata`` instead.
:param newdict: the dict we're updating this one with
"""
for mem in newdict.keys():
if mem == CONTENT_KEY:
self.set_data(newdict[mem])
else:
self.set_metadata(mem, newdict[mem])
def has_key(self, key):
"""
Returns whether a given key is in the metadata dict. If the
key is the ``CONTENT_KEY``, then we automatically return true.
.. Warning::
There's no reason to override this--override
``get_metadata`` instead.
:param key: the key to check in the metadata dict for
:returns: whether (True) or not (False) the key exists
"""
if key == CONTENT_KEY or key == CONTENT_KEY + "_escaped":
return True
value = self.get_metadata(key, DOESNOTEXIST)
if value == DOESNOTEXIST:
value = self.get_metadata(key, DOESNOTEXIST2)
if value == DOESNOTEXIST2:
return False
return True
def keys(self):
"""
Returns a list of the keys that can be accessed through
``__getitem__``.
.. Warning::
There's no reason to override this--override
``get_metadata_keys`` instead.
:returns: list of key names
"""
keys = self.get_metadata_keys()
if CONTENT_KEY not in keys:
keys.append(CONTENT_KEY)
return keys
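# Illustrative sketch (not part of the original module): a minimal EntryBase
# derivative showing which methods a backend typically overrides. The class
# name and constructor arguments below are hypothetical.
class _ExampleStringEntry(EntryBase):
    """An entry whose content lives in an in-memory string."""
    def __init__(self, request, entry_id, body, metadata=None):
        EntryBase.__init__(self, request)
        self._id = entry_id
        self._data = body
        if metadata:
            self._metadata.update(metadata)

    def get_id(self):
        # unique enough for caching purposes
        return self._id

    def get_data(self):
        return str(self._data)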
def generate_entry(request, properties, data, mtime=None):
"""
Takes a properties dict and a data
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""PBA & AutoAugment Train/Eval module.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import time
import numpy as np
import tensorflow as tf
import pba.data_utils as data_utils
import pba.helper_utils as helper_utils
from pba.bert_model import build_bert_model
from pba.bert_optimization import create_optimizer
from pba.augmentation_utils import ContextNeighborStorage
import six
import json
import re
import collections
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=32,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.current_learning_rate = None
def from_dict(json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
def config_from_json_file(json_file, model_dropout):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r") as reader:
text = reader.read()
config = from_dict(json.loads(text))
if model_dropout != -1:
config.hidden_dropout_prob = model_dropout
config.attention_probs_dropout_prob = model_dropout
return config
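# Illustrative sketch: how the two helpers above are typically combined. The
# path below is hypothetical; build_model() derives the real path from
# hparams.data_path at call time.
#
#   bert_config = config_from_json_file(
#       '/path/to/pretrained_models/bert_base/bert_config.json',
#       model_dropout=-1)  # -1 keeps the dropout values stored in the json
#   print(bert_config.hidden_size, bert_config.num_hidden_layers)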
def build_model(input_ids, input_mask, token_type_ids, num_classes, is_training, hparams, noise_vector):
"""Constructs the vision model being trained/evaled.
Args:
inputs: input features being fed to the model build built.
num_classes: number of output classes being predicted.
is_training: is the model training or not.
hparams: additional hyperparameters associated with the model.
Returns:
Returns:
The logits of the model.
"""
if hparams.model_name == 'bert':
bert_config_file = os.path.join(hparams.data_path + 'pretrained_models/bert_base/bert_config.json')
bert_config = config_from_json_file(bert_config_file,-1)
logits, embd_output = build_bert_model(input_ids, input_mask, token_type_ids, num_classes, is_training, bert_config, noise_vector)
return logits, embd_output
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name_to_variable[name]
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
class Model(object):
"""Builds an model."""
def __init__(self, hparams, num_classes, text_size):
self.hparams = hparams
self.num_classes = num_classes
self.text_size = text_size
def build(self, mode):
"""Construct the model."""
assert mode in ['train', 'eval']
self.mode = mode
self._setup_misc(mode)
self._setup_texts_and_labels(self.hparams.dataset) # --- create placeholders
self._build_graph(self.input_ids, self.input_mask, self.token_type_ids, self.labels, mode, self.noise_vector)
def _setup_misc(self, mode):
"""Sets up miscellaneous in the model constructor."""
self.lr_rate_ph = self.hparams.lr
self.current_learning_rate = self.lr_rate_ph
self.batch_size = self.hparams.batch_size
self.dataset = self.hparams.dataset
self.max_seq_length = self.hparams.max_seq_length
self.epoch_accuracy = []
self.matthews_corr = []
self.loss_history = []
if mode == 'eval':
self.batch_size = self.hparams.test_batch_size
def _setup_texts_and_labels(self, dataset):
"""Sets up text and label placeholders for the model."""
self.input_ids = tf.placeholder(tf.int32, [None, self.text_size])
self.input_mask = tf.placeholder(tf.int32,[None, self.text_size])
self.token_type_ids = tf.placeholder(tf.int32, [None, self.text_size])
if self.num_classes < 100: # --- classification
self.labels = tf.placeholder(tf.int32, [None, self.num_classes])
else: # --- regression
self.labels = tf.placeholder(tf.float32, [None, 1])
self.noise_vector = tf.placeholder(tf.float32, [None, None, 768])
def assign_epoch(self, session, epoch_value):
session.run(
self._epoch_update, feed_dict={self._new_epoch: epoch_value})
def _build_graph(self, input_ids, input_mask, token_type_ids, labels, mode, noise_vector):
"""Constructs the TF graph for the model.
Args:
input_ids, input_mask, token_type_ids: 2-D input Tensors for the tokenized text.
labels: A 2-D labels Tensor.
mode: string indicating training mode (e.g., 'train', 'eval').
"""
is_training = 'train' in mode
if is_training:
self.global_step = tf.train.get_or_create_global_step()
# texts is placeholder set in _setup_texts_and_labels(data set)
logits, embd_output = build_model(input_ids, input_mask, token_type_ids, self.num_classes, is_training,
self.hparams, noise_vector)
self.embedding_output = embd_output
if self.dataset == 'stsb':
self.predictions = logits
self.cost = tf.reduce_mean(tf.square(logits - labels))
else:
self.predictions, self.cost = helper_utils.setup_loss(logits, labels)
self._calc_num_trainable_params()
if is_training:
self._build_train_op()
with tf.device('/cpu:0'):
self.saver = tf.train.Saver(max_to_keep=10)
init_checkpoint = os.path.join(self.hparams.data_path,'pretrained_models', 'bert_base', 'bert_model.ckpt')
tvars = tf.trainable_variables("bert")
(assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint)
self.assignment_map = assignment_map
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
self.init = tf.global_variables_initializer()
def _calc_num_trainable_params(self):
self.num_trainable_params = np.sum([
np.prod(var.get_shape().as_list())
for var in tf.trainable_variables()
])
tf.logging.info('number of trainable params: {}'.format(
self.num_trainable_params))
def _build_train_op(self):
"""Builds the train op for the model."""
hparams = self.hparams
clip_norm = hparams.gradient_clipping_by_global_norm
num_train_data = hparams.train_size
batch_size = hparams.batch_size
num_epochs = hparams.num_epochs
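# Assumed intent: split the schedule roughly 90%/10%, i.e. num_warmup_steps
# covers the first ~10% of all optimizer steps (BERT-style linear warmup) and
# num_train_steps bounds the decay schedule inside create_optimizer().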
num_train_steps = int(np.floor(num_train_data/batch_size) * num_epochs * 0.9)
num_warmup_steps = int(np.floor(num_train_data/batch_size) * num_epochs * 0.1)
self.train_op, self.curr_learning_rate_tensor = create_optimizer(self.cost, self.lr_rate_ph, num_train_steps, num_warmup_steps, False, clip_norm, self.global_step)
class ModelTrainer(object):
"""Trains an instance of the Model class."""
def __init__(self, hparams):
self._session = None
self.hparams = hparams
np.random.seed(0) # --- Set the random seed to be sure the same validation set is used for each model
self.data_loader = data_utils.DataSet(hparams)
np.random.seed() # --- Put the random seed back to random
self.data_loader.reset()
# extra stuff for ray
self._build_models()
self._new_session()
self._session.__enter__()
self.create_nn_database(self.m, self.session)
def save_model(self, checkpoint_dir, step=None):
"""Dumps model into the backup_dir.
Args:
step: If provided, creates a checkpoint with the given step
number, instead of overwriting the existing checkpoints.
"""
model_save_name = os.path.join(checkpoint_dir,'model.ckpt') + '-' + str(step)
save_path = self.saver.save(self.session, model_save_name)
tf.logging.info('Saved child model')
return model_save_name
def extract_model_spec(self, checkpoint_path):
"""Loads a checkpoint with the architecture structure stored in the name."""
self.saver.restore(self.session, checkpoint_path)
tf.logging.warning(
'Loaded child model checkpoint from {}'.format(checkpoint_path))
def eval_child_model(self, model, data_loader, mode):
"""Evaluate the child model.
Args:
model: the model that will be evaluated.
data_loader: dataset object to extract eval data from.
mode: will the model be evaled on train, val or test.
Returns:
Accuracy, Matthews correlation, F1 score, Pearson and Spearman correlation of the model on the specified dataset.
"""
tf.logging.info('Evaluating child model in mode {}'.format(mode))
while True:
try:
accuracy, matthews_corrcoef, f1_score, pearson, spearman = helper_utils.eval_child_model(
self.session, model, data_loader, mode)
tf.logging.info(
'Eval child model accuracy: {}'.format(accuracy))
break
except (tf.errors.AbortedError, tf.errors.UnavailableError) as e:
tf.logging.info(
'Retryable error caught: {}. Retrying.'.format(e))
return accuracy, matthews_corrcoef, f1_score, pearson, spearman
@contextlib.contextmanager
def _new_session(self):
"""Creates a new session for model m. initialize variables, and save / restore from checkpoint."""
sess_cfg = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False)
sess_cfg.gpu_options.allow_growth = True
self._session = tf.Session('', config=sess_cfg)
self._session.run(self.m.init)
return self._session
def _build_models(self):
"""Builds the text models for train and eval."""
m = Model(self.hparams, self.data_loader.num_classes, self.data_loader.text_size)
m.build('train')
self._num_trainable_params = m.num_trainable_params
self._saver = m.saver
self.m = m
self.meval = m
def create_nn_database(self, model, session):
"""Create search index for nearest neighbour augmentation from all samples in the train data"""
if type(self.data_loader.train_texts[0]) == str:
self.nn_database = ContextNeighborStorage(sentences=self.data_loader.train_texts, n_labels=self.data_loader.train_labels.shape[1], model=model, session=session)
elif type(self.data_loader.train_texts[0]) == tuple:
all_sentences = [list(sent_pair) for sent_pair in self.data_loader.train_texts]
all_sentences_flat = [item for sublist in all_sentences for item in sublist]
self.nn_database = ContextNeighborStorage(sentences=all_sentences_flat, n_labels=self.data_loader.train_labels.shape[1], model=model, session=session)
self.nn_database.process_sentences()
self.nn_database.build_search_index()
def _run_training_loop(self, curr_epoch):
"""Trains the model `m` for one epoch."""
start_time = time.time()
while True:
try:
train_accuracy, train_matthews, train_f1_score, train_pearson, train_spearman = helper_utils.run_epoch_training(self.session, self.m, self.data_loader, self.nn_database, curr_epoch)
break
except (tf.errors.AbortedError, tf.errors.UnavailableError) as e:
tf.logging.info(
'Retryable error caught: {}. Retrying.'.format(e))
tf.logging.info('Finished epoch: {}'.format(curr_epoch))
tf.logging.info('Epoch time(min): {}'.format(
(time.time() - start_time) / 60.0))
return train_accuracy, train_matthews, train_f1_score, train_pearson, train_spearman
def _compute_final_accuracies(self, iteration):
"""Run once training is finished to compute final test accuracy."""
if (iteration >= self.hparams.num_epochs - 1):
test_accuracy, test_matthews_corrcoef, test_f1_score, test_pearson, test_spearman =
request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_login_theme_css_using_post(async_req=True)
>>> result = thread.get()
:param async_req bool
:param PaletteSettings body:
:param bool dark_foreground: Dark foreground enabled flag
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_login_theme_css_using_post_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_login_theme_css_using_post_with_http_info(**kwargs) # noqa: E501
return data
def get_login_theme_css_using_post_with_http_info(self, **kwargs): # noqa: E501
"""Get Login Theme CSS # noqa: E501
Generates the login theme CSS based on the provided Palette Settings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_login_theme_css_using_post_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param PaletteSettings body:
:param bool dark_foreground: Dark foreground enabled flag
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'dark_foreground'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_login_theme_css_using_post" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'dark_foreground' in params:
query_params.append(('darkForeground', params['dark_foreground'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/noauth/whiteLabel/loginThemeCss{?darkForeground}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_login_white_label_params_using_get(self, logo_image_checksum, favicon_checksum, **kwargs): # noqa: E501
"""Get Login White Labeling parameters # noqa: E501
Returns login white-labeling parameters based on the hostname from request. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_login_white_label_params_using_get(logo_image_checksum, favicon_checksum, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str logo_image_checksum: Logo image checksum. Expects value from the browser cache to compare it with the value from settings. If value matches, the 'logoImageUrl' will be null. (required)
:param str favicon_checksum: Favicon image checksum. Expects value from the browser cache to compare it with the value from settings. If value matches, the 'faviconImageUrl' will be null. (required)
:return: LoginWhiteLabelingParams
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_login_white_label_params_using_get_with_http_info(logo_image_checksum, favicon_checksum, **kwargs) # noqa: E501
else:
(data) = self.get_login_white_label_params_using_get_with_http_info(logo_image_checksum, favicon_checksum, **kwargs) # noqa: E501
return data
def get_login_white_label_params_using_get_with_http_info(self, logo_image_checksum, favicon_checksum, **kwargs): # noqa: E501
"""Get Login White Labeling parameters # noqa: E501
Returns login white-labeling parameters based on the hostname from request. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_login_white_label_params_using_get_with_http_info(logo_image_checksum, favicon_checksum, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str logo_image_checksum: Logo image checksum. Expects value from the browser cache to compare it with the value from settings. If value matches, the 'logoImageUrl' will be null. (required)
:param str favicon_checksum: Favicon image checksum. Expects value from the browser cache to compare it with the value from settings. If value matches, the 'faviconImageUrl' will be null. (required)
:return: LoginWhiteLabelingParams
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['logo_image_checksum', 'favicon_checksum'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_login_white_label_params_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'logo_image_checksum' is set
if ('logo_image_checksum' not in params or
params['logo_image_checksum'] is None):
raise ValueError("Missing the required parameter `logo_image_checksum` when calling `get_login_white_label_params_using_get`") # noqa: E501
# verify the required parameter 'favicon_checksum' is set
if ('favicon_checksum' not in params or
params['favicon_checksum'] is None):
raise ValueError("Missing the required parameter `favicon_checksum` when calling `get_login_white_label_params_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'logo_image_checksum' in params:
query_params.append(('logoImageChecksum', params['logo_image_checksum'])) # noqa: E501
if 'favicon_checksum' in params:
query_params.append(('faviconChecksum', params['favicon_checksum'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/noauth/whiteLabel/loginWhiteLabelParams{?faviconChecksum,logoImageChecksum}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LoginWhiteLabelingParams', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_white_label_params_using_get(self, logo_image_checksum, favicon_checksum, **kwargs): # noqa: E501
"""Get White Labeling parameters # noqa: E501
Returns white-labeling parameters for the current user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_white_label_params_using_get(logo_image_checksum, favicon_checksum, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str logo_image_checksum: Logo image checksum. Expects value from the browser cache to compare it with the value from settings. If value matches, the 'logoImageUrl' will be null. (required)
:param str favicon_checksum: Favicon image checksum. Expects value from the browser cache to compare it with the value from settings. If value matches, the 'faviconImageUrl' will be null. (required)
:return: WhiteLabelingParams
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_white_label_params_using_get_with_http_info(logo_image_checksum, favicon_checksum, **kwargs) # noqa: E501
else:
(data) = self.get_white_label_params_using_get_with_http_info(logo_image_checksum, favicon_checksum, **kwargs) # noqa: E501
return data
def get_white_label_params_using_get_with_http_info(self, logo_image_checksum, favicon_checksum, **kwargs): # noqa: E501
"""Get White Labeling parameters # noqa: E501
Returns white-labeling parameters for the current user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_white_label_params_using_get_with_http_info(logo_image_checksum, favicon_checksum, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str logo_image_checksum: Logo image checksum. Expects value from the browser cache to compare it with the value from settings. If value matches, the 'logoImageUrl' will be null. (required)
:param str favicon_checksum: Favicon image checksum. Expects value from the browser cache to compare it with the value from settings. If value matches, the 'faviconImageUrl' will be null. (required)
:return: WhiteLabelingParams
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['logo_image_checksum', 'favicon_checksum'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_white_label_params_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'logo_image_checksum' is set
if ('logo_image_checksum' not in params or
params['logo_image_checksum'] is None):
raise ValueError("Missing the required parameter `logo_image_checksum` when calling `get_white_label_params_using_get`") # noqa: E501
# verify the required parameter 'favicon_checksum' is set
if ('favicon_checksum' not in params or
params['favicon_checksum'] is None):
raise ValueError("Missing the required parameter `favicon_checksum` when calling `get_white_label_params_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'logo_image_checksum' in params:
query_params.append(('logoImageChecksum', params['logo_image_checksum'])) # noqa: E501
if 'favicon_checksum' in params:
query_params.append(('faviconChecksum', params['favicon_checksum'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/whiteLabel/whiteLabelParams{?faviconChecksum,logoImageChecksum}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WhiteLabelingParams', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def is_customer_white_labeling_allowed_using_get(self, **kwargs): # noqa: E501
"""Check Customer White Labeling Allowed # noqa: E501
Check if the White Labeling is enabled for the customers of the current tenant. Security check is performed to verify that the user has 'WRITE' permission for the white labeling resource. Available for users with 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> | |
'''
scriptname = 'eutester_diskpart_script'
self.sys('(echo rescan && echo list disk ) > ' + str(scriptname), code=0)
self.sys('diskpart /s ' + str(scriptname), code=0, timeout=timeout)
def get_diskdrive_for_volume(self, volume):
if not self.is_volume_attached_to_this_instance(volume):
return None
ret_disk = None
for disk in self.diskdrives:
disk.update_ebs_info()
if disk.ebs_volume == volume.id:
ret_disk = disk
if not ret_disk:
ret_disk = self.find_diskdrive_for_volume_by_serial_number(volume, force_check=True)
if not ret_disk:
if hasattr(volume,'md5') and volume.md5:
ret_disk = self.find_diskdrive_for_volume_by_md5(volume, force_check=True)
return ret_disk
def find_diskdrive_for_volume_by_md5(self, volume, md5=None, length=None, force_check=False):
if not force_check and not self.is_volume_attached_to_this_instance(volume):
return None
if not isinstance(volume, EuVolume):
volume = EuVolume.make_euvol_from_vol(volume=volume,tester=self.tester)
md5 = md5 or volume.md5
if not md5:
return None
length = length or volume.md5len
for disk in self.diskdrives:
if disk.cygwin_scsi_drive:
disk_md5 = self.get_dev_md5(disk.cygwin_scsi_drive, length=length)
if disk_md5 == md5:
volume.guestdev = disk.deviceid
volume.md5 = disk_md5
volume.md5len = length
disk.ebs_volume = volume.id
return disk
return None
def find_diskdrive_for_volume_by_serial_number(self, volume, serial_number=None, force_check=False):
'''
Attempt to iterate through all the diskdrives we're aware of. If a diskdrive is found with a serial_number
associated with the volume, return that diskdrive obj.
example serial number format: vol-81C13EA4-dev-sdg
:param volume: volume obj to use for deriving the serial_number
:param serial_number: string. Optional. The string representing the serial # to match.
:returns WinInstanceDiskDrive if found, else None
'''
if not force_check and not self.is_volume_attached_to_this_instance(volume):
return None
if not serial_number:
serial_number = volume.id + volume.attach_data.device.replace('/','-')
for disk in self.diskdrives:
if disk.serialnumber == serial_number:
return disk
return None
def is_volume_attached_to_this_instance(self, volume):
'''
Attempts to look up volume state per cloud to confirm the cloud believes the state of this volume is attached
to this instance. This does not verify the guest/hypervisor also believes the volume is attached.
:param volume: volume obj.
:returns boolean
'''
volume.update()
if hasattr(volume, 'attach_data') and volume.attach_data and (volume.attach_data.instance_id == self.id):
self.debug('Volume:' + str(volume.id) + " is attached to this instance: " + str(self.id) + " per cloud perspective")
return True
else:
self.debug('Volume:' + str(volume.id) + " is NOT attached to this instance: " + str(self.id) + " per cloud perspective")
return False
def update_volume_guest_info(self, volume, md5=None, md5len=None, guestdev=None):
self.debug("{0} update_volume_guest_info: {1} {2}"
.format(get_line(), volume, get_line()))
if not self.is_volume_attached_to_this_instance(volume):
raise Exception('Volume not attached to this instance')
disk = None
if not self.get_volume_from_attached_list_by_id(volume.id):
self.attached_vols.append(volume)
volume.guestdev = guestdev or volume.guestdev
if md5:
if not md5len:
raise Exception('Must provide md5len if providing the md5')
volume.md5 = md5
volume.md5len = md5len
else:
disk = self.get_diskdrive_for_volume(volume)
if not disk:
raise Exception('Could not find diskdrive for volume when attempting to update volume guest info:' + str(volume))
volume.md5len = md5len or 1024
volume.md5 = self.get_dev_md5(disk.cygwin_scsi_drive, volume.md5len)
if not guestdev:
volume.guestdev = disk.deviceid
disk = disk or self.get_diskdrive_for_volume(volume)
disk.update_ebs_info()
volume.update_volume_attach_info_tags(md5=volume.md5, md5len=volume.md5len, instance_id=self.id, guestdev=volume.guestdev)
return volume
def get_unsynced_volumes(self, check_md5=True):
'''
Description: Returns a list of volumes which are:
-in a state the cloud believes the vol is no longer attached, or
-whose attached device has changed or cannot be found on the guest.
If all euvols are shown as attached to this instance, and the last known local dev is present and/or a local device is found with a matching md5 checksum,
then an empty list is returned as all volumes are successfully attached and state is in sync.
This method iterates through all the known euvolumes attached to this euinstance.
Returns a list of euvolumes for which a corresponding guest device could not be found, or which the cloud no longer believes are attached.
:param check_md5: - optional - find devices by md5 comparison. Default is to only perform this check when virtio_blk is in use.
'''
bad_list = []
retdict = self.sync_attached_volumes_with_clouds_view()
bad_list.extend(retdict['badvols'])
return bad_list
def reboot_instance_and_verify(self,
waitconnect=60,
timeout=600,
wait_for_ports=180,
connect=True,
checkvolstatus=False,
pad=5,
uptime_retries=3):
'''
Attempts to reboot an instance and verify its state post reboot.
waitconnect-optional-integer representing seconds to wait before attempting to connect to instance after reboot
timeout-optional-integer, seconds. If a connection has failed, this timer is used to determine a retry
connect- optional - boolean to indicate whether an ssh session should be established once the expected state has been reached
checkvolstatus - optional -boolean to be used to check volume status post start up
'''
msg=""
newuptime = None
attempt = 0
def get_safe_uptime():
uptime = None
try:
uptime = self.get_uptime()
except: pass
return uptime
self.debug('Attempting to reboot instance:'+str(self.id)+', check attached volume state first')
uptime = self.tester.wait_for_result( get_safe_uptime, None, oper=operator.ne)
elapsed = 0
start = time.time()
if checkvolstatus:
#update the md5sums per volume before reboot
bad_vols=self.get_unsynced_volumes()
if bad_vols != []:
for bv in bad_vols:
self.debug(str(self.id)+'Unsynced volume found:'+str(bv.id))
raise Exception(str(self.id)+"Could not reboot using checkvolstatus flag due to unsync'd volumes")
self.debug('Rebooting now...')
self.reboot()
time.sleep(waitconnect)
try:
self.poll_for_ports_status(ports=[3389,5589], timeout=wait_for_ports)
except:
self.debug('Failed to poll winrm and rdp ports after ' + str(wait_for_ports) + ' seconds, try to connect anyways...')
timeout=timeout - int(time.time()-start)
while (elapsed < timeout):
self.connect_to_instance(timeout=timeout)
#Wait for the system to provide a valid response for uptime, early connections may not
newuptime = self.tester.wait_for_result( get_safe_uptime, None, oper=operator.ne)
elapsed = int(time.time()-start)
#Check to see if new uptime is at least 'pad' less than before, allowing for some pad
if (newuptime - (uptime+elapsed)) > pad:
err_msg = "Instance uptime does not represent a reboot. Orig:"+str(uptime)+\
", New:"+str(newuptime)+", elapsed:"+str(elapsed)+"/"+str(timeout)
if elapsed > timeout:
raise Exception(err_msg)
else:
self.debug(err_msg)
else:
self.debug("Instance uptime indicates a reboot. Orig:"+str(uptime)+\
", New:"+str(newuptime)+", elapsed:"+str(elapsed))
break
if checkvolstatus:
badvols= self.get_unsynced_volumes()
if badvols != []:
for vol in badvols:
msg = msg+"\nVolume:"+vol.id+" Local Dev:"+vol.guestdev
raise Exception("Missing volumes post reboot:"+str(msg)+"\n")
self.debug(self.id+" reboot_instance_and_verify Success")
def get_uptime(self):
if not hasattr(self, 'system_info'):
self.update_system_info()
if hasattr(self.system_info, 'system_boot_time'):
return self._get_uptime_from_system_boot_time()
elif hasattr(self.system_info, 'system_up_time'):
return self._get_uptime_from_system_up_time()
else:
tb = self.tester.get_traceback()
raise Exception(str(tb) + '\nCould not get system boot or up time from system_info')
def _get_uptime_from_system_boot_time(self):
#11/18/2013, 3:15:39 PM
if not hasattr(self, 'system_info'):
self.update_system_info()
splitdate = self.system_info.system_boot_time.split()
datestring = splitdate[0]
timestring = splitdate[1]
ampm = splitdate[2]
month, day, year = datestring.replace(',',"").split('/')
hours, minutes, seconds = timestring.split(':')
if ampm == 'PM':
hours = int(hours) + 12
datetimestring = str(year) + " " + \
str(month) + " " + \
str(day) + " " + \
str(hours) + " " + \
str(minutes) + " " + \
str(seconds)
dt = datetime.strptime(datetimestring, "%Y %m %d %H %M %S")
return int(time.time() - time.mktime(dt.timetuple()))
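# Worked example (hypothetical input): for a system_boot_time string of
# "11/18/2013, 3:15:39 PM" the parser above builds the datetimestring
# "2013 11 18 15 15 39" and returns the number of seconds elapsed since
# that local time.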
def _get_uptime_from_system_up_time(self):
#0 Days, 0 Hours, 6 Minutes, 39 Seconds
if not hasattr(self, 'system_info'):
self.update_system_info()
uptime_string = self.system_info.system_up_time
days = 0
hours = 0
minutes = 0
seconds = 0
split = uptime_string.split(',')
for part in split:
time_string = ""
if re.search('Days', part, re.IGNORECASE):
time_string = str(part.split()[0]).strip()
days = int(time_string or 0)
elif re.search('Hours', part, re.IGNORECASE):
time_string = str(part.split()[0]).strip()
hours = int(time_string or 0)
elif re.search('Minutes', part, re.IGNORECASE):
time_string = str(part.split()[0]).strip()
minutes = int(time_string or 0)
elif re.search('Seconds', part, re.IGNORECASE):
time_string = str(part.split()[0]).strip()
seconds = int(time_string or 0)
self.debug("Days:" +str(days)+', Hours:'+ str(hours) + ", Minutes:" + str(minutes) + ", Seconds:" + str(seconds))
uptime = (days * 86400) + (hours * 3600) + (minutes * 60) + seconds
return uptime
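# Worked example (hypothetical input): for a system_up_time string of
# "0 Days, 0 Hours, 6 Minutes, 39 Seconds" the parser above returns
# (0 * 86400) + (0 * 3600) + (6 * 60) + 39 = 399 seconds.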
def stop_instance_and_verify(self, timeout=200, state='stopped',
failstate='terminated', check_vols=True):
'''
Attempts to stop the instance and verify the state has gone to
the stopped state
:param timeout; -optional-time to wait on instance to go to state 'state' before failing
:param state: -optional-the expected state to signify success, default is stopped
:param failstate: -optional-a state transition that indicates failure, default is terminated
'''
self.debug(self.id+" Attempting to stop instance...")
start = time.time()
elapsed = 0
self.stop()
while (elapsed < timeout):
time.sleep(2)
self.update()
if self.state == state:
break
if self.state == failstate:
raise Exception(str(self.id) + " instance went to state:" +
str(self.state) + " while | |
%.2f +/- %.2f Myr \n'%(np.median(mega_df[w3].age), med_age_sigma3))
f.write('Unassociated median age: %.2f +/- %.2f Myr \n'%(np.median(mega_df[w0].age), med_age_sigma0))
f.write('\n')
f.close()
""" now do things by environmental mask locations
simple environmental masks cheatsheet
1 = center (small bulge, nuclear ring & disk)
2 = bar (excluding bar ends)
3 = bar ends (overlap of bar and spiral)
4 = interbar (R_gal < R_bar, but outside bar footprint)
5 = spiral arms inside interbar (R_gal < R_bar)
6 = spiral arms (R_gal > R_bar)
7 = interarm (only the R_gal spanned by spiral arms, and R_gal > R_bar)
8 = outer disc (R_gal > spiral arm ends, only for galaxies with identified spirals)
9 = disc (R_gal > R_bar) where no spiral arms were identified (e.g. flocculent spirals)
simplified further
1 = center
2 + 3 = bar
4 + 7 + 8 = interarm
5 + 6 = spiral arms
9 = disc in galaxies without spirals
"""
# get indices for the clusters of each enviro - need np.where so we can get multiple conditions and can use iloc later
wcenter = np.where(mega_df['env_mask_val'] == 1)
wbar_idx = np.where((mega_df['env_mask_val'] == 2) | (mega_df['env_mask_val'] == 3) )
winterarm_idx = np.where((mega_df['env_mask_val'] == 4) | (mega_df['env_mask_val'] == 7) | (mega_df['env_mask_val'] == 8))
wspiral_idx = np.where((mega_df['env_mask_val'] == 5) | (mega_df['env_mask_val'] == 6))
wdisk = np.where(mega_df['env_mask_val'] == 9)
# list with all the enviro indices
wall = [wcenter, wbar_idx[0], winterarm_idx[0], wspiral_idx[0], wdisk]
# list of the enviro names
names = ['center', 'bar', 'interarm', 'spiralarm', 'disk']
# loop through to each enviro
for i in range(len(wall)):
# make a temp dataframe with just the clusters of the current enviro
df = mega_df.iloc[wall[i]]
# make histogram of cluster ages split by association number
sc_gmc_assoc_hist(df, filename=data_dir+'sc_gmc_assoc_hist_%s.%s'%(names[i], run_name))
# star cluster ages and errors
age_all = df['age'].to_numpy()
lage_all = np.log10(age_all)
age_err_all = df['age_err'].to_numpy()
lage_err_all = age_err_all/age_all/np.log(10)
# indices for each association number
w0 = df['assoc_num'] == 0
w1 = df['assoc_num'] == 1
w2 = df['assoc_num'] == 2
w3 = df['assoc_num'] == 3
# bootstrap errors on the median ages
med_age_sigma_all = bootstrap_median_error(age_all, age_err_all)
med_age_sigma1 = bootstrap_median_error(age_all[w1], age_err_all[w1])
med_age_sigma2 = bootstrap_median_error(age_all[w2], age_err_all[w2])
med_age_sigma3 = bootstrap_median_error(age_all[w3], age_err_all[w3])
med_age_sigma0 = bootstrap_median_error(age_all[w0], age_err_all[w0])
# log the stats for each env
if i == 0:
f = open(data_dir + 'sc_gmc_assoc_stats_env.%s.txt'%run_name, 'w')
f.write(names[i] + '\n')
f.write('All star clusters median age: %.2f +/- %.2f Myr \n'%(np.median(age_all), med_age_sigma_all) )
f.write('Within 1 R_gmc median age: %.2f +/- %.2f Myr \n'%(np.median(age_all[w1]), med_age_sigma1 ) )
f.write('1 < R_gmc <= 2 median age: %.2f +/- %.2f Myr \n'%(np.median(age_all[w2]), med_age_sigma2 ) )
f.write('2 < R_gmc <= 3 median age: %.2f +/- %.2f Myr \n'%(np.median(age_all[w3]), med_age_sigma3 ) )
f.write('Unassociated median age: %.2f +/- %.2f Myr \n'%(np.median(age_all[w0]), med_age_sigma0 ) )
f.write('\n')
f.close()
else:
f = open(data_dir + 'sc_gmc_assoc_stats_env.%s.txt'%run_name, 'a')
f.write(names[i] + '\n')
f.write('All star clusters median age: %.2f +/- %.2f Myr \n'%(np.median(age_all), med_age_sigma_all) )
f.write('Within 1 R_gmc median age: %.2f +/- %.2f Myr \n'%(np.median(age_all[w1]), med_age_sigma1 ) )
f.write('1 < R_gmc <= 2 median age: %.2f +/- %.2f Myr \n'%(np.median(age_all[w2]), med_age_sigma2 ) )
f.write('2 < R_gmc <= 3 median age: %.2f +/- %.2f Myr \n'%(np.median(age_all[w3]), med_age_sigma3 ) )
f.write('Unassociated median age: %.2f +/- %.2f Myr \n'%(np.median(age_all[w0]), med_age_sigma0 ) )
f.write('\n')
f.close()
def auto_corr(df, min_bin=1.1e-5, nbins=10, nbootstraps=50, method='landy-szalay', rseed=222, gmc=False):
""" function to calculate the auto-correlation for the given dataframe
uses the astroML function bootstrap_two_point_angular
Inputs:
df pandas DataFrame dataframe which holds the objects to do the correlation function for
min_bin float the angular location of the first/minimum bin
nbins int the number of radial bins over which to do the correlation
nbootstraps int number of bootstraps to perform for the error estimation; default is 50
method str estimator method to use for correlation function; landy-szalay or standard; default is landy-szalay
rseed int the seed value which gets used for the numpy.random
gmc bool set to true if the df used is the GMC catalog since it has different keywords for ra,dec
Outputs:
results
results[0] == bins list of the bin edges; len(bins) == nbins + 1
results[1] == corr list of the correlation values for each bin
results[2] == corr_err list of the bootstrap estimated errors on the correlation values
results[3] == bootstraps list of lists of bootstrapped correlation values in each bin; len(bootstraps) == nbootstraps
"""
np.random.seed(rseed)
bins = 10 ** np.linspace(np.log10(min_bin), np.log10(0.1), nbins+1)
results = [bins]
if gmc:
results += bootstrap_two_point_angular(df['XCTR_DEG'], df['YCTR_DEG'], bins=bins, method=method, Nbootstraps=nbootstraps)
else:
results += bootstrap_two_point_angular(df['ra'], df['dec'], bins=bins, method=method, Nbootstraps=nbootstraps)
return results
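# Illustrative usage sketch (added for clarity; the dataframe and column names are
# assumptions, not part of the original pipeline): for a catalog `sc_df` with
# 'ra'/'dec' columns in degrees,
#
#   bins, corr, corr_err, bootstraps = auto_corr(sc_df, min_bin=1e-4, nbins=12)
#   bin_centers = 0.5 * (bins[1:] + bins[:-1])   # angular bin centers [deg]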
def powerlaw_func(theta, Aw, alpha):
""" a powerlaw function of the form
f(theta) = Aw * theta^alpha
"""
return Aw * theta**alpha
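# Note: in log-log space this model is linear, log10(f) = log10(Aw) + alpha*log10(theta),
# which is why the fitted correlation functions appear as straight lines on the
# logarithmic axes used in the plots below.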
def tpcf(df, dist, **kwargs):
""" runs the bootstrap two point corrrelation function and the power law fit
Inputs:
df pandas DataFrame dataframe which holds the objects to do the correlation function for
dist float distance to galaxy in Mpc
kwargs dictionary keyword arguments to pass on to the auto_corr function
Outputs:
bins_centers_pc list center positions of the bins in parsecs
corr list correlation values for each bin; 1 + omega(theta)
corr_err list bootstrap estimated errors on the correlation values
power_law_fits list the best-fit for powerlaws; [A_w (deg), error, A_w (pc), error, alpha, error ]
"""
# perform the auto-correlation
bins, corr, corr_err, bootstraps = auto_corr(df, **kwargs)
# find bin centers [degrees]
bin_centers = 0.5 * (bins[1:] + bins[:-1])
# bin centers as in pc
bin_centers_pc = dist*1e6 * bin_centers*u.deg.to(u.rad)
# add 1 so the correlation is 1 + omega(theta)
corr = corr + 1
# need to drop nans for the power law fitting
    wnnan = np.where(~np.isnan(corr))
# power-law fit
popt_ang, pcov = curve_fit(powerlaw_func, bin_centers[wnnan], corr[wnnan])
perr_ang = np.sqrt(np.diag(pcov))
popt_pc, pcov = curve_fit(powerlaw_func, bin_centers_pc[wnnan], corr[wnnan])
perr_pc = np.sqrt(np.diag(pcov))
# sometimes the error doesn't converge so replace those with 0 (instead of inf)
winf = np.where(np.isinf(perr_ang))[0]
if len(winf) > 0:
perr_ang[winf] = 0
perr_pc[winf] = 0
return bin_centers_pc, corr, corr_err, [popt_ang[0], perr_ang[0], popt_pc[0], perr_pc[0], popt_ang[1], perr_ang[1]]
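# Illustrative usage sketch (assumed names and distance, not from the original script):
#
#   r_pc, corr, corr_err, fit = tpcf(sc_df, dist=9.9, nbins=10)
#   Aw_deg, Aw_deg_err, Aw_pc, Aw_pc_err, alpha, alpha_err = fit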
def all_galaxies_tpcf(galaxy_list, data_dir, run_name, assoc_cat_suffix='_cluster_catalog_in_mask_class12_assoc_gmc', sc_class='class12', nbins=10 ):
""" function form of tpcf.py - loop through all the galaxies and do the two-point correlation function analysis
Inputs:
galaxy_list astropy Table table that holds the list of galaxies to perform the analysis on
data_dir str path to the data directory; e.g., /cherokee1/turner/phangs/cf/data/
run_name str name of the run/test; e.g., run01
assoc_cat_suffix str suffix of the filename for the csv which holds the star cluster - gmc association dataframe
sc_class str which class of clusters to make the catalogs for; class12 or class123
    nbins              int; list           the number of radial bins over which to do the correlation; if a list, it'll loop through all the given nbins
"""
gal_id = galaxy_list['id']
gal_dist = galaxy_list['dist']
for i in range(len(galaxy_list)):
# galaxy props
gal_name = gal_id[i]
dist = gal_dist[i]
print('')
print(gal_name)
# read in the star cluster cat in the hst-alma footprint overlap mask
sc_df = pd.read_csv(data_dir + '%s/%s/%s%s.csv'%(gal_name, run_name, gal_name, assoc_cat_suffix))
# read in the gmc cat in the hst-alma footprint overlap mask
gmc_cat = fits.open(data_dir + '%s/%s/%s_gmc_cat_masked.fits'%(gal_name, run_name, gal_name))[1].data
gmc_df = Table(gmc_cat).to_pandas()
# check if nbins is a list or int; if int, make it a list of len 1
        if isinstance(nbins, int):
nbins = [nbins]
# loop through the nbins in the list
for j in range(len(nbins)):
# two-point correlation function on the all the star clusters
bin_centers_pc_all, corr_all, corr_err_all, pl_fit_all = tpcf(sc_df, dist, nbins=nbins[j])
# now for clusters <= 10 Myr
wleq10 = sc_df['age'] <= 10
bin_centers_pc_young, corr_young, corr_err_young, pl_fit_young = tpcf(sc_df.loc[wleq10], dist, nbins=nbins[j])
# now for clusters > 10 Myr
w10 = sc_df['age'] > 10
bin_centers_pc_old, corr_old, corr_err_old, pl_fit_old = tpcf(sc_df.loc[w10], dist, nbins=nbins[j])
# now gmcs
bin_centers_pc_gmc, corr_gmc, corr_err_gmc, pl_fit_gmc = tpcf(gmc_df, dist, nbins=nbins[j], min_bin=3e-4, gmc=True)
# write out the power-law best fit parameters for all, young, old
f = open(data_dir + '%s/%s/%s_tpcf_fits.nbins%02d.dat'%(gal_name, run_name, gal_name, nbins[j]), 'w')
f.write('{:<6} '.format('# bin'))
f.write('{:<6} '.format('Aw_deg'))
f.write('{:<5} '.format('error'))
f.write('{:<6} '.format('Aw_pc'))
f.write('{:<6} '.format('error'))
f.write('{:<6} '.format('alpha'))
f.write('{:<5} '.format('error'))
f.write('\n')
f.write('{:<6} '.format('all'))
f.write('{:>6} '.format('%.3f'%(pl_fit_all[0])))
f.write('{:>5} '.format('%.3f'%(pl_fit_all[1])))
f.write('{:>6} '.format('%.3f'%(pl_fit_all[2])))
f.write('{:>6} '.format('%.3f'%(pl_fit_all[3])))
f.write('{:>6} '.format('%.3f'%(pl_fit_all[4])))
f.write('{:>5} '.format('%.3f'%(pl_fit_all[5])))
f.write('\n')
f.write('{:<6} '.format('<= 10'))
f.write('{:>6} '.format('%.3f'%(pl_fit_young[0])))
f.write('{:>5} '.format('%.3f'%(pl_fit_young[1])))
f.write('{:>6} '.format('%.3f'%(pl_fit_young[2])))
f.write('{:>6} '.format('%.3f'%(pl_fit_young[3])))
f.write('{:>6} '.format('%.3f'%(pl_fit_young[4])))
f.write('{:>5} '.format('%.3f'%(pl_fit_young[5])))
f.write('\n')
f.write('{:<6} '.format('> 10'))
f.write('{:>6} '.format('%.3f'%(pl_fit_old[0])))
f.write('{:>5} '.format('%.3f'%(pl_fit_old[1])))
f.write('{:>6} '.format('%.3f'%(pl_fit_old[2])))
f.write('{:>6} '.format('%.3f'%(pl_fit_old[3])))
f.write('{:>6} '.format('%.3f'%(pl_fit_old[4])))
f.write('{:>5} '.format('%.3f'%(pl_fit_old[5])))
f.write('\n')
f.write('{:<6} '.format('gmc'))
f.write('{:>6} '.format('%.3f'%(pl_fit_gmc[0])))
f.write('{:>5} '.format('%.3f'%(pl_fit_gmc[1])))
f.write('{:>6} '.format('%.3f'%(pl_fit_gmc[2])))
f.write('{:>6} '.format('%.3f'%(pl_fit_gmc[3])))
f.write('{:>6} '.format('%.3f'%(pl_fit_gmc[4])))
f.write('{:>5} '.format('%.3f'%(pl_fit_gmc[5])))
f.close()
# create figure
fig, ax = plt.subplots(1,1, figsize=(5,5))
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel(r'$r$ [pc]')
ax.set_ylabel(r'$1 + \omega(\theta)$')
# all clusters
ax.errorbar(bin_centers_pc_all, corr_all, yerr=corr_err_all, fmt='k-o', ecolor='black', markersize=5, lw=1.5,
label=r'All SCs $\alpha=%.2f\pm%.2f$ (%i) '%(pl_fit_all[4], pl_fit_all[5], len(sc_df)))
# clusters <= 10 Myr
ax.errorbar(bin_centers_pc_young, corr_young, yerr=corr_err_young, fmt='-o', color='#377eb8', ecolor='#377eb8', markersize=5, lw=1.5,
label=r'$\leq 10$ Myr $\alpha=%.2f\pm%.2f$ (%i) '%(pl_fit_young[4], pl_fit_young[5], len(sc_df.loc[wleq10])))
# clusters > 10 Myr
ax.errorbar(bin_centers_pc_old, corr_old, yerr=corr_err_old, fmt='-o', color='#e41a1c', ecolor='#e41a1c', markersize=5, lw=1.5,
label=r'$> 10$ Myr $\alpha=%.2f\pm%.2f$ (%i) '%(pl_fit_old[4], pl_fit_old[5], len(sc_df.loc[w10])))
# gmcs
ax.errorbar(bin_centers_pc_gmc, corr_gmc, yerr=corr_err_gmc, fmt='-o', color='#E68310', ecolor='#E68310', markersize=5, lw=1.5,
label=r'GMCs $\alpha=%.2f\pm%.2f$ (%i) '%(pl_fit_gmc[4], pl_fit_gmc[5], len(gmc_df)))
# plot vertical line at mean GMC radius
            ax.axvline(gmc_df['RAD3D_PC'].mean(), lw=1.1, c='#999999', zorder=0)
plt.legend(loc='upper right', fontsize='x-small')
plt.savefig(data_dir + '%s/%s/%s_tpcf.nbins%02d.png'%(gal_name, run_name, gal_name, nbins[j]), bbox_inches='tight')
plt.savefig(data_dir + '%s/%s/%s_tpcf.nbins%02d.pdf'%(gal_name, run_name, gal_name, nbins[j]), bbox_inches='tight')
plt.close()
logbins_sc = np.log10(bin_centers_pc_all)
logbins_gmc = np.log10(bin_centers_pc_gmc)
# write out the bin centers and correlation values
f = open(data_dir + '%s/%s/%s_tpcf.nbins%02d.dat'%(gal_name, run_name, gal_name, nbins[j]), 'w')
            f.write('# two-point correlation function values (1 + omega(theta)); bin centers are given in log(pc)\n')
f.write('{:<8} '.format('nbins%02d'%nbins[j]))
for k in range(nbins[j]):
f.write('{:>6} '.format('%.3f'%(logbins_sc[k])))
f.write('\n')
f.write('{:<8} '.format('corr_all'))
for k in range(nbins[j]):
f.write('{:>6} '.format('%.3f'%(corr_all[k])))
f.write('\n')
f.write('{:<8} '.format('corr_yng'))
            for k in range(nbins[j]):
                f.write('{:>6} '.format('%.3f'%(corr_young[k])))
            f.write('\n')
from types import *
from types_gc import *
import compilerLib, library
import symtable
import re
import numpy as np
import inspect
from collections import OrderedDict
SPDZ = 0
GC = 1
LOCAL = 2
class Params(object):
intp = 64
f = 32
k = 64
@classmethod
def set_params(cls, int_precision=32, f=32, k=64, parallelism=1):
cls.intp = int_precision
cls.f = f
cls.k = k
cfix.set_precision(f, k)
sfix.set_precision(f, k)
cfix_gc.set_precision(f, k)
sfix_gc.set_precision(f, k)
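# Illustrative usage sketch (values are hypothetical): the precision parameters are
# typically configured once, before any clear/secret values are constructed, e.g.
#
#   Params.set_params(int_precision=64, f=32, k=64)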
class ClearIntegerFactory(object):
def __call__(self, value):
if mpc_type == SPDZ:
return cint(value)
elif mpc_type == LOCAL:
return int(value)
else:
return cint_gc(Params.intp, value)
class SecretIntegerFactory(object):
def __call__(self, value):
if mpc_type == SPDZ:
return sint(value)
elif mpc_type == LOCAL:
raise ValueError("Secret integer called for local phase")
else:
#return sint_gc(Params.intp, input_party=value)
raise ValueError("Cannot instantiate secret integers in GC. Secret integers must be read using .read_input")
def read_input(self, party):
if mpc_type == SPDZ:
return sint.get_private_input_from(party)
else:
return sint_gc(Params.intp, input_party=party)
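# Illustrative usage sketch (party id is hypothetical; SecretInteger refers to the
# module-level SecretIntegerFactory() instance created near the bottom of this file):
#
#   x = SecretInteger.read_input(party=0)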
class ClearIntegerMatrixFactory(object):
def __call__(self, rows, columns):
if not isinstance(rows, int) or not isinstance(columns, int):
raise ValueError("Matrix sizes must be publicly known integers")
if mpc_type == SPDZ:
ret = cintMatrix(rows, columns)
return ret
else:
ret = cintMatrixGC(rows, columns)
for i in range(rows):
for j in range(columns):
ret[i][j] = cint_gc(0)
return ret
def read_input(self, rows, columns, channel=0):
if not isinstance(rows, int) or not isinstance(columns, int):
raise ValueError("Matrix sizes must be publicly known integers")
if mpc_type == LOCAL:
raise ValueError("Shouldn't be local.")
if mpc_type == SPDZ:
ret = cintMatrix(rows, columns)
@library.for_range(ret.rows)
def f(i):
@library.for_range(ret.columns)
def g(j):
ret[i][j].public_input(channel)
return ret
else:
raise ValueError("Clear matrix read_input not supported for GC")
class ClearFixedPointFactory(object):
def __call__(self, value):
if mpc_type == SPDZ:
return cfix(value)
elif mpc_type == LOCAL:
return float(value)
else:
return cfix_gc(v=value, scale=True)
class SecretFixedPointFactory(object):
def read_input(self, party):
if mpc_type == SPDZ:
v = sint.get_private_input_from(party)
vf = sfix.load_sint(v)
return vf
else:
return sfix_gc(v=None, input_party=party)
class ClearFixedPointArrayFactory(object):
def __call__(self, length):
if not isinstance(length, int):
raise ValueError("Array length must be a publicly known integer")
if mpc_type == SPDZ:
ret = cfixArray(length)
return ret
else:
ret = cfixArrayGC(length)
for i in range(length):
ret[i] = cfix_gc(0)
return ret
class ClearIntegerArrayFactory(object):
def __call__(self, length):
if not isinstance(length, int):
raise ValueError("Array length must be a publicly known integer")
if mpc_type == SPDZ:
ret = cintArray(length)
return ret
else:
ret = cintArrayGC(length)
for i in range(length):
ret[i] = cint_gc(0)
return ret
class SecretFixedPointArrayFactory(object):
def __call__(self, length):
if not isinstance(length, int):
raise ValueError("Array length must be a publicly known integer")
if mpc_type == SPDZ:
ret = sfixArray(length)
return ret
else:
ret = sfixArrayGC(length)
for i in range(length):
ret[i] = cfix_gc(0)
return ret
def read_input(self, length, party):
if not isinstance(length, int):
raise ValueError("Array length must be a publicly known integer")
if mpc_type == SPDZ:
ret = sfixArray(length)
@library.for_range(ret.length)
def f(i):
v = sint.get_private_input_from(party)
ret[i] = sfix.load_sint(v, scale=False)
return ret
else:
ret = sfixArrayGC(length)
for i in range(ret.length):
ret[i] = sfix_gc(v=None, input_party=party)
return ret
class SecretIntegerArrayFactory(object):
def __call__(self, length):
if not isinstance(length, int):
raise ValueError("Array length must be a publicly known integer")
if mpc_type == SPDZ:
ret = sintArray(length)
return ret
else:
ret = sintArrayGC(length)
for i in range(length):
ret[i] = sint_gc(0)
return ret
def read_input(self, length, party):
if not isinstance(length, int):
raise ValueError("Array length must be a publicly known integer")
if mpc_type == SPDZ:
ret = sintArray(length)
@library.for_range(ret.length)
def f(i):
v = sint.get_private_input_from(party)
ret[i] = v
return ret
else:
ret = sintArrayGC(length)
for i in range(ret.length):
ret[i] = sint_gc(Params.intp, input_party=party)
return ret
import struct
class SecretFixedPointMatrixFactory(object):
def __call__(self, rows, columns):
if not isinstance(rows, int) or not isinstance(columns, int):
raise ValueError("Matrix sizes must be publicly known integers")
if mpc_type == LOCAL:
raise ValueError("Shouldn't be local.")
if mpc_type == SPDZ:
ret = sfixMatrix(rows, columns)
return ret
else:
ret = sfixMatrixGC(rows, columns)
for i in range(rows):
for j in range(columns):
ret[i][j] = cfix_gc(0)
return ret
def read_input(self, rows, columns, party):
if not isinstance(rows, int) or not isinstance(columns, int):
raise ValueError("Matrix sizes must be publicly known integers")
if mpc_type == LOCAL:
raise ValueError("Shouldn't be local.")
if mpc_type == SPDZ:
ret = sfixMatrix(rows, columns)
@library.for_range(ret.rows)
def f(i):
@library.for_range(ret.columns)
def g(j):
v = sint.get_private_input_from(party)
ret[i][j] = sfix.load_sint(v, scale=False)
return ret
else:
ret = sfixMatrixGC(rows, columns)
for i in range(ret.rows):
for j in range(ret.columns):
ret[i][j] = sfix_gc(v=None, input_party=party)
return ret
# Reads input from file.
def read_clear_input(self, rows, columns, party, f, input_file="./Input_Data/f0"):
input_type = np.dtype([('f1', np.bool), ('f2', np.int64)])
lst_inputs = np.fromfile(f, input_type, rows * columns)
precision = sfix.f
assert(len(lst_inputs) >= rows * columns)
res = np.zeros((rows, columns))
for i in range(rows):
for j in range(columns):
entry = lst_inputs[i * columns + j]
if entry[0]:
factor = -1
else:
factor = 1
res[i][j] = factor * entry[1] * 1.0 / (2 ** precision)
return res
# Read horizontally partitioned data from multiple parties
    # input_config should be a list of (party_id, rows) tuples; the column count is shared and passed separately
def read_input_variable_rows(self, columns, input_config):
rows = sum([ic[1] for ic in input_config])
if mpc_type == SPDZ:
ret = sfixMatrix(rows, columns)
party_config = cintMatrix(len(input_config), 2)
rows_offset = 0
for (p, r) in input_config:
@library.for_range(r)
def a(i):
@library.for_range(columns)
def b(j):
v = sint.get_private_input_from(p)
ret[i + rows_offset][j] = sfix.load_sint(v, scale=False)
rows_offset += r
return ret
else:
ret = sfixMatrixGC(rows, columns)
rows_offset = 0
for (p, r) in input_config:
for i in range(r):
for j in range(columns):
ret[i+rows_offset][j] = sfix_gc(v=None, input_party=p)
rows_offset += r
return ret
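    # Illustrative usage sketch (party ids and shapes are hypothetical, and the
    # module-level SecretFixedPointMatrix factory instance is assumed): party 0
    # holds 100 rows and party 1 holds 50 rows of a 10-column matrix:
    #
    #   X = SecretFixedPointMatrix.read_input_variable_rows(
    #       columns=10, input_config=[(0, 100), (1, 50)])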
class SecretIntegerMatrixFactory(object):
def __call__(self, rows, columns):
if not isinstance(rows, int) or not isinstance(columns, int):
raise ValueError("Matrix sizes must be publicly known integers")
if mpc_type == LOCAL:
raise ValueError("Shouldn't be local.")
if mpc_type == SPDZ:
ret = sintMatrix(rows, columns)
return ret
else:
ret = sintMatrixGC(rows, columns)
for i in range(rows):
for j in range(columns):
ret[i][j] = cint_gc(0) #sint_gc(Params.intp, party)
return ret
def read_input(self, rows, columns, party):
if not isinstance(rows, int) or not isinstance(columns, int):
raise ValueError("Matrix sizes must be publicly known integers")
if mpc_type == LOCAL:
raise ValueError("Shouldn't be local.")
if mpc_type == SPDZ:
ret = sintMatrix(rows, columns)
@library.for_range(ret.rows)
def f(i):
@library.for_range(ret.columns)
def g(j):
v = sint.get_private_input_from(party)
ret[i][j] = v
return ret
else:
ret = sintMatrixGC(rows, columns)
for i in range(ret.rows):
for j in range(ret.columns):
ret[i][j] = sint_gc(Params.intp, input_party=party)
return ret
class ClearFixedPointMatrixFactory(object):
def __call__(self, rows, columns):
if mpc_type == SPDZ:
return cfixMatrix(rows, columns)
elif mpc_type == LOCAL:
return np.zeros((rows, columns))
else:
ret = cfixMatrixGC(rows, columns, cfix_gc)
for i in range(ret.rows):
for j in range(ret.columns):
ret[i][j] = cfix_gc(0)
return ret
class PrivateFixedPointMatrix(object):
def preprocess(self, precision=36):
input_file="./Input_Data/f0"
input_type = np.dtype([('f1', np.bool), ('f2', np.int64)])
lst_inputs = np.fromfile(input_file, input_type)
data = lst_inputs.flatten().tolist()
lst_data = []
for i in range(len(data)):
entry = data[i]
if entry[0]:
factor = -1
else:
factor = 1
val = factor * entry[1] * 1.0 / (2 ** precision)
lst_data.append(val)
self.data = lst_data
def read_input(self, rows, columns, party):
assert(len(self.data) >= rows * columns)
res = np.zeros((rows, columns))
for i in range(rows):
for j in range(columns):
entry = self.data.pop(0)
res[i][j] = entry
return res
def reveal_all(v, text=""):
if mpc_type == SPDZ:
if isinstance(v, (sint, sfix)):
if text == "":
text = "value"
library.print_ln("{} = %s".format(text), v.reveal())
elif isinstance(v, Array):
if text == "":
text = "Array"
@library.for_range(v.length)
def f(i):
library.print_ln("{}[%s] = %s".format(text), i, v[i].reveal())
elif isinstance(v, Matrix):
if text == "":
text = "Matrix"
@library.for_range(v.rows)
def f(i):
@library.for_range(v.columns)
def g(j):
library.print_ln("{}[%s][%s] = %s".format(text), i, j, v[i][j].reveal())
elif isinstance(v, (regint, cint, cfix)):
if text == "":
text = "value"
library.print_ln("{} = %s".format(text), v)
else:
raise NotImplemented
else:
info = v.reveal(name=text)
program_gc.output_objects.append(info)
import numpy as np
import struct
# lst_data is a list of matrices right now, sort of hard coded to the specific program
def write_private_data(lst_data):
lst_private_data = []
for matrix in lst_data:
lst_private_data += matrix.flatten().tolist()
# Need to left shift by 36 due to the way SCALE-MAMBA reads in fixed-point input.
lst_private_data_pow = [e * pow(2, 36) for e in lst_private_data]
f = open("./Input_Data" + "/f0", 'w')
for d in lst_private_data_pow:
sign = d < 0
output = struct.pack("?", sign)
f.write(output)
output = struct.pack("Q", abs(int(d)))
f.write(output)
f.close()
data_rev = lst_private_data[::-1]
f = open("./Input_Data" + "/agmpc.input", 'w')
for d in data_rev:
output = struct.pack(">q", int(d))
f.write(output)
f.close()
ClearInteger = ClearIntegerFactory()
ClearIntegerMatrix = ClearIntegerMatrixFactory()
ClearIntegerArray = ClearIntegerArrayFactory()
SecretInteger = SecretIntegerFactory()
SecretIntegerArray = SecretIntegerArrayFactory()
SecretIntegerMatrix = SecretIntegerMatrixFactory()
# glance/store/swift.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for SWIFT"""
from __future__ import absolute_import
import hashlib
import httplib
import math
import urllib
import urlparse
from oslo.config import cfg
from glance.common import auth
from glance.common import exception
from glance.openstack.common import excutils
import glance.openstack.common.log as logging
import glance.store
import glance.store.base
import glance.store.location
try:
import swiftclient
except ImportError:
pass
LOG = logging.getLogger(__name__)
DEFAULT_CONTAINER = 'glance'
DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024 # 5GB
DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200 # 200M
ONE_MB = 1000 * 1024
swift_opts = [
cfg.BoolOpt('swift_enable_snet', default=False,
help=_('Whether to use ServiceNET to communicate with the '
'Swift storage servers.')),
cfg.StrOpt('swift_store_auth_address',
help=_('The address where the Swift authentication service '
'is listening.')),
cfg.StrOpt('swift_store_user', secret=True,
help=_('The user to authenticate against the Swift '
'authentication service')),
cfg.StrOpt('swift_store_key', secret=True,
help=_('Auth key for the user authenticating against the '
'Swift authentication service.')),
cfg.StrOpt('swift_store_auth_version', default='2',
help=_('Version of the authentication service to use. '
'Valid versions are 2 for keystone and 1 for swauth '
'and rackspace')),
cfg.BoolOpt('swift_store_auth_insecure', default=False,
help=_('If True, swiftclient won\'t check for a valid SSL '
'certificate when authenticating.')),
cfg.StrOpt('swift_store_region',
help=_('The region of the swift endpoint to be used for '
'single tenant. This setting is only necessary if the '
'tenant has multiple swift endpoints.')),
cfg.StrOpt('swift_store_endpoint_type', default='publicURL',
help=_('A string giving the endpoint type of the swift '
'service to use (publicURL, adminURL or internalURL). '
'This setting is only used if swift_store_auth_version '
'is 2.')),
cfg.StrOpt('swift_store_service_type', default='object-store',
help=_('A string giving the service type of the swift service '
'to use. This setting is only used if '
'swift_store_auth_version is 2.')),
cfg.StrOpt('swift_store_container',
default=DEFAULT_CONTAINER,
help=_('Container within the account that the account should '
'use for storing images in Swift.')),
cfg.IntOpt('swift_store_large_object_size',
default=DEFAULT_LARGE_OBJECT_SIZE,
help=_('The size, in MB, that Glance will start chunking image '
'files and do a large object manifest in Swift')),
cfg.IntOpt('swift_store_large_object_chunk_size',
default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE,
help=_('The amount of data written to a temporary disk buffer '
'during the process of chunking the image file.')),
cfg.BoolOpt('swift_store_create_container_on_put', default=False,
help=_('A boolean value that determines if we create the '
'container if it does not exist.')),
cfg.BoolOpt('swift_store_multi_tenant', default=False,
help=_('If set to True, enables multi-tenant storage '
'mode which causes Glance images to be stored in '
'tenant specific Swift accounts.')),
cfg.ListOpt('swift_store_admin_tenants', default=[],
help=_('A list of tenants that will be granted read/write '
'access on all Swift containers created by Glance in '
'multi-tenant mode.')),
cfg.BoolOpt('swift_store_ssl_compression', default=True,
help=_('If set to False, disables SSL layer compression of '
'https swift requests. Setting to False may improve '
'performance for images which are already in a '
'compressed format, eg qcow2.')),
]
CONF = cfg.CONF
CONF.register_opts(swift_opts)
class StoreLocation(glance.store.location.StoreLocation):
"""
Class describing a Swift URI. A Swift URI can look like any of
the following:
swift://user:[email protected]/container/obj-id
swift://account:user:[email protected]/container/obj-id
swift+http://user:[email protected]/container/obj-id
swift+https://user:[email protected]/container/obj-id
When using multi-tenant a URI might look like this (a storage URL):
swift+https://example.com/container/obj-id
The swift+http:// URIs indicate there is an HTTP authentication URL.
The default for Swift is an HTTPS authentication URL, so swift:// and
swift+https:// are the same...
"""
def process_specs(self):
self.scheme = self.specs.get('scheme', 'swift+https')
self.user = self.specs.get('user')
self.key = self.specs.get('key')
self.auth_or_store_url = self.specs.get('auth_or_store_url')
self.container = self.specs.get('container')
self.obj = self.specs.get('obj')
def _get_credstring(self):
if self.user and self.key:
return '%s:%s@' % (urllib.quote(self.user), urllib.quote(self.key))
return ''
def get_uri(self):
auth_or_store_url = self.auth_or_store_url
if auth_or_store_url.startswith('http://'):
auth_or_store_url = auth_or_store_url[len('http://'):]
elif auth_or_store_url.startswith('https://'):
auth_or_store_url = auth_or_store_url[len('https://'):]
credstring = self._get_credstring()
auth_or_store_url = auth_or_store_url.strip('/')
container = self.container.strip('/')
obj = self.obj.strip('/')
return '%s://%s%s/%s/%s' % (self.scheme, credstring, auth_or_store_url,
container, obj)
def parse_uri(self, uri):
"""
Parse URLs. This method fixes an issue where credentials specified
in the URL are interpreted differently in Python 2.6.1+ than prior
versions of Python. It also deals with the peculiarity that new-style
Swift URIs have where a username can contain a ':', like so:
swift://account:user:[email protected]/container/obj
"""
# Make sure that URIs that contain multiple schemes, such as:
# swift://user:pass@http://authurl.com/v1/container/obj
# are immediately rejected.
if uri.count('://') != 1:
reason = _("URI cannot contain more than one occurrence "
"of a scheme. If you have specified a URI like "
"swift://user:pass@http://authurl.com/v1/container/obj"
", you need to change it to use the "
"swift+http:// scheme, like so: "
"swift+http://user:[email protected]/v1/container/obj")
LOG.debug(_("Invalid store URI: %(reason)s"), {'reason': reason})
raise exception.BadStoreUri(message=reason)
pieces = urlparse.urlparse(uri)
assert pieces.scheme in ('swift', 'swift+http', 'swift+https')
self.scheme = pieces.scheme
netloc = pieces.netloc
path = pieces.path.lstrip('/')
if netloc != '':
# > Python 2.6.1
if '@' in netloc:
creds, netloc = netloc.split('@')
else:
creds = None
else:
# Python 2.6.1 compat
# see lp659445 and Python issue7904
if '@' in path:
creds, path = path.split('@')
else:
creds = None
netloc = path[0:path.find('/')].strip('/')
path = path[path.find('/'):].strip('/')
if creds:
cred_parts = creds.split(':')
if len(cred_parts) != 2:
reason = (_("Badly formed credentials in Swift URI."))
LOG.debug(reason)
raise exception.BadStoreUri()
user, key = cred_parts
self.user = urllib.unquote(user)
self.key = urllib.unquote(key)
else:
self.user = None
self.key = None
path_parts = path.split('/')
try:
self.obj = path_parts.pop()
self.container = path_parts.pop()
if not netloc.startswith('http'):
# push hostname back into the remaining to build full authurl
path_parts.insert(0, netloc)
self.auth_or_store_url = '/'.join(path_parts)
except IndexError:
reason = _("Badly formed Swift URI.")
LOG.debug(reason)
raise exception.BadStoreUri()
@property
def swift_url(self):
"""
Creates a fully-qualified auth url that the Swift client library can
use. The scheme for the auth_url is determined using the scheme
included in the `location` field.
HTTPS is assumed, unless 'swift+http' is specified.
"""
if self.auth_or_store_url.startswith('http'):
return self.auth_or_store_url
else:
if self.scheme in ('swift+https', 'swift'):
auth_scheme = 'https://'
else:
auth_scheme = 'http://'
return ''.join([auth_scheme, self.auth_or_store_url])
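# Illustrative sketch (hypothetical URI; the constructor signature comes from the
# glance.store.location.StoreLocation base class, which accepts a specs dict):
#
#   loc = StoreLocation({})
#   loc.parse_uri('swift+https://user:key@auth.example.com/v1/glance/1234')
#   # -> loc.user == 'user', loc.container == 'glance', loc.obj == '1234'
#   # -> loc.swift_url == 'https://auth.example.com/v1'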
def Store(context=None, loc=None):
if (CONF.swift_store_multi_tenant and
(loc is None or loc.store_location.user is None)):
return MultiTenantStore(context, loc)
return SingleTenantStore(context, loc)
class BaseStore(glance.store.base.Store):
CHUNKSIZE = 65536
def get_schemes(self):
return ('swift+https', 'swift', 'swift+http')
def configure(self):
_obj_size = self._option_get('swift_store_large_object_size')
self.large_object_size = _obj_size * ONE_MB
_chunk_size = self._option_get('swift_store_large_object_chunk_size')
self.large_object_chunk_size = _chunk_size * ONE_MB
self.admin_tenants = CONF.swift_store_admin_tenants
self.region = CONF.swift_store_region
self.service_type = CONF.swift_store_service_type
self.endpoint_type = CONF.swift_store_endpoint_type
self.snet = CONF.swift_enable_snet
self.insecure = CONF.swift_store_auth_insecure
self.ssl_compression = CONF.swift_store_ssl_compression
def get(self, location, connection=None):
location = location.store_location
if not connection:
connection = self.get_connection(location)
try:
resp_headers, resp_body = connection.get_object(
container=location.container, obj=location.obj,
resp_chunk_size=self.CHUNKSIZE)
except swiftclient.ClientException as e:
if e.http_status == httplib.NOT_FOUND:
msg = _("Swift could not find image at URI.")
raise exception.NotFound(msg)
else:
raise
class ResponseIndexable(glance.store.Indexable):
def another(self):
try:
return self.wrapped.next()
except StopIteration:
return ''
length = int(resp_headers.get('content-length', 0))
return (ResponseIndexable(resp_body, length), length)
def get_size(self, location, connection=None):
location = location.store_location
if not connection:
connection = self.get_connection(location)
try:
resp_headers = connection.head_object(
container=location.container, obj=location.obj)
return int(resp_headers.get('content-length', 0))
except Exception:
return 0
def _option_get(self, param):
result = getattr(CONF, param)
if not result:
reason = (_("Could not find %(param)s in configuration "
"options.") % {'param': param})
LOG.error(reason)
raise exception.BadStoreConfiguration(store_name="swift",
reason=reason)
return result
def _delete_stale_chunks(self, connection, container, chunk_list):
for chunk in chunk_list:
LOG.debug(_("Deleting chunk %s") % chunk)
try:
connection.delete_object(container, chunk)
except Exception:
msg = _("Failed to delete orphaned chunk %s/%s")
LOG.exception(msg, container, chunk)
def add(self, image_id, image_file, image_size, connection=None):
location = self.create_location(image_id)
if not connection:
connection = self.get_connection(location)
self._create_container_if_missing(location.container, connection)
LOG.debug(_("Adding image object '%(obj_name)s' "
"to Swift") % dict(obj_name=location.obj))
try:
if image_size > 0 and image_size < self.large_object_size:
# Image size is known, and is less than large_object_size.
# Send to Swift with regular PUT.
obj_etag = connection.put_object(location.container,
location.obj, image_file,
content_length=image_size)
else:
# Write the image into Swift in chunks.
chunk_id = 1
if image_size > 0:
total_chunks = str(int(
math.ceil(float(image_size) /
float(self.large_object_chunk_size))))
else:
# image_size == 0 is when we don't know the size
# of the image. This can occur with older clients
# that don't inspect the payload size.
LOG.debug(_("Cannot determine image size. Adding as a "
"segmented object to Swift."))
total_chunks = '?'
checksum = hashlib.md5()
written_chunks = []
combined_chunks_size = 0
while True:
chunk_size = self.large_object_chunk_size
                    if image_size ==
provider.
:type well_known_open_id_configuration: str
"""
_attribute_map = {
'authorization_endpoint': {'key': 'authorizationEndpoint', 'type': 'str'},
'token_endpoint': {'key': 'tokenEndpoint', 'type': 'str'},
'issuer': {'key': 'issuer', 'type': 'str'},
'certification_uri': {'key': 'certificationUri', 'type': 'str'},
'well_known_open_id_configuration': {'key': 'wellKnownOpenIdConfiguration', 'type': 'str'},
}
def __init__(self, **kwargs):
super(OpenIdConnectConfig, self).__init__(**kwargs)
self.authorization_endpoint = kwargs.get('authorization_endpoint', None)
self.token_endpoint = kwargs.get('token_endpoint', None)
self.issuer = kwargs.get('issuer', None)
self.certification_uri = kwargs.get('certification_uri', None)
self.well_known_open_id_configuration = kwargs.get('well_known_open_id_configuration', None)
class OpenIdConnectLogin(Model):
"""The configuration settings of the login flow of the custom Open ID Connect
provider.
:param name_claim_type: The name of the claim that contains the users
name.
:type name_claim_type: str
:param scopes: A list of the scopes that should be requested while
authenticating.
:type scopes: list[str]
"""
_attribute_map = {
'name_claim_type': {'key': 'nameClaimType', 'type': 'str'},
'scopes': {'key': 'scopes', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(OpenIdConnectLogin, self).__init__(**kwargs)
self.name_claim_type = kwargs.get('name_claim_type', None)
self.scopes = kwargs.get('scopes', None)
class OpenIdConnectRegistration(Model):
"""The configuration settings of the app registration for the custom Open ID
Connect provider.
:param client_id: The client id of the custom Open ID Connect provider.
:type client_id: str
:param client_credential: The authentication credentials of the custom
Open ID Connect provider.
:type client_credential:
~commondefinitions.models.OpenIdConnectClientCredential
:param open_id_connect_configuration: The configuration settings of the
endpoints used for the custom Open ID Connect provider.
:type open_id_connect_configuration:
~commondefinitions.models.OpenIdConnectConfig
"""
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'client_credential': {'key': 'clientCredential', 'type': 'OpenIdConnectClientCredential'},
'open_id_connect_configuration': {'key': 'openIdConnectConfiguration', 'type': 'OpenIdConnectConfig'},
}
def __init__(self, **kwargs):
super(OpenIdConnectRegistration, self).__init__(**kwargs)
self.client_id = kwargs.get('client_id', None)
self.client_credential = kwargs.get('client_credential', None)
self.open_id_connect_configuration = kwargs.get('open_id_connect_configuration', None)
class OperationDetail(Model):
"""Operation detail payload.
:param name: Name of the operation
:type name: str
:param is_data_action: Indicates whether the operation is a data action
:type is_data_action: bool
:param display: Display of the operation
:type display: ~commondefinitions.models.OperationDisplay
:param origin: Origin of the operation
:type origin: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
}
def __init__(self, **kwargs):
super(OperationDetail, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.is_data_action = kwargs.get('is_data_action', None)
self.display = kwargs.get('display', None)
self.origin = kwargs.get('origin', None)
class OperationDisplay(Model):
"""Operation display payload.
:param provider: Resource provider of the operation
:type provider: str
:param resource: Resource of the operation
:type resource: str
:param operation: Localized friendly name for the operation
:type operation: str
:param description: Localized friendly description for the operation
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(self, **kwargs):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class QueueScaleRule(Model):
"""Container App container Azure Queue based scaling rule.
:param queue_name: Queue name.
:type queue_name: str
:param queue_length: Queue length.
:type queue_length: int
:param auth: Authentication secrets for the queue scale rule.
:type auth: list[~commondefinitions.models.ScaleRuleAuth]
"""
_attribute_map = {
'queue_name': {'key': 'queueName', 'type': 'str'},
'queue_length': {'key': 'queueLength', 'type': 'int'},
'auth': {'key': 'auth', 'type': '[ScaleRuleAuth]'},
}
def __init__(self, **kwargs):
super(QueueScaleRule, self).__init__(**kwargs)
self.queue_name = kwargs.get('queue_name', None)
self.queue_length = kwargs.get('queue_length', None)
self.auth = kwargs.get('auth', None)
class RegistryCredentials(Model):
"""Container App Private Registry.
:param server: Container Registry Server
:type server: str
:param username: Container Registry Username
:type username: str
:param password_secret_ref: The name of the Secret that contains the
registry login password
:type password_secret_ref: str
"""
_attribute_map = {
'server': {'key': 'server', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
'password_secret_ref': {'key': 'passwordSecretRef', 'type': 'str'},
}
def __init__(self, **kwargs):
super(RegistryCredentials, self).__init__(**kwargs)
self.server = kwargs.get('server', None)
self.username = kwargs.get('username', None)
self.password_secret_ref = kwargs.get('password_secret_ref', None)
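# Illustrative sketch (values are hypothetical): like the other generated models,
# RegistryCredentials accepts its documented parameters as keyword arguments, e.g.
#
#   creds = RegistryCredentials(server='myregistry.azurecr.io',
#                               username='myuser',
#                               password_secret_ref='registry-password')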
class RegistryInfo(Model):
"""Container App registry information.
:param registry_url: registry server Url.
:type registry_url: str
:param registry_user_name: registry username.
:type registry_user_name: str
:param registry_password: registry secret.
:type registry_password: str
"""
_attribute_map = {
'registry_url': {'key': 'registryUrl', 'type': 'str'},
'registry_user_name': {'key': 'registryUserName', 'type': 'str'},
'registry_password': {'key': 'registryPassword', 'type': 'str'},
}
def __init__(self, **kwargs):
super(RegistryInfo, self).__init__(**kwargs)
self.registry_url = kwargs.get('registry_url', None)
self.registry_user_name = kwargs.get('registry_user_name', None)
self.registry_password = kwargs.get('registry_password', None)
class Replica(ProxyResource):
"""Container App Revision Replica.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. E.g.
"Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy
and modifiedBy information.
:vartype system_data: ~commondefinitions.models.SystemData
:ivar created_time: Timestamp describing when the pod was created by
controller
:vartype created_time: datetime
:param containers: The containers collection under a replica.
:type containers: list[~commondefinitions.models.ReplicaContainer]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'created_time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'containers': {'key': 'properties.containers', 'type': '[ReplicaContainer]'},
}
def __init__(self, **kwargs):
super(Replica, self).__init__(**kwargs)
self.created_time = None
self.containers = kwargs.get('containers', None)
class ReplicaCollection(Model):
"""Container App Revision Replicas collection ARM resource.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~commondefinitions.models.Replica]
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Replica]'},
}
def __init__(self, **kwargs):
super(ReplicaCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class ReplicaContainer(Model):
"""Container object under Container App Revision Replica.
:param name: The Name of the Container
:type name: str
:param container_id: The Id of the Container
:type container_id: str
:param ready: The container ready status
:type ready: bool
:param started: The container start status
:type started: bool
:param restart_count: The container restart count
:type restart_count: int
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'container_id': {'key': 'containerId', 'type': 'str'},
'ready': {'key': 'ready', 'type': 'bool'},
'started': {'key': 'started', 'type': 'bool'},
'restart_count': {'key': 'restartCount', 'type': 'int'},
}
def __init__(self, **kwargs):
super(ReplicaContainer, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.container_id = kwargs.get('container_id', None)
self.ready = kwargs.get('ready', None)
self.started = kwargs.get('started', None)
self.restart_count = kwargs.get('restart_count', None)
class Revision(ProxyResource):
"""Container App Revision.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. E.g.
"Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy
and modifiedBy information.
:vartype system_data: ~commondefinitions.models.SystemData
:ivar created_time: Timestamp describing when the revision was created
by controller
:vartype created_time: datetime
:ivar fqdn: Fully qualified domain name of the revision
:vartype fqdn: str
:ivar template: Container App Revision Template with all possible settings
and the
defaults if user did not provide them. The defaults are populated
as they were at the creation time
:vartype template: ~commondefinitions.models.Template
:ivar active: Boolean describing if the Revision is Active
:vartype active: bool
:ivar replicas: Number of pods currently running for this revision
:vartype replicas: int
:ivar traffic_weight: Traffic weight assigned to this revision
:vartype traffic_weight: int
:ivar provisioning_error: Optional Field - Platform Error Message
:vartype provisioning_error: str
:ivar health_state: Current health State of the revision. Possible values
include: 'Healthy', 'Unhealthy', 'None'
:vartype health_state: str or
~commondefinitions.models.RevisionHealthState
:ivar provisioning_state: Current provisioning State of the revision.
Possible values include: 'Provisioning', 'Provisioned', 'Failed',
'Deprovisioning', 'Deprovisioned'
:vartype provisioning_state: str or
~commondefinitions.models.RevisionProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'created_time': {'readonly': True},
'fqdn': {'readonly': True},
'template': {'readonly': True},
'active': {'readonly': True},
'replicas': {'readonly': True},
'traffic_weight': {'readonly': True},
'provisioning_error': {'readonly': True},
'health_state': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'fqdn': {'key': 'properties.fqdn', 'type': 'str'},
'template': {'key': 'properties.template', 'type': 'Template'},
'active': {'key': 'properties.active', 'type': 'bool'},
'replicas': {'key': 'properties.replicas', 'type': 'int'},
'traffic_weight': {'key': 'properties.trafficWeight', 'type': 'int'},
'provisioning_error': {'key': 'properties.provisioningError', 'type': 'str'},
'health_state': {'key': 'properties.healthState', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# This module implements classes to query the condor daemons
# and manipulate the results
# Please notice that it also converts \" into "
#
# Author:
# <NAME> (Aug 30th 2006)
#
import condorExe
import condorSecurity
import os
import string
import copy
import socket
import xml.parsers.expat
#
# Configuration
#
# Set path to condor binaries
def set_path(new_condor_bin_path):
global condor_bin_path
condor_bin_path = new_condor_bin_path
#
# Caching classes
#
# dummy caching class, when you don't want caching
# used as base class below, too
class NoneScheddCache:
#returns (cms arg schedd string,LOCAL_DIR)
def getScheddId(self,schedd_name,pool_name):
return (self.iGetCmdScheddStr(schedd_name),{})
# INTERNAL and for inheritance
def iGetCmdScheddStr(self,schedd_name):
if schedd_name is None:
schedd_str=""
else:
schedd_str = "-name %s " % schedd_name
return schedd_str
# The schedd can be found either through -name attr
# or through the local disk lookup
# Remember which one to use
class LocalScheddCache(NoneScheddCache):
def __init__(self):
self.enabled=True
# dictionary of
# (schedd_name,pool_name)=>(cms arg schedd string,env)
self.cache={}
self.my_ips=socket.gethostbyname_ex(socket.gethostname())[2]
try:
self.my_ips+=socket.gethostbyname_ex('localhost')[2]
except socket.gaierror,e:
pass # localhost not defined, ignore
def enable(self):
self.enabled=True
def disable(self):
self.enabled=False
#returns (cms arg schedd string,env)
def getScheddId(self,schedd_name,pool_name):
if schedd_name is None: # special case, do not cache
return ("",{})
if self.enabled:
k=(schedd_name,pool_name)
if not self.cache.has_key(k): # not in cache, discover it
env=self.iGetEnv(schedd_name, pool_name)
                if env is None: # no usable local dir info, fall back to the -name option
self.cache[k]=(self.iGetCmdScheddStr(schedd_name),{})
else:
self.cache[k]=("",env)
return self.cache[k]
else: # not enabled, just return the str
return (self.iGetCmdScheddStr(schedd_name),{})
#
# PRIVATE
#
# return None if not found
# Can raise exceptions
def iGetEnv(self,schedd_name, pool_name):
cs=CondorStatus('schedd',pool_name)
data=cs.fetch(constraint='Name=?="%s"'%schedd_name,format_list=[('ScheddIpAddr','s'),('SPOOL_DIR_STRING','s'),('LOCAL_DIR_STRING','s')])
if not data.has_key(schedd_name):
raise RuntimeError, "Schedd '%s' not found"%schedd_name
el=data[schedd_name]
if 'SPOOL_DIR_STRING' not in el and 'LOCAL_DIR_STRING' not in el: # not advertising, cannot use disk optimization
return None
if not el.has_key('ScheddIpAddr'): # This should never happen
raise RuntimeError, "Schedd '%s' is not advertising ScheddIpAddr"%schedd_name
schedd_ip=el['ScheddIpAddr'][1:].split(':')[0]
if schedd_ip in self.my_ips: #seems local, go for the dir
l=el.get('SPOOL_DIR_STRING', el.get('LOCAL_DIR_STRING'))
if os.path.isdir(l): # making sure the directory exists
if 'SPOOL_DIR_STRING' in el:
return {'_CONDOR_SPOOL': '%s' %l }
else: # LOCAL_DIR_STRING
return {'_CONDOR_SPOOL': '%s/spool' %l }
else: #dir does not exist, likely not relevant, revert to standard behaviour
return None
else: # not local
return None
# default global object
local_schedd_cache=LocalScheddCache()
def condorq_attrs(q_constraint, attribute_list):
"""
Retrieves a list of a single item from the all the factory queues.
"""
attr_str = ""
for attr in attribute_list:
attr_str += " -attr %s" % attr
xml_data = condorExe.exe_cmd("condor_q","-g -l %s -xml -constraint '%s'" % (attr_str, q_constraint))
classads_xml = []
tmp_list = []
for line in xml_data:
# look for the xml header
if line[:5] == "<?xml":
if len(tmp_list) > 0:
classads_xml.append(tmp_list)
tmp_list = []
tmp_list.append(line)
q_proxy_list = []
for ad_xml in classads_xml:
cred_list = xml2list(ad_xml)
q_proxy_list.extend(cred_list)
return q_proxy_list
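# Illustrative usage sketch (constraint and attribute names are hypothetical):
#
#   ads = condorq_attrs('JobStatus == 2', ['GlideinEntryName'])
#
# returns a list of classad dictionaries gathered across the factory queues.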
#
# Condor monitoring classes
#
# Generic, you most probably don't want to use these
class AbstractQuery: # pure virtual, just to have a minimum set of methods defined
# returns the data, will not modify self
def fetch(self,constraint=None,format_list=None):
raise NotImplementedError,"Fetch not implemented"
# will fetch in self.stored_data
def load(self,constraint=None,format_list=None):
raise NotImplementedError,"Load not implemented"
# constraint_func is a boolean function, with only one argument (data el)
# same output as fetch, but limited to constraint_func(el)==True
#
# if constraint_func==None, return all the data
def fetchStored(self,constraint_func=None):
raise NotImplementedError,"fetchStored not implemented"
class StoredQuery(AbstractQuery): # still virtual, only fetchStored defined
stored_data = {}
def fetchStored(self,constraint_func=None):
return applyConstraint(self.stored_data,constraint_func)
#
# format_list is a list of
# (attr_name, attr_type)
# where attr_type is one of
# "s" - string
# "i" - integer
# "r" - real (float)
# "b" - bool
#
#
# security_obj, if defined, should be a child of condorSecurity.ProtoRequest
class QueryExe(StoredQuery): # first fully implemented one, execute commands
def __init__(self,exe_name,resource_str,group_attribute,pool_name=None,security_obj=None,env={}):
self.exe_name=exe_name
self.env=env
self.resource_str=resource_str
self.group_attribute=group_attribute
self.pool_name=pool_name
if pool_name is None:
self.pool_str=""
else:
self.pool_str = "-pool %s" % pool_name
if security_obj is not None:
if security_obj.has_saved_state():
raise RuntimeError, "Cannot use a security object which has saved state."
self.security_obj=copy.deepcopy(security_obj)
else:
self.security_obj=condorSecurity.ProtoRequest()
    def require_integrity(self,requested_integrity): # if None, don't change; otherwise force that value
if requested_integrity is None:
condor_val=None
elif requested_integrity:
condor_val="REQUIRED"
else:
# if not required, still should not fail if the other side requires it
condor_val='OPTIONAL'
self.security_obj.set('CLIENT','INTEGRITY',condor_val)
def get_requested_integrity(self):
condor_val = self.security_obj.get('CLIENT','INTEGRITY')
if condor_val is None:
return None
return (condor_val=='REQUIRED')
    def require_encryption(self,requested_encryption): # if None, don't change; otherwise force that value
if requested_encryption is None:
condor_val=None
elif requested_encryption:
condor_val="REQUIRED"
else:
# if not required, still should not fail if the other side requires it
condor_val='OPTIONAL'
self.security_obj.set('CLIENT','ENCRYPTION',condor_val)
def get_requested_encryption(self):
condor_val = self.security_obj.get('CLIENT','ENCRYPTION')
if condor_val is None:
return None
return (condor_val=='REQUIRED')
def fetch(self,constraint=None,format_list=None):
if constraint is None:
constraint_str=""
else:
constraint_str="-constraint '%s'"%constraint
full_xml=(format_list is None)
if format_list is not None:
format_arr=[]
for format_el in format_list:
attr_name,attr_type=format_el
attr_format={'s':'%s','i':'%i','r':'%f','b':'%i'}[attr_type]
format_arr.append('-format "%s" "%s"'%(attr_format,attr_name))
format_str=string.join(format_arr," ")
# set environment for security settings
self.security_obj.save_state()
try:
self.security_obj.enforce_requests()
if full_xml:
                xml_data = condorExe.exe_cmd(self.exe_name,"%s -xml %s %s"%(self.resource_str,self.pool_str,constraint_str),env=self.env)
            else:
                xml_data = condorExe.exe_cmd(self.exe_name,"%s %s -xml %s %s"%(self.resource_str,format_str,self.pool_str,constraint_str),env=self.env)
finally:
# restore old values
self.security_obj.restore_state()
list_data = xml2list(xml_data)
del xml_data
dict_data = list2dict(list_data, self.group_attribute)
return dict_data
def load(self, constraint=None, format_list=None):
self.stored_data = self.fetch(constraint, format_list)
#
# Fully usable query functions
#
# condor_q
class CondorQ(QueryExe):
def __init__(self,schedd_name=None,pool_name=None,security_obj=None,schedd_lookup_cache=local_schedd_cache):
self.schedd_name=schedd_name
if schedd_lookup_cache is None:
schedd_lookup_cache=NoneScheddCache()
schedd_str,env=schedd_lookup_cache.getScheddId(schedd_name, pool_name)
QueryExe.__init__(self,"condor_q",schedd_str,["ClusterId","ProcId"],pool_name,security_obj,env)
def fetch(self, constraint=None, format_list=None):
if format_list is not None:
# check that ClusterId and ProcId are present, and if not add them
format_list = complete_format_list(format_list, [("ClusterId", 'i'), ("ProcId", 'i')])
return QueryExe.fetch(self, constraint=constraint, format_list=format_list)
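# Illustrative usage sketch (schedd/pool names are hypothetical; requires a working
# HTCondor installation): ClusterId and ProcId are added to format_list automatically.
#
#   cq = CondorQ(schedd_name='schedd.example.com', pool_name='pool.example.com')
#   jobs = cq.fetch(constraint='JobStatus == 1',
#                   format_list=[('JobStatus', 'i'), ('Owner', 's')])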
# condor_q, where we have only one ProcId x ClusterId
class CondorQLite(QueryExe):
def __init__(self,schedd_name=None,pool_name=None,security_obj=None,schedd_lookup_cache=local_schedd_cache):
self.schedd_name=schedd_name
if schedd_lookup_cache is None:
schedd_lookup_cache=NoneScheddCache()
schedd_str,env=schedd_lookup_cache.getScheddId(schedd_name, pool_name)
QueryExe.__init__(self,"condor_q",schedd_str,"ClusterId",pool_name,security_obj,env)
def fetch(self, constraint=None, format_list=None):
if format_list is not None:
# check that ClusterId is present, and if not add it
format_list = complete_format_list(format_list, [("ClusterId", 'i')])
return QueryExe.fetch(self, constraint=constraint, format_list=format_list)
# condor_status
class CondorStatus(QueryExe):
def __init__(self,subsystem_name=None,pool_name=None,security_obj=None):
if subsystem_name is None:
subsystem_str=""
else:
subsystem_str = "-%s" % subsystem_name
QueryExe.__init__(self,"condor_status",subsystem_str,"Name",pool_name,security_obj,{})
def fetch(self, constraint=None, format_list=None):
if format_list is not None:
# check that Name present and if not, add it
format_list = complete_format_list(format_list, [("Name",'s')])
return QueryExe.fetch(self, constraint=constraint, format_list=format_list)
def __repr__(self):
output = "CondorStatus:\n"
output += "exe_name = %s\n" % str(self.exe_name)
output += "env = %s\n" % str(self.env)
output += "resource_str = %s\n" % str(self.resource_str)
output += "group_attribute = %s\n" % str(self.group_attribute)
output += "pool_str = %s\n" % str(self.pool_str)
output += "security_obj = %s\n" % str(self.security_obj)
output += "stored_data = %s" % str(self.stored_data)
return output
#
# Subquery classes
#
# Generic, you most probably don't want to use this
class BaseSubQuery(StoredQuery):
def __init__(self, query, subquery_func):
self.query = query
self.subquery_func = subquery_func
def fetch(self, constraint=None):
indata = self.query.fetch(constraint)
return self.subquery_func(self, indata)
#
# NOTE: You need to call load on the SubQuery object to use fetchStored
# and had query.load issued before
#
def load(self, constraint=None):
indata = self.query.fetchStored(constraint)
self.stored_data = self.subquery_func(indata)
#
# Fully usable subquery functions
#
class SubQuery(BaseSubQuery):
def __init__(self, query, constraint_func=None):
BaseSubQuery.__init__(self, query, lambda d:applyConstraint(d, constraint_func))
def __repr__(self):
output = "SubQuery:\n"
#output += "client_name = %s\n" % str(self.client_name)
#output += "entry_name = %s\n" % str(self.entry_name)
#output += "factory_name = %s\n" % str(self.factory_name)
#output += "glidein_name = %s\n" % str(self.glidein_name)
#output += "schedd_name = %s\n" % str(self.schedd_name)
output += "stored_data = %s" % str(self.stored_data)
return output
class Group(BaseSubQuery):
# group_key_func - Key extraction function
# One argument: classad dictionary
# Returns: value of the group key
# group_data_func - Key extraction function
# One argument: list of classad dictionaries
# Returns: a summary classad dictionary
def __init__(self, query, group_key_func, group_data_func):
BaseSubQuery.__init__(self, query, lambda d:doGroup(d, group_key_func, group_data_func))
#
# Summarizing classes
#
class Summarize:
# hash_func - Hashing function
# One argument: classad dictionary
# Returns: hash value
# if None, will not be counted
# if a list, all elements will be used
def __init__(self, query, hash_func=lambda x:1):
self.query = query
self.hash_func = hash_func
# Parameters:
# constraint - string to be passed to query.fetch()
# hash_func - if !=None, use this instead of the main one
# Returns a dictionary of hash values
# Elements are counts (or more dictionaries if hash returns lists)
def count(self, constraint=None, hash_func=None):
data = self.query.fetch(constraint)
return fetch2count(data, self.getHash(hash_func))
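    # Hypothetical usage sketch (the query object and classad attributes are illustrative):
    #
    #   summarizer = Summarize(status_query,
    #                          hash_func=lambda ad: [ad.get("Arch"), ad.get("State")])
    #   counts = summarizer.count(constraint='State =!= "Owner"')
    #   # Because hash_func returns a list, the result is nested, e.g.
    #   #   {"X86_64": {"Claimed": 30, "Unclaimed": 12}}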
# Use data pre-stored in query
# Same output as count
def countStored(self, constraint_func=None, hash_func=None):
data = self.query.fetchStored(constraint_func)
return fetch2count(data, self.getHash(hash_func))
# Parameters, same as count
code: 200",
"Total identifiers registered with this collection: 201",
"Entries on this page: 1",
" The Green Mouse",
] == test_result.result
assert True == test_result.success
# Next, test failure.
response = mock_response(url, auth, 401, "An error message.")
test_result = SelfTestResult("failure")
assert False == test_result.success
m(test_result, response)
assert [
"Request URL: %s" % url,
"Request authorization: %s" % auth,
"Status code: 401",
] == test_result.result
def test_external_integration(self):
result = MetadataWranglerOPDSLookup.external_integration(self._db)
assert result.protocol == ExternalIntegration.METADATA_WRANGLER
assert result.goal == ExternalIntegration.METADATA_GOAL
class OPDSImporterTest(OPDSTest):
def setup_method(self):
super(OPDSImporterTest, self).setup_method()
self.content_server_feed = self.sample_opds("content_server.opds")
self.content_server_mini_feed = self.sample_opds("content_server_mini.opds")
self.audiobooks_opds = self.sample_opds("audiobooks.opds")
self.feed_with_id_and_dcterms_identifier = self.sample_opds(
"feed_with_id_and_dcterms_identifier.opds", "rb"
)
self._default_collection.external_integration.setting(
"data_source"
).value = DataSource.OA_CONTENT_SERVER
# Set an ExternalIntegration for the metadata_client used
# in the OPDSImporter.
self.service = self._external_integration(
ExternalIntegration.METADATA_WRANGLER,
goal=ExternalIntegration.METADATA_GOAL,
url="http://localhost",
)
class TestOPDSImporter(OPDSImporterTest):
def test_constructor(self):
# The default way of making HTTP requests is with
# Representation.cautious_http_get.
importer = OPDSImporter(self._db, collection=None)
assert Representation.cautious_http_get == importer.http_get
# But you can pass in anything you want.
do_get = object()
importer = OPDSImporter(self._db, collection=None, http_get=do_get)
assert do_get == importer.http_get
def test_data_source_autocreated(self):
name = "New data source " + self._str
importer = OPDSImporter(self._db, collection=None, data_source_name=name)
source1 = importer.data_source
assert name == source1.name
def test_extract_next_links(self):
importer = OPDSImporter(
self._db, collection=None, data_source_name=DataSource.NYT
)
next_links = importer.extract_next_links(self.content_server_mini_feed)
assert 1 == len(next_links)
assert "http://localhost:5000/?after=327&size=100" == next_links[0]
def test_extract_last_update_dates(self):
importer = OPDSImporter(
self._db, collection=None, data_source_name=DataSource.NYT
)
# This file has two <entry> tags and one <simplified:message> tag.
# The <entry> tags have their last update dates extracted,
# the message is ignored.
last_update_dates = importer.extract_last_update_dates(
self.content_server_mini_feed
)
assert 2 == len(last_update_dates)
identifier1, updated1 = last_update_dates[0]
identifier2, updated2 = last_update_dates[1]
assert "urn:librarysimplified.org/terms/id/Gutenberg%20ID/10441" == identifier1
assert datetime_utc(2015, 1, 2, 16, 56, 40) == updated1
assert "urn:librarysimplified.org/terms/id/Gutenberg%20ID/10557" == identifier2
assert datetime_utc(2015, 1, 2, 16, 56, 40) == updated2
def test_extract_last_update_dates_ignores_entries_with_no_update(self):
importer = OPDSImporter(
self._db, collection=None, data_source_name=DataSource.NYT
)
# Rename the <updated> and <published> tags in the content
# server so they don't show up.
content = self.content_server_mini_feed.replace("updated>", "irrelevant>")
content = content.replace("published>", "irrelevant>")
last_update_dates = importer.extract_last_update_dates(content)
# No updated dates!
assert [] == last_update_dates
def test_extract_metadata(self):
data_source_name = "Data source name " + self._str
importer = OPDSImporter(
self._db, collection=None, data_source_name=data_source_name
)
metadata, failures = importer.extract_feed_data(self.content_server_mini_feed)
m1 = metadata["http://www.gutenberg.org/ebooks/10441"]
m2 = metadata["http://www.gutenberg.org/ebooks/10557"]
c1 = metadata["http://www.gutenberg.org/ebooks/10441"]
c2 = metadata["http://www.gutenberg.org/ebooks/10557"]
assert "The Green Mouse" == m1.title
assert "A Tale of Mousy Terror" == m1.subtitle
assert data_source_name == m1._data_source
assert data_source_name == m2._data_source
assert data_source_name == c1._data_source
assert data_source_name == c2._data_source
[failure] = list(failures.values())
assert (
"202: I'm working to locate a source for this identifier."
== failure.exception
)
def test_use_dcterm_identifier_as_id_with_id_and_dcterms_identifier(self):
data_source_name = "Data source name " + self._str
collection_to_test = self._default_collection
collection_to_test.primary_identifier_source = (
ExternalIntegration.DCTERMS_IDENTIFIER
)
importer = OPDSImporter(
self._db,
collection=collection_to_test,
data_source_name=data_source_name,
)
metadata, failures = importer.extract_feed_data(
self.feed_with_id_and_dcterms_identifier
)
# First book doesn't have <dcterms:identifier>, so <id> must be used as identifier
book_1 = metadata.get("https://root.uri/1")
assert book_1 != None
        # Second book has both <id> and <dcterms:identifier>, so <dcterms:identifier> must be used as the id
book_2 = metadata.get("urn:isbn:9781468316438")
assert book_2 != None
        # Verify that the <id> value was appended to the list of identifiers
book_2_identifiers = book_2.identifiers
found = False
for entry in book_2.identifiers:
if entry.identifier == "https://root.uri/2":
found = True
break
assert found == True
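        # Illustrative sketch only (not the literal fixture contents): an entry that
        # carries both tags might look roughly like
        #   <entry>
        #     <id>https://root.uri/2</id>
        #     <dcterms:identifier>urn:isbn:9781468316438</dcterms:identifier>
        #   </entry>
        # With primary_identifier_source set to DCTERMS_IDENTIFIER, the dcterms value
        # becomes the primary identifier and the <id> value is kept as a secondary one.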
        # Third book has more than one dcterms:identifier; all of them must be present as metadata identifiers
        book_3 = metadata.get("urn:isbn:9781683351993")
        assert book_3 != None
        # Verify that the <id> value was appended to the list of identifiers
book_3_identifiers = book_3.identifiers
expected_identifier = [
"9781683351993",
"https://root.uri/3",
"9781683351504",
"9780312939458",
]
result_identifier = [entry.identifier for entry in book_3.identifiers]
assert set(expected_identifier) == set(result_identifier)
def test_use_id_with_existing_dcterms_identifier(self):
data_source_name = "Data source name " + self._str
collection_to_test = self._default_collection
collection_to_test.primary_identifier_source = None
importer = OPDSImporter(
self._db,
collection=collection_to_test,
data_source_name=data_source_name,
)
metadata, failures = importer.extract_feed_data(
self.feed_with_id_and_dcterms_identifier
)
book_1 = metadata.get("https://root.uri/1")
assert book_1 != None
book_2 = metadata.get("https://root.uri/2")
assert book_2 != None
book_3 = metadata.get("https://root.uri/3")
assert book_3 != None
def test_extract_link(self):
no_rel = AtomFeed.E.link(href="http://foo/")
assert None == OPDSImporter.extract_link(no_rel)
no_href = AtomFeed.E.link(href="", rel="foo")
assert None == OPDSImporter.extract_link(no_href)
good = AtomFeed.E.link(href="http://foo", rel="bar")
link = OPDSImporter.extract_link(good)
assert "http://foo" == link.href
assert "bar" == link.rel
relative = AtomFeed.E.link(href="/foo/bar", rel="self")
link = OPDSImporter.extract_link(relative, "http://server")
assert "http://server/foo/bar" == link.href
def test_get_medium_from_links(self):
audio_links = [
LinkData(
href="url",
rel="http://opds-spec.org/acquisition/",
media_type="application/audiobook+json;param=value",
),
LinkData(href="url", rel="http://opds-spec.org/image"),
]
book_links = [
LinkData(href="url", rel="http://opds-spec.org/image"),
LinkData(
href="url",
rel="http://opds-spec.org/acquisition/",
media_type=random.choice(MediaTypes.BOOK_MEDIA_TYPES) + ";param=value",
),
]
m = OPDSImporter.get_medium_from_links
assert m(audio_links) == "Audio"
assert m(book_links) == "Book"
def test_extract_link_rights_uri(self):
# Most of the time, a link's rights URI is inherited from the entry.
entry_rights = RightsStatus.PUBLIC_DOMAIN_USA
link_tag = AtomFeed.E.link(href="http://foo", rel="bar")
link = OPDSImporter.extract_link(link_tag, entry_rights_uri=entry_rights)
assert RightsStatus.PUBLIC_DOMAIN_USA == link.rights_uri
# But a dcterms:rights tag beneath the link can override this.
rights_attr = "{%s}rights" % AtomFeed.DCTERMS_NS
link_tag.attrib[rights_attr] = RightsStatus.IN_COPYRIGHT
link = OPDSImporter.extract_link(link_tag, entry_rights_uri=entry_rights)
assert RightsStatus.IN_COPYRIGHT == link.rights_uri
def test_extract_data_from_feedparser(self):
data_source = DataSource.lookup(self._db, DataSource.OA_CONTENT_SERVER)
importer = OPDSImporter(self._db, None, data_source_name=data_source.name)
values, failures = importer.extract_data_from_feedparser(
self.content_server_mini_feed, data_source
)
# The <entry> tag became a Metadata object.
metadata = values["urn:librarysimplified.org/terms/id/Gutenberg%20ID/10441"]
assert "The Green Mouse" == metadata["title"]
assert "A Tale of Mousy Terror" == metadata["subtitle"]
assert "en" == metadata["language"]
assert "Project Gutenberg" == metadata["publisher"]
circulation = metadata["circulation"]
assert DataSource.GUTENBERG == circulation["data_source"]
# The <simplified:message> tag did not become a
# CoverageFailure -- that's handled by
# extract_metadata_from_elementtree.
assert {} == failures
def test_extract_data_from_feedparser_handles_exception(self):
class DoomedFeedparserOPDSImporter(OPDSImporter):
"""An importer that can't extract metadata from feedparser."""
@classmethod
def _data_detail_for_feedparser_entry(cls, entry, data_source):
raise Exception("Utter failure!")
data_source = DataSource.lookup(self._db, DataSource.OA_CONTENT_SERVER)
importer = DoomedFeedparserOPDSImporter(
self._db, None, data_source_name=data_source.name
)
values, failures = importer.extract_data_from_feedparser(
self.content_server_mini_feed, data_source
)
# No metadata was extracted.
assert 0 == len(list(values.keys()))
# There are 2 failures, both from exceptions. The 202 message
# found in content_server_mini.opds is not extracted
# here--it's extracted by extract_metadata_from_elementtree.
assert 2 == len(failures)
# The first error message became a CoverageFailure.
failure = failures["urn:librarysimplified.org/terms/id/Gutenberg%20ID/10441"]
assert isinstance(failure, CoverageFailure)
assert True == failure.transient
assert "Utter failure!" in failure.exception
# The second error message became a CoverageFailure.
failure = failures["urn:librarysimplified.org/terms/id/Gutenberg%20ID/10557"]
assert isinstance(failure, CoverageFailure)
assert True == failure.transient
assert "Utter failure!" in failure.exception
def test_extract_metadata_from_elementtree(self):
data_source = DataSource.lookup(self._db, DataSource.OA_CONTENT_SERVER)
data, failures = OPDSImporter.extract_metadata_from_elementtree(
self.content_server_feed, data_source
)
# There are 76 entries in the feed, and we got metadata for
# every one of them.
assert 76 == len(data)
assert 0 == len(failures)
# We're going to do spot checks on a book and a periodical.
# First, the book.
book_id = "urn:librarysimplified.org/terms/id/Gutenberg%20ID/1022"
book = data[book_id]
assert Edition.BOOK_MEDIUM == book["medium"]
[contributor] = book["contributors"]
assert "Thoreau, <NAME>" == contributor.sort_name
assert [Contributor.AUTHOR_ROLE] == contributor.roles
subjects = book["subjects"]
assert ["LCSH", "LCSH", "LCSH", "LCC"] == [x.type for x in subjects]
assert ["Essays", "Nature", "Walking", "PS"] == [x.identifier for x in subjects]
assert [None, None, None, "American Literature"] == [
x.name for x in book["subjects"]
]
assert [1, 1, 1, 10] == [x.weight for x in book["subjects"]]
assert [] == book["measurements"]
assert datetime_utc(1862, 6, 1) == book["published"]
[link] = book["links"]
assert Hyperlink.OPEN_ACCESS_DOWNLOAD == link.rel
assert "http://www.gutenberg.org/ebooks/1022.epub.noimages" == link.href
assert Representation.EPUB_MEDIA_TYPE == link.media_type
# And now, the periodical.
periodical_id = "urn:librarysimplified.org/terms/id/Gutenberg%20ID/10441"
periodical = data[periodical_id]
assert Edition.PERIODICAL_MEDIUM == periodical["medium"]
subjects = periodical["subjects"]
assert [
"LCSH",
"LCSH",
"LCSH",
"LCSH",
"LCC",
"schema:audience",
"schema:typicalAgeRange",
] == [x.type for x in subjects]
assert [
"Courtship -- Fiction",
"New York (N.Y.) -- Fiction",
"Fantasy fiction",
"Magic -- Fiction",
"PZ",
"Children",
"7",
] == [x.identifier for x in subjects]
assert [1, 1, 1, 1, 1, 1, 1] == [x.weight for x in subjects]
r1, r2, r3 = periodical["measurements"]
assert Measurement.QUALITY == r1.quantity_measured
assert 0.3333 == r1.value
assert 1 == r1.weight
assert Measurement.RATING == r2.quantity_measured
assert 0.6 == r2.value
assert 1 == r2.weight
assert Measurement.POPULARITY == r3.quantity_measured
assert 0.25 == r3.value
assert 1 == r3.weight
assert "Animal Colors" == periodical["series"]
assert "1" == periodical["series_position"]
assert datetime_utc(1910, 1, 1) == periodical["published"]
def test_extract_metadata_from_elementtree_treats_message_as_failure(self):
data_source = DataSource.lookup(self._db, DataSource.OA_CONTENT_SERVER)
feed = self.sample_opds("unrecognized_identifier.opds")
values, failures = OPDSImporter.extract_metadata_from_elementtree(
feed, data_source
)
# We have no Metadata objects and one CoverageFailure.
assert {} == values
        # The CoverageFailure contains the information that was
be go to create ?
fogapp_name = status['create_fn']['fogapp_name']
fogapp_image = spec['template']['spec']['containers'][0]['image']
fogapp_replicas = spec['replicas']
fogapp_cpu_request = int(spec['template']['spec']['containers'][0]['resources']['requests']['cpu'][:-1])
# fogapp_cpu_limit = spec['template']['spec']['containers']['resources']['limits']['cpu']
fogapp_memory_request = int(spec['template']['spec']['containers'][0]['resources']['requests']['memory'][:-2])
# fogapp_memory_limit = spec['template']['spec']['containers']['resources']['limits']['memory']
# fogapp_type = spec['appType']
# fogapp_type = body['kind']
spec_text = str(spec)
fogapp_current_replicas = {}
if 'update_fn' in status:
fogapp_current_locations = status['update_fn']['fogapp_locations']
for i in range(0, len(fogapp_current_locations)):
fogapp_current_replicas[fogapp_current_locations[i]] = status['update_fn']['fogapp_replicas'][i]
else:
fogapp_current_locations = status['create_fn']['fogapp_locations']
for i in range(0, len(fogapp_current_locations)):
fogapp_current_replicas[fogapp_current_locations[i]] = status['create_fn']['fogapp_replicas'][i]
total_current_replicas = 0
for cluster in fogapp_current_locations:
total_current_replicas += fogapp_current_replicas[cluster]
print("Current locations and replicas ............................", fogapp_current_replicas)
# if not fogapp_type or 'appType' not in spec:
# raise kopf.HandlerFatalError(f"appType needs to be specified.")
# Make sure image is provided
if not fogapp_image:
raise kopf.HandlerFatalError(f"Image must be set. Got {fogapp_image}.")
if not fogapp_replicas:
raise kopf.HandlerFatalError(f"Number of replicas must be set. Got {fogapp_replicas}.")
if 'numberOfLocations' in spec:
clusters_qty = spec['numberOfLocations']
else:
clusters_qty = 1
# Get namespace
if 'namespace' in body['metadata']:
fogpapp_namespace = body['metadata']['namespace']
else:
fogpapp_namespace = "default"
# Placement policy specified by user
if 'placementPolicy' in spec:
placement_policy = spec['placementPolicy']
else: # Default placement policy is most_traffic
placement_policy = 'most_traffic'
override_replicas = {}
eligible_replicas = []
eligible_clusters = []
if 'locations' not in spec:
mode = 'update'
fogapp_locations = getFogAppLocations(fogapp_name, fogpapp_namespace, fogapp_cpu_request, fogapp_memory_request, fogapp_replicas, clusters_qty, placement_policy, mode)
total_replicas = clusters_qty * fogapp_replicas
if len(fogapp_locations) != 0:
eligible_clusters = []
for cluster in fogapp_locations:
if cluster['max_replicas'] > fogapp_replicas:
cluster['replicas'] = fogapp_replicas
cluster['overflow'] = 0
else:
cluster['replicas'] = cluster['max_replicas']
cluster['overflow'] = fogapp_replicas - cluster['max_replicas']
total_overflow = 0
for cluster in fogapp_locations[:clusters_qty]:
dict = {}
dict['name'] = cluster['name']
dict['replicas'] = cluster['replicas']
eligible_clusters.append(dict)
total_overflow += cluster['overflow']
print("Total overflow ...........", total_overflow)
if total_overflow > 0:
for cluster in fogapp_locations[clusters_qty:]:
if cluster['max_replicas'] > total_overflow:
dict = {}
dict['name'] = cluster['name']
dict['replicas'] = total_overflow
total_overflow = 0
eligible_clusters.append(dict)
break
else:
dict = {}
dict['name'] = cluster['name']
dict['replicas'] = cluster['max_replicas']
total_overflow = total_overflow - dict['replicas']
eligible_clusters.append(dict)
if total_overflow > 0:
for cluster in eligible_clusters:
if 'cloud' in cluster['name']:
cluster['replicas'] += total_overflow
total_overflow = 0
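            # Illustrative walk-through with hypothetical numbers (not from any real cluster):
            # with fogapp_replicas=10, clusters_qty=2 and candidates
            #   fog-a(max 6), fog-b(max 12), fog-c(max 3), cloud-x(max 100),
            # fog-a gets 6 replicas (overflow 4) and fog-b gets 10 (overflow 0); the
            # total_overflow of 4 is then spilled onto fog-c (3 replicas) and the last
            # replica lands on cloud-x, the next candidate in the list.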
print("Final list of clusters .................", eligible_clusters)
print("Final overflow .................", total_overflow)
if total_overflow > 0:
dict = {}
dict['message'] = 'to_cloud'
dict['replicas'] = total_overflow
patch.status['message'] = dict
raise kopf.TemporaryError("Fog clusters not sufficient to run the app. Provisioning cloud cluster.....................",
delay=30)
else:
dict = {}
dict['message'] = 'to_cloud'
dict['replicas'] = fogapp_replicas
patch.status['message'] = dict
raise kopf.TemporaryError(
"No clusters found at the fog level. Provisioning cloud cluster.....................",
delay=30)
else:
input_clusters = spec['locations'].split(",")
fogapp_locations = []
for location in input_clusters:
fogapp_locations.append(location.strip())
print("Input list of clusters ....", fogapp_locations)
clusters_qty = len(fogapp_locations)
if 'replicaOverrides' in spec:
replicas_list = []
override_replicas = {}
if isinstance(spec['replicaOverrides'], str):
replicas = spec['replicaOverrides'].split(",")
for i in replicas:
replicas_list.append(i.strip())
elif isinstance(spec['replicaOverrides'], list):
replicas_list = spec['replicaOverrides']
print("Replica overrides ............", spec['replicaOverrides'])
for i in range(0, len(fogapp_locations)):
override_replicas[fogapp_locations[i]] = replicas_list[i]
else:
override_replicas = {}
for i in range(0, len(fogapp_locations)):
override_replicas[fogapp_locations[i]] = fogapp_replicas
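        # Illustrative spec fragment (hypothetical values): with
        #   locations: "fog-a, fog-b, cloud-x"  and  replicaOverrides: "3, 5, 2"
        # override_replicas ends up as {'fog-a': '3', 'fog-b': '5', 'cloud-x': '2'};
        # without replicaOverrides every listed cluster defaults to fogapp_replicas.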
print("Replica overrides input ....", override_replicas)
total_replicas = 0
for replica in list(override_replicas.values()):
total_replicas += int(replica)
print("Total number of replicas .....", total_replicas)
fog_only_clusters = []
for cluster in fogapp_locations:
if 'cloud' not in cluster:
fog_only_clusters.append(cluster)
# Compute cloud replicas
cloud_replicas = 0
for cluster in fogapp_locations:
if 'cloud' in cluster:
cloud_replicas += int(override_replicas[cluster])
if len(fog_only_clusters) > 0:
possible_clusters = findPossibleClusters(fog_only_clusters, fogapp_cpu_request, fogapp_memory_request)
else:
possible_clusters = []
print("Initial possible clusters list ............", possible_clusters)
        # if none of the fog clusters have right-sized nodes, fall back to a cloud cluster
if len(possible_clusters) == 0:
eligible_clusters = []
eligible_replicas = []
cloud_cluster = getCloudCluster()
if 'cloud' in cloud_cluster:
# eligible_clusters.append(cloud_cluster)
# eligible_replicas.append(total_replicas)
dict = {}
dict['name'] = cloud_cluster
dict['replicas'] = total_replicas
eligible_clusters.append(dict)
else:
dict = {}
dict['message'] = 'to_cloud'
dict['replicas'] = total_replicas
patch.status['message'] = dict
                raise kopf.TemporaryError("The application could not be scheduled on the Fog level. Need cloud cluster.",
delay=30)
print("Initial eligible clusters and replicas 1111", eligible_clusters)
else:
fogapp_locations.sort()
possible_clusters.sort()
override_replicas_update = {}
# Assign replicas to replacement clusters from input clusters
for i in range(0, len(possible_clusters)):
if possible_clusters[i] in fogapp_locations:
override_replicas_update[possible_clusters[i]] = override_replicas[possible_clusters[i]]
else:
override_replicas_update[possible_clusters[i]] = list(override_replicas.values())[i]
print("Override replicas new .....", override_replicas_update)
for cluster in possible_clusters:
replicas = int(override_replicas_update[cluster])
#replicas = int(override_replicas_diff[cluster])
# is_eligible = checkClusterEligibility(cluster, app_cpu_request, app_memory_request, replicas)
# The maximum number of replicas the cluster can host
maximum_replicas = getAllocatableCapacity(cluster, fogapp_cpu_request, fogapp_memory_request, fogapp_name, fogpapp_namespace)
if maximum_replicas > replicas:
dict = {}
dict['name'] = cluster
dict['max_replicas'] = maximum_replicas
dict['replicas'] = replicas
dict['overflow'] = 0
eligible_clusters.append(dict)
else:
dict = {}
dict['name'] = cluster
dict['max_replicas'] = maximum_replicas
dict['replicas'] = maximum_replicas
dict['overflow'] = replicas - maximum_replicas
eligible_clusters.append(dict)
temp_list = []
for cluster in eligible_clusters:
temp_list.append(cluster)
            print("Possible list of clusters and overflow ....", temp_list)
temp_list_2 = []
for cluster in temp_list:
temp_list_2.append(cluster['name'])
temp_list_3 = list(set(fogapp_locations + temp_list_2))
total_overflow = 0
for cluster in temp_list:
total_overflow += cluster['overflow']
maximum_replicas = {}
for cluster in temp_list:
nearest_clusters = []
overflow = cluster['overflow']
#leftover = overflow
print("Overflow from ", cluster, overflow)
if overflow > 0:
nearest_clusters = findNearestClusters(cluster, temp_list_3)
print("List of nearest clusters ....", nearest_clusters)
# else:
# print("The cluster doesn't have overflow ....")
# break
# Distribute overflow to nearest clusters
if len(nearest_clusters) > 0:
for c in nearest_clusters:
# print("Overflow .................", overflow)
# if overflow > 0:
maximum_replicas[c] = getAllocatableCapacity(c, fogapp_cpu_request, fogapp_memory_request, fogapp_name, fogpapp_namespace)
print("Maximum replicas .....", maximum_replicas)
for cluster in temp_list:
nearest_clusters = []
overflow = cluster['overflow']
if overflow > 0:
nearest_clusters = findNearestClusters(cluster, temp_list_3)
# else:
# break
if len(nearest_clusters) > 0:
for c in nearest_clusters:
if cluster['overflow'] > 0:
if maximum_replicas[c] == 0:
cluster['overflow'] = cluster['overflow']
#break
elif maximum_replicas[c] > cluster['overflow']:
dict = {}
dict['name'] = c
dict['replicas'] = cluster['overflow']
dict['overflow'] = 0
eligible_clusters.append(dict)
maximum_replicas[c] = maximum_replicas[c] - cluster['overflow']
cluster['overflow'] = 0
#break
else:
dict = {}
dict['name'] = c
dict['replicas'] = maximum_replicas[c]
dict['overflow'] = 0
cluster['overflow'] = cluster['overflow'] - maximum_replicas[c]
eligible_clusters.append(dict)
maximum_replicas[c] = 0
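            # Merge duplicate entries for the same cluster (a cluster can appear once with
            # its own replicas and again as the recipient of another cluster's overflow)
            # by summing the replicas and overflow columns per cluster name.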
eligible_clusters = (pd.DataFrame(eligible_clusters)
.groupby(['name'], as_index=False)
.agg({'replicas': 'sum', 'overflow': 'sum'})
.to_dict('r'))
# for c in eligible_clusters:
# maximum_replicas = getMaximumReplicas(c['name'], fogapp_cpu_request, fogapp_memory_request)
# if c['replicas'] > maximum_replicas:
# c['overflow'] = c['overflow'] + c['replicas'] - maximum_replicas
# c['replicas'] = maximum_replicas
print("Preliminary list of eligible clusters ...", eligible_clusters)
# Compute leftover to be deployed on cloud cluster
leftover = 0
for cluster in eligible_clusters:
if cluster['overflow'] > 0:
leftover += cluster['overflow']
# Add leftover on top of the number of replicas requested for cloud
# for cluster in fogapp_locations:
# if 'cloud' in cluster:
# leftover += int(override_replicas[cluster])
if leftover > 0:
for cluster in fogapp_locations:
if 'cloud' in cluster:
dict = {}
dict['name'] = cluster
dict['replicas'] = leftover
dict['overflow'] = 0
eligible_clusters.append(dict)
leftover = 0
print("Eligible clusters including cloud ...........", eligible_clusters)
if len(eligible_clusters) == 0:
dict = {}
dict['message'] = 'to_cloud'
dict['replicas'] = total_replicas
patch.status['message'] = dict
raise kopf.TemporaryError(
"The application could not be scheduled on the Fog level. Need cloud cluster.",
delay=30)
else:
if leftover > 0:
cloud_cluster = getCloudCluster()
if 'cloud' in cloud_cluster:
dict = {}
dict['name'] = cloud_cluster
dict['replicas'] = leftover
dict['overflow'] = 0
eligible_clusters.append(dict)
leftover = 0
print("Eligible clusters including cloud ...........", eligible_clusters)
else:
dict = {}
dict['message'] = 'to_cloud'
dict['replicas'] = leftover
patch.status['message'] = dict
raise kopf.TemporaryError(
"The application could not be scheduled on the Fog level. Need cloud cluster.",
delay=30)
for cluster in eligible_clusters:
if cluster['replicas'] == 0:
eligible_clusters.remove(cluster)
print("Final list of eligible clusters ...", eligible_clusters)
temp_list = []
for cluster in eligible_clusters:
temp_list.append(cluster)
eligible_clusters = []
eligible_replicas = []
for cluster in temp_list:
eligible_clusters.append(cluster['name'])
eligible_replicas.append(cluster['replicas'])
# For the spec file
deployment_template = "{'apiVersion': 'apps/v1', 'kind': 'Deployment', 'metadata': {'name': '" + fogapp_name + "', 'namespace': '" + fogpapp_namespace + "'}, 'spec': "
deployment_json = deployment_template + spec_text + "}"
deployment_text = deployment_json.replace("'", "\"")
deployment_body = json.loads(deployment_text)
# Delete deployment and service from current clusters
fogapp_current_locations.sort()
eligible_clusters_sorted = []
for cluster in eligible_clusters:
eligible_clusters_sorted.append(cluster)
eligible_clusters_sorted.sort()
if len(eligible_clusters_sorted) == len(fogapp_current_locations) and fogapp_current_locations == eligible_clusters_sorted:
print("Same set of clusters .... Patching ......")
i = 0
for cluster in eligible_clusters:
deployment_body['spec']['replicas'] = eligible_replicas[i]
print("Patching fogapp on existing clusters ............")
patchDeployment(cluster, fogapp_name, deployment_body, fogpapp_namespace)
1; size: 30; search: "
await app(get_example_scope("GET", "/", query=b"page=2"), mock_receive(), mock_send)
response = app.response
content = await response.text()
assert response.status == 200
assert content == "Page: 2; size: 30; search: "
await app(
get_example_scope("GET", "/", query=b"page=2&size=50"),
mock_receive(),
mock_send,
)
response = app.response
content = await response.text()
assert response.status == 200
assert content == "Page: 2; size: 50; search: "
await app(
get_example_scope("GET", "/", query=b"page=2&size=50&search=foo"),
mock_receive(),
mock_send,
)
response = app.response
content = await response.text()
assert response.status == 200
assert content == "Page: 2; size: 50; search: foo"
@pytest.mark.asyncio
async def test_handler_normalize_sync_method_from_query_default(
app, mock_send, mock_receive
):
@app.router.get("/")
def get_products(
page: FromQuery[int] = FromQuery(1),
size: FromQuery[int] = FromQuery(30),
search: FromQuery[str] = FromQuery(""),
):
return text(f"Page: {page.value}; size: {size.value}; search: {search.value}")
app.normalize_handlers()
await app(get_example_scope("GET", "/"), mock_receive(), mock_send)
response = app.response
content = await response.text()
assert response.status == 200
assert content == "Page: 1; size: 30; search: "
await app(get_example_scope("GET", "/", query=b"page=2"), mock_receive(), mock_send)
response = app.response
content = await response.text()
assert response.status == 200
assert content == "Page: 2; size: 30; search: "
await app(
get_example_scope("GET", "/", query=b"page=2&size=50"),
mock_receive(),
mock_send,
)
response = app.response
content = await response.text()
assert response.status == 200
assert content == "Page: 2; size: 50; search: "
await app(
get_example_scope("GET", "/", query=b"page=2&size=50&search=foo"),
mock_receive(),
mock_send,
)
response = app.response
content = await response.text()
assert response.status == 200
assert content == "Page: 2; size: 50; search: foo"
@pytest.mark.asyncio
async def test_handler_normalize_list_sync_method_from_query_default(
app, mock_send, mock_receive
):
@app.router.get("/")
def example(
a: FromQuery[List[int]] = FromQuery([1, 2, 3]),
b: FromQuery[List[int]] = FromQuery([4, 5, 6]),
c: FromQuery[List[str]] = FromQuery(["x"]),
):
return text(f"A: {a.value}; B: {b.value}; C: {c.value}")
app.normalize_handlers()
await app(get_example_scope("GET", "/"), mock_receive(), mock_send)
response = app.response
content = await response.text()
assert response.status == 200
assert content == f"A: {[1, 2, 3]}; B: {[4, 5, 6]}; C: {['x']}"
await app(get_example_scope("GET", "/", query=b"a=1349"), mock_receive(), mock_send)
response = app.response
content = await response.text()
assert response.status == 200
assert content == f"A: {[1349]}; B: {[4, 5, 6]}; C: {['x']}"
await app(
get_example_scope("GET", "/", query=b"a=1349&c=Hello&a=55"),
mock_receive(),
mock_send,
)
response = app.response
content = await response.text()
assert response.status == 200
assert content == f"A: {[1349, 55]}; B: {[4, 5, 6]}; C: {['Hello']}"
await app(
get_example_scope("GET", "/", query=b"a=1349&c=Hello&a=55&b=10"),
mock_receive(),
mock_send,
)
response = app.response
content = await response.text()
assert response.status == 200
assert content == f"A: {[1349, 55]}; B: {[10]}; C: {['Hello']}"
@pytest.mark.asyncio
async def test_handler_normalize_sync_method_without_arguments(
app, mock_send, mock_receive
):
@app.router.get("/")
def home():
return
app.normalize_handlers()
await app(get_example_scope("GET", "/"), mock_receive(), mock_send)
assert app.response.status == 204
@pytest.mark.asyncio
async def test_handler_normalize_sync_method_from_query_optional(
app, mock_send, mock_receive
):
@app.router.get("/")
def home(xx: FromQuery[Optional[int]], yy: FromQuery[Optional[int]]):
assert xx.value is None
assert yy.value == 20
app.normalize_handlers()
await app(get_example_scope("GET", "/", query=b"yy=20"), mock_receive(), mock_send)
assert app.response.status == 204
@pytest.mark.asyncio
async def test_handler_normalize_optional_binder(app, mock_send, mock_receive):
@app.router.get("/1")
def home1(xx: Optional[FromQuery[int]], yy: Optional[FromQuery[int]]):
assert xx is None
assert yy.value == 20
@app.router.get("/2")
def home2(xx: Optional[FromQuery[int]]):
assert xx is not None
assert xx.value == 10
@app.router.get("/3")
def home3(xx: Optional[FromQuery[Optional[int]]]):
assert xx is not None
assert xx.value == 10
app.normalize_handlers()
await app(get_example_scope("GET", "/1", query=b"yy=20"), mock_receive(), mock_send)
assert app.response.status == 204
await app(get_example_scope("GET", "/2", query=b"xx=10"), mock_receive(), mock_send)
assert app.response.status == 204
await app(get_example_scope("GET", "/3", query=b"xx=10"), mock_receive(), mock_send)
assert app.response.status == 204
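# Note on the two optionality spellings exercised above: FromQuery[Optional[int]] always
# injects a binder whose .value may be None, while Optional[FromQuery[int]] injects None
# for the whole binder when the query parameter is absent.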
@pytest.mark.asyncio
async def test_handler_normalize_sync_method_from_query_optional_list(
app, mock_send, mock_receive
):
@app.router.get("/")
def home(xx: FromQuery[Optional[List[int]]], yy: FromQuery[Optional[List[int]]]):
assert xx.value is None
assert yy.value == [20, 55, 64]
app.normalize_handlers()
await app(
get_example_scope("GET", "/", query=b"yy=20&yy=55&yy=64"),
mock_receive(),
mock_send,
)
assert app.response.status == 204
@pytest.mark.asyncio
@pytest.mark.parametrize(
"query,expected_values",
[
[b"xx=hello&xx=world&xx=lorem&xx=ipsum", ["hello", "world", "lorem", "ipsum"]],
[b"xx=1&xx=2", ["1", "2"]],
[b"xx=1&yy=2", ["1"]],
],
)
async def test_handler_normalize_sync_method_from_query_default_type(
query, expected_values, app, mock_send, mock_receive
):
@app.router.get("/")
def home(request, xx: FromQuery):
assert xx.value == expected_values
app.normalize_handlers()
await app(get_example_scope("GET", "/", query=query), mock_receive(), mock_send)
assert app.response.status == 204
@pytest.mark.asyncio
async def test_handler_normalize_method_without_input(app, mock_send, mock_receive):
@app.router.get("/")
async def home():
pass
app.normalize_handlers()
await app(get_example_scope("GET", "/"), mock_receive(), mock_send)
assert app.response.status == 204
@pytest.mark.asyncio
@pytest.mark.parametrize(
"value,expected_value",
[["dashboard", "dashboard"], ["hello_world", "hello_world"]],
)
async def test_handler_from_route(value, expected_value, app, mock_send, mock_receive):
@app.router.get("/:area")
async def home(request, area: FromRoute[str]):
assert area.value == expected_value
app.normalize_handlers()
await app(get_example_scope("GET", "/" + value), mock_receive(), mock_send)
assert app.response.status == 204
@pytest.mark.asyncio
@pytest.mark.parametrize(
"value_one,value_two,expected_value_one,expected_value_two",
[
["en", "dashboard", "en", "dashboard"],
["it", "hello_world", "it", "hello_world"],
],
)
async def test_handler_two_routes_parameters(
value_one: str,
value_two: str,
expected_value_one: str,
expected_value_two: str,
app,
mock_send,
mock_receive,
):
@app.router.get("/:culture_code/:area")
async def home(culture_code: FromRoute[str], area: FromRoute[str]):
assert culture_code.value == expected_value_one
assert area.value == expected_value_two
app.normalize_handlers()
await app(
get_example_scope("GET", "/" + value_one + "/" + value_two),
mock_receive(),
mock_send,
)
assert app.response.status == 204
@pytest.mark.asyncio
@pytest.mark.parametrize(
"value_one,value_two,expected_value_one,expected_value_two",
[
["en", "dashboard", "en", "dashboard"],
["it", "hello_world", "it", "hello_world"],
],
)
async def test_handler_two_routes_parameters_implicit(
value_one: str,
value_two: str,
expected_value_one: str,
expected_value_two: str,
app,
mock_send,
mock_receive,
):
@app.router.get("/:culture_code/:area")
async def home(culture_code, area):
assert culture_code == expected_value_one
assert area == expected_value_two
app.normalize_handlers()
await app(
get_example_scope("GET", "/" + value_one + "/" + value_two),
mock_receive(),
mock_send,
)
assert app.response.status == 204
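# Note: in the test above the route parameters are untyped, so the framework binds them
# implicitly by name from the matched route and passes them through as plain strings,
# as the assertions show.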
@pytest.mark.asyncio
async def test_handler_from_json_parameter(app, mock_send, mock_receive):
@app.router.post("/")
async def home(item: FromJSON[Item]):
assert item is not None
value = item.value
assert value.a == "Hello"
assert value.b == "World"
assert value.c == 10
app.normalize_handlers()
await app(
get_example_scope(
"POST",
"/",
[[b"content-type", b"application/json"], [b"content-length", b"32"]],
),
mock_receive([b'{"a":"Hello","b":"World","c":10}']),
mock_send,
)
assert app.response.status == 204
@pytest.mark.asyncio
async def test_handler_from_json_without_annotation(app, mock_send, mock_receive):
@app.router.post("/")
async def home(item: FromJSON):
assert item is not None
assert isinstance(item.value, dict)
value = item.value
assert value == {"a": "Hello", "b": "World", "c": 10}
app.normalize_handlers()
await app(
get_example_scope(
"POST",
"/",
[[b"content-type", b"application/json"], [b"content-length", b"32"]],
),
mock_receive([b'{"a":"Hello","b":"World","c":10}']),
mock_send,
)
assert app.response.status == 204
@pytest.mark.asyncio
async def test_handler_from_json_parameter_dict(app, mock_send, mock_receive):
@app.router.post("/")
async def home(item: FromJSON[dict]):
assert item is not None
assert isinstance(item.value, dict)
value = item.value
assert value == {"a": "Hello", "b": "World", "c": 10}
app.normalize_handlers()
await app(
get_example_scope(
"POST",
"/",
[[b"content-type", b"application/json"], [b"content-length", b"32"]],
),
mock_receive([b'{"a":"Hello","b":"World","c":10}']),
mock_send,
)
assert app.response.status == 204
@pytest.mark.asyncio
async def test_handler_from_json_parameter_dict_unannotated(
app, mock_send, mock_receive
):
@app.router.post("/")
async def home(item: FromJSON[Dict]):
assert item is not None
assert isinstance(item.value, dict)
value = item.value
assert value == {"a": "Hello", "b": "World", "c": 10}
app.normalize_handlers()
await app(
get_example_scope(
"POST",
"/",
[[b"content-type", b"application/json"], [b"content-length", b"32"]],
),
mock_receive([b'{"a":"Hello","b":"World","c":10}']),
mock_send,
)
assert app.response.status == 204
@pytest.mark.asyncio
async def test_handler_from_json_parameter_dict_annotated(app, mock_send, mock_receive):
@app.router.post("/")
async def home(item: FromJSON[Dict[str, Any]]):
assert item is not None
assert isinstance(item.value, dict)
value = item.value
assert value == {"a": "Hello", "b": "World", "c": 10}
app.normalize_handlers()
await app(
get_example_scope(
"POST",
"/",
[[b"content-type", b"application/json"], [b"content-length", b"32"]],
),
mock_receive([b'{"a":"Hello","b":"World","c":10}']),
mock_send,
)
assert app.response.status == 204
@pytest.mark.parametrize(
"value",
[
"Lorem ipsum dolor sit amet",
"Hello, World",
"Lorem ipsum dolor sit amet\n" * 200,
],
)
@pytest.mark.asyncio
async def test_handler_from_text_parameter(value: str, app, mock_send, mock_receive):
@app.router.post("/")
async def home(text: FromText):
assert text.value == value
app.normalize_handlers()
await app(
get_example_scope(
"POST",
"/",
[
[b"content-type", b"text/plain; charset=utf-8"],
[b"content-length", str(len(value)).encode()],
],
),
mock_receive([value.encode("utf8")]),
mock_send,
)
assert app.response.status == 204
@pytest.mark.parametrize(
"value",
[
b"Lorem ipsum dolor sit amet",
b"Hello, World",
b"Lorem ipsum dolor sit amet\n" * 200,
],
)
@pytest.mark.asyncio
async def test_handler_from_bytes_parameter(value: bytes, app, mock_send, mock_receive):
@app.router.post("/")
async def home(text: FromBytes):
assert text.value == value
app.normalize_handlers()
await app(
get_example_scope(
"POST",
"/",
[
[b"content-type", b"text/plain; charset=utf-8"],
[b"content-length", str(len(value)).encode()],
],
),
mock_receive([value]),
mock_send,
)
assert app.response.status == 204
@pytest.mark.asyncio
async def test_handler_from_files(app, mock_send, mock_receive):
@app.router.post("/")
async def home(files: FromFiles):
assert files is not None
assert files.value is not None
assert len(files.value) == 4
file1 = files.value[0]
file2 = files.value[1]
file3 = files.value[2]
file4 = files.value[3]
assert file1.name == b"file1"
assert file1.file_name == b"a.txt"
assert file1.data == b"Content of a.txt.\r\n"
assert file2.name == b"file2"
assert file2.file_name == b"a.html"
assert file2.data == b"<!DOCTYPE html><title>Content of a.html.</title>\r\n"
assert file3.name == b"file2"
assert file3.file_name == b"a.html"
assert file3.data == b"<!DOCTYPE html><title>Content of a.html.</title>\r\n"
assert file4.name == b"file3"
assert file4.file_name == b"binary"
assert file4.data == b"a\xcf\x89b"
app.normalize_handlers()
boundary = b"---------------------0000000000000000000000001"
content = b"\r\n".join(
[
boundary,
b'Content-Disposition: form-data; name="text1"',
b"",
b"text default",
boundary,
b'Content-Disposition: form-data; name="text2"',
b"",
"aωb".encode("utf8"),
boundary,
b'Content-Disposition: form-data; name="file1"; filename="a.txt"',
b"Content-Type: text/plain",
b"",
b"Content of a.txt.",
b"",
boundary,
b'Content-Disposition: form-data; name="file2"; filename="a.html"',
b"Content-Type: text/html",
b"",
b"<!DOCTYPE html><title>Content of a.html.</title>",
b"",
boundary,
b'Content-Disposition: form-data; name="file2"; filename="a.html"',
b"Content-Type: text/html",
b"",
b"<!DOCTYPE html><title>Content of a.html.</title>",
b"",
boundary,
b'Content-Disposition: form-data; name="file3"; filename="binary"',
b"Content-Type: application/octet-stream",
b"",
"aωb".encode("utf8"),
boundary + b"--",
]
)
await app(
get_example_scope(
"POST",
"/",
[
[b"content-length", str(len(content)).encode()],
[b"content-type", b"multipart/form-data; boundary=" + boundary],
],
),
mock_receive([content]),
mock_send,
)
assert app.response.status == 204
@pytest.mark.asyncio
async def test_handler_from_files_handles_empty_body(app, mock_send, mock_receive):
@app.router.post("/")
async def home(files: FromFiles):
assert files.value == []
app.normalize_handlers()
await app(
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Fri May 10 13:30:43 2019
@author: Darin
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import scipy.sparse as sparse
import Material
import Update
class PyOpt:
""" Topology optimization object
"""
def __init__(self, fem=None, update=None, threshold=0.3):
"""Constructor
Parameters
----------
fem : FEM object
An object describing the underlying finite element analysis
update : Update scheme object
Provides functionality to store and update design variables
threshold : scalar
Minimum density to plot 3D elements
"""
self.fem = fem
self.update = update
self.dens_thresh = threshold
self.objectives = []
self.constraints = []
self.f = []
self.g = []
def LoadFromFile(self, filename):
""" Loads an old run from a file
Parameters
----------
filename : string
Name of the file that everything will be saved to
Returns
-------
None
"""
data = np.load(filename, allow_pickle=True).item()
from FEM import FEM
self.fem = FEM()
self.fem.Load(data['fem'])
if data['update']['type'] == 'OC':
self.update = Update.OCUpdateScheme(0.2, 0.5, 0.5 * np.ones(self.fem.nElem),
np.zeros(self.fem.nElem), np.ones(self.fem.nElem))
elif data['update']['type'] == 'MMA':
self.update = Update.MMA(0.5 * np.ones(self.fem.nElem), 1,
np.zeros(self.fem.nElem), np.ones(self.fem.nElem))
self.update.Load(data['update'])
self.Filter = data['opt']['Filter']
try:
self.R = data['opt']['R']
except:
pass
import Functions as Funcs
for objective in data['opt']['objectives']:
self.AddFunction(getattr(Funcs, objective['function']),
objective['weight'], objective['min'],
objective['max'], 'objective')
for constraint in data['opt']['constraints']:
self.AddFunction(getattr(Funcs, constraint['function']),
constraint['constraint'], constraint['min'],
constraint['max'], 'constraint')
def LoadPetsc(self, folder, appendix=None, Endian='=', update='MMA'):
""" Create PyOpt structure from PETSc code results
Parameters
----------
folder : str
folder containing all of the Petsc results
appendix : str
Appendix for result values to restart from, if none picks highest penalty
Endian : char
Indicates byte ordering ('=':default, '<':little Endian, '>':big Endian)
update : str
Which updte scheme to use (MMA or OC)
Returns
-------
None
"""
from os import listdir
from os.path import sep
from PetscBinaryIO import PetscBinaryRead
import Functions_Timing as Funcs
# Load FEM data
from FEM import FEM
self.fem = FEM()
self.fem.LoadPetsc(folder, Endian=Endian)
# Load update data
if update == 'OC':
self.update = Update.OCUpdateScheme(0.2, 0.5, 0.5 * np.ones(self.fem.nElem),
np.zeros(self.fem.nElem), np.ones(self.fem.nElem))
elif update == 'MMA':
self.update = Update.MMA(0.5 * np.ones(self.fem.nElem), 1,
np.zeros(self.fem.nElem), np.ones(self.fem.nElem))
self.update.LoadPetsc(folder, appendix=appendix, Endian=Endian)
# Load filter matrics
self.Filter = PetscBinaryRead(folder + sep + "Filter.bin")
try:
self.R = PetscBinaryRead(folder + sep + "Max_Filter.bin")
edge = PetscBinaryRead(folder + sep + "Void_Edge_Volume.bin")
self.R = self.R.tocoo()
self.R = sparse.csr_matrix((np.concatenate([self.R.data, edge]),
(np.concatenate([self.R.row, np.arange(self.R.shape[0])]),
np.concatenate([self.R.col, self.R.shape[0]*np.ones(self.R.shape[0], dtype=int)]))))
except:
self.R = sparse.dia_matrix((np.ones(self.fem.nElem), np.zeros(self.fem.nElem)))
# Set up functions and material properties
inputFile = [file for file in listdir(folder) if '_Input' in file][0]
active = False
name = None
fType = None
value = None
minimum = None
maximum = None
E0, Nu0, Density = None, None, None
with open(folder + sep + inputFile, 'r') as fh:
for line in fh:
line = line.strip()
if line[:3] == 'E0:':
E0 = float(line.split(':')[-1])
elif line[:4] == 'Nu0:':
Nu0 = float(line.split(':')[-1])
elif line[:8] == 'Density:':
Density = float(line.split(':')[-1])
elif '[Functions]' in line:
active = True
elif '[/Functions]' in line:
active = False
elif active:
if line in ['Compliance', 'Stability', 'Frequencey', 'Volume']:
name = line
elif line in ['Objective', 'Constraint']:
fType = line
elif 'Values:' in line:
value = [float(val) for val in line.split(':')[-1].split(',')][0]
elif 'Range:' in line:
minimum, maximum = [float(val) for val in line.split(':')[-1].split(',')]
if name is not None and fType is not None and value is not None and minimum is not None:
self.AddFunction(getattr(Funcs, name), value, minimum, maximum, fType)
name = None
fType = None
value = None
minimum = None
maximum = None
if self.fem.nDof == 2:
self.fem.SetMaterial(Material.PlaneStressElastic(E0, Nu0))
else:
self.fem.SetMaterial(Material.Elastic3D(E0, Nu0))
def SetInterpolation(self, interpolation):
""" Set the object for interpolating filtered densities to material values
Parameters
----------
interpolation : Interpolation object
The interpolation object
Returns
-------
None
"""
self.MatIntFnc = interpolation.Interpolate
def ConstructDensityFilter(self, radius, nElx):
""" Sets up the density filter
Parameters
----------
radius : scalar
Filter radius
nElx : list of integer
Number of elements in each direction
Returns
-------
Filter : sparse matrix
Filter matrix
"""
centroids = np.mean(self.fem.nodes[self.fem.elements.reshape(
1,self.fem.nElem,-1)], axis=2).reshape(self.fem.nElem,-1)
# Element sizes
dx = np.zeros(self.fem.nodes.shape[1])
# Number of elements to check in each direction
Nx = np.zeros(self.fem.nodes.shape[1], dtype=int)
for i in range(dx.size):
dx[i] = (np.max(self.fem.nodes[self.fem.elements[0], i]) -
np.min(self.fem.nodes[self.fem.elements[0], i]))
Nx[i] = max(np.floor(radius/dx[i]), 1)
# Distance of all nearby elements
offset = [np.arange(-Nx[0], Nx[0]+1)]
for i in range(1, self.fem.nodes.shape[1]):
newshape = [1 for j in range(i)] + [2*Nx[i]+1]
for j in range(len(offset)):
offset[j] = np.tile(np.expand_dims(offset[j], axis=-1), newshape)
newshape = [1 for j in range(i)] + [-1]
offset.append(np.arange(-Nx[i], Nx[i]+1).reshape(newshape))
newshape = list(offset[0].shape)
newshape[-1] = 1
offset[-1] = np.tile(offset[-1], newshape)
dist = [dx[i]*d.ravel() for i, d in enumerate(offset)]
r = np.sqrt(np.array([d**2 for d in dist]).sum(axis=0))
Nbrhd = r < radius
Low_Bnd = np.min(self.fem.nodes, axis=0)
Upp_Bnd = np.max(self.fem.nodes, axis=0)
sx = [1]
for nEl in nElx[:-1]:
sx.append(sx[-1] * nEl)
Template = sum([sx[i]*d for i, d in enumerate(offset)]).ravel()
indi = [0 for i in range(self.fem.nElem)]
indj = [0 for i in range(self.fem.nElem)]
valk = [0 for i in range(self.fem.nElem)]
for el in range(self.fem.nElem):
Add = el + Template
Valid = [np.logical_and(centroids[el, i]+dist[i] > Low_Bnd[i],
centroids[el, i]+dist[i] < Upp_Bnd[i])
for i in range(len(dist))]
Valid = np.logical_and.reduce(Valid)
Valid = np.logical_and(Valid, Nbrhd)
Add = Add[Valid]
indi[el] = Add
indj[el] = el*np.ones(len(Add), dtype=int)
valk[el] = r[Valid]
Filter = sparse.csr_matrix((1-np.concatenate(valk)/radius,
(np.concatenate(indi),np.concatenate(indj))))
rowsum = Filter.sum(axis=1)
return sparse.dia_matrix((1/rowsum.T,0),shape=Filter.shape) * Filter
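        # Hypothetical usage sketch (mesh size and radius are illustrative): for a 2D
        # problem with a 60x20 element grid one might call
        #   opt.Filter = opt.ConstructDensityFilter(radius=1.5 * elem_width, nElx=[60, 20])
        # The returned matrix is row-normalized, so each filtered density is a weighted
        # average of its neighbours within the radius.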
def AddFunction(self, function, value, minimum, maximum, funcType):
""" Add an objective or constraint function to the list of functions
to be evaluated
Parameters
----------
function : OptFunction object
The objective function. Returns a function value and design sensitivities
value : scalar
The objective weight or constraint value.
Objective weights should be adjusted so all weights sum to 1.
        minimum : scalar
            Minimum function value for normalization
        maximum : scalar
            Maximum function value for normalization
funcType : str
'objective' or 'constraint'
Returns
-------
None
"""
if funcType.lower() == 'objective':
self.objectives.append({'function':function, 'weight':value,
'min':minimum, 'max':maximum})
else:
self.constraints.append({'function':function, 'constraint':value,
'min':minimum, 'max':maximum})
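        # Hypothetical registration sketch (weights/bounds are illustrative; the function
        # objects would come from the project's Functions module):
        #   opt.AddFunction(Funcs.Compliance, value=1.0, minimum=0.0, maximum=100.0,
        #                   funcType='objective')
        #   opt.AddFunction(Funcs.Volume, value=0.4, minimum=0.0, maximum=1.0,
        #                   funcType='constraint')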
def CallFunctions(self):
""" Call all functions to get objective and constraint value as well
as all function sensitivities
Parameters
----------
None
Returns
-------
f : scalar
Objective value
dfdx : array_like
Objective gradients
g : array_like
Constraint values
dgdx : array_like
Constraint gradients
"""
matVals = self.MatIntFnc(self.update.x)
self.densities = matVals['V']
self.fem.ConstructSystem(matVals['E'])
x0 = self.fem.U.copy()
self.fem.SolveSystem(sparse.linalg.cg, x0=x0)
f = 0
dfdx = np.zeros(self.fem.nElem)
g = np.zeros(max(self.update.m, 1))
dgdx = np.zeros((self.fem.nElem, g.size))
for funDict in self.objectives:
obj, dobjdE, dobjdV = funDict['function'](self.fem, matVals)
dobjdx = self.Filter.T * dobjdV
dobjdx += self.Filter.T * (matVals['y'] * dobjdE -
matVals['rhoq'] * (matVals['y'] < 1) *
(self.R.T[:-1,:] * (matVals['rho'] * dobjdE)))
f += funDict['weight'] * (obj - funDict['min']) / (funDict['max'] -
funDict['min'])
dfdx += funDict['weight'] * dobjdx / (funDict['max'] - funDict['min'])
print("\t%s: %f" % (funDict['function'].__name__,
funDict['weight'] * (obj - funDict['min']) /
(funDict['max'] - funDict['min'])))
i = 0
for iiii, funDict in enumerate(self.constraints):
con, dcondE, dcondV = funDict['function'](self.fem, matVals)
dcondx = self.Filter.T * dcondV
dcondx += self.Filter.T * (matVals['y'] * dcondE -
matVals['rhoq'] * (matVals['y'] < 1) *
(self.R.T[:-1,:] * (matVals['rho'] * dcondE)))
g[i] = (con - funDict['constraint']) / (funDict['max'] - funDict['min'])
dgdx[:,i] = dcondx / (funDict['max'] - funDict['min'])
i += 1
print("\t%s: %f" % (funDict['function'].__name__, g[i-1]))
self.f.append(f)
self.g.append(g)
return f, dfdx, g, dgdx
def Plot(self, filename=None, edgeColor='none'):
""" Plot the optimized shape
Parameters
----------
None
Returns
-------
None
"""
fig = plt.figure("Result", figsize=(12,12), clear=True)
if self.fem.nDof == 2:
collection = PolyCollection(self.fem.nodes[self.fem.elements], edgecolors=edgeColor)
collection.set_array(self.densities)
collection.set_cmap('gray_r')
collection.set_clim(vmin=0, vmax=1)
ax = fig.gca()
ax.add_collection(collection)
ax.set_xlim(self.fem.nodes[:,0].min(), self.fem.nodes[:,0].max())
ax.set_ylim(self.fem.nodes[:,1].min(), self.fem.nodes[:,1].max())
ratio = ((ax.get_ylim()[1] - ax.get_ylim()[0]) /
(ax.get_xlim()[1] - ax.get_xlim()[0]))
if ratio < 1:
fig.set_figheight(ratio * fig.get_figwidth())
else:
fig.set_figwidth(fig.get_figheight() / ratio)
ax.axis('off')
elif self.fem.nDof == 3:
if not hasattr(self, 'facePairs'):
                face = np.array([0, 1, 2, 3, 4, 5,
handles categorical colors / legends better.
Parameters
----------
data : array-like, shape=[n_samples, n_features]
Input data. Only the first two components will be used.
c : list-like or None, optional (default: None)
Color vector. Can be a single color value (RGB, RGBA, or named
matplotlib colors), an array of these of length n_samples, or a list of
discrete or continuous values of any data type. If `c` is not a single
or list of matplotlib colors, the values in `c` will be used to
populate the legend / colorbar with colors from `cmap`
cmap : `matplotlib` colormap, str, dict, list or None, optional (default: None)
matplotlib colormap. If None, uses `tab20` for discrete data and
`inferno` for continuous data. If a list, expects one color for every
unique value in `c`, otherwise interpolates between given colors for
continuous data. If a dictionary, expects one key
for every unique value in `c`, where values are valid matplotlib colors
        (hsv, rgb, rgba, or named colors)
cmap_scale : {'linear', 'log', 'symlog', 'sqrt'} or `matplotlib.colors.Normalize`,
optional (default: 'linear')
Colormap normalization scale. For advanced use, see
<https://matplotlib.org/users/colormapnorms.html>
s : float, optional (default: None)
Point size. If `None`, set to 200 / sqrt(n_samples)
mask : list-like, optional (default: None)
boolean mask to hide data points
discrete : bool or None, optional (default: None)
If True, the legend is categorical. If False, the legend is a colorbar.
If None, discreteness is detected automatically. Data containing
non-numeric `c` is always discrete, and numeric data with 20 or less
unique values is discrete.
ax : `matplotlib.Axes` or None, optional (default: None)
axis on which to plot. If None, an axis is created
legend : bool, optional (default: None)
States whether or not to create a legend. If data is continuous,
the legend is a colorbar. If `None`, a legend is created where possible.
colorbar : bool, optional (default: None)
Synonym for `legend`
shuffle : bool, optional (default: True)
        If True, shuffles the order of points on the plot.
figsize : tuple, optional (default: None)
Tuple of floats for creation of new `matplotlib` figure. Only used if
`ax` is None.
ticks : True, False, or list-like (default: True)
If True, keeps default axis ticks. If False, removes axis ticks.
If a list, sets custom axis ticks
{x,y}ticks : True, False, or list-like (default: None)
If set, overrides `ticks`
ticklabels : True, False, or list-like (default: True)
If True, keeps default axis tick labels. If False, removes axis tick labels.
If a list, sets custom axis tick labels
{x,y}ticklabels : True, False, or list-like (default: None)
If set, overrides `ticklabels`
label_prefix : str or None (default: None)
Prefix for all axis labels. Axes will be labelled `label_prefix`1,
`label_prefix`2, etc. Can be overriden by setting `xlabel`,
`ylabel`, and `zlabel`.
{x,y}label : str or None (default : None)
Axis labels. Overrides the automatic label given by
label_prefix. If None and label_prefix is None, no label is set
unless the data is a pandas Series, in which case the series name is used.
Override this behavior with `{x,y,z}label=False`
title : str or None (default: None)
axis title. If None, no title is set.
fontsize : float or None (default: None)
Base font size.
legend_title : str (default: None)
title for the colorbar of legend
legend_loc : int or string or pair of floats, default: 'best'
Matplotlib legend location. Only used for discrete data.
See <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html>
for details.
legend_anchor : `BboxBase`, 2-tuple, or 4-tuple
Box that is used to position the legend in conjunction with loc.
See <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html>
for details.
    legend_ncol : `int` or `None`, optional (default: None)
Number of columns to show in the legend.
        If None, defaults to a maximum number of entries per column.
vmin, vmax : float, optional (default: None)
Range of values to use as the range for the colormap.
Only used if data is continuous
filename : str or None (default: None)
file to which the output is saved
dpi : int or None, optional (default: None)
The resolution in dots per inch. If None it will default to the value
savefig.dpi in the matplotlibrc file. If 'figure' it will set the dpi
to be the value of the figure. Only used if filename is not None.
**plot_kwargs : keyword arguments
Extra arguments passed to `matplotlib.pyplot.scatter`.
Returns
-------
ax : `matplotlib.Axes`
axis on which plot was drawn
Examples
--------
>>> import scprep
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> data = np.random.normal(0, 1, [200, 2])
>>> # Continuous color vector
>>> colors = data[:, 0]
>>> scprep.plot.scatter2d(data, c=colors)
>>> # Discrete color vector with custom colormap
>>> colors = np.random.choice(['a','b'], data.shape[0], replace=True)
>>> data[colors == 'a'] += 10
>>> scprep.plot.scatter2d(
data, c=colors, cmap={'a' : [1,0,0,1], 'b' : 'xkcd:sky blue'}
)
"""
if isinstance(data, list):
data = utils.toarray(data)
if isinstance(data, np.ndarray):
data = np.atleast_2d(data)
return scatter(
x=select.select_cols(data, idx=0),
y=select.select_cols(data, idx=1),
c=c,
cmap=cmap,
cmap_scale=cmap_scale,
s=s,
mask=mask,
discrete=discrete,
ax=ax,
legend=legend,
colorbar=colorbar,
shuffle=shuffle,
figsize=figsize,
ticks=ticks,
xticks=xticks,
yticks=yticks,
ticklabels=ticklabels,
xticklabels=xticklabels,
yticklabels=yticklabels,
label_prefix=label_prefix,
xlabel=xlabel,
ylabel=ylabel,
title=title,
fontsize=fontsize,
legend_title=legend_title,
legend_loc=legend_loc,
legend_anchor=legend_anchor,
legend_ncol=legend_ncol,
filename=filename,
dpi=dpi,
**plot_kwargs,
)
@utils._with_pkg(pkg="matplotlib", min_version=3)
def scatter3d(
data,
c=None,
cmap=None,
cmap_scale="linear",
s=None,
mask=None,
discrete=None,
ax=None,
legend=None,
colorbar=None,
shuffle=True,
figsize=None,
ticks=True,
xticks=None,
yticks=None,
zticks=None,
ticklabels=True,
xticklabels=None,
yticklabels=None,
zticklabels=None,
label_prefix=None,
xlabel=None,
ylabel=None,
zlabel=None,
title=None,
fontsize=None,
legend_title=None,
legend_loc="best",
legend_anchor=None,
legend_ncol=None,
elev=None,
azim=None,
filename=None,
dpi=None,
**plot_kwargs,
):
"""Create a 3D scatter plot.
Builds upon `matplotlib.pyplot.scatter` with nice defaults
and handles categorical colors / legends better.
Parameters
----------
data : array-like, shape=[n_samples, n_features]
        Input data. Only the first three components will be used.
c : list-like or None, optional (default: None)
Color vector. Can be a single color value (RGB, RGBA, or named
matplotlib colors), an array of these of length n_samples, or a list of
discrete or continuous values of any data type. If `c` is not a single
or list of matplotlib colors, the values in `c` will be used to
populate the legend / colorbar with colors from `cmap`
cmap : `matplotlib` colormap, str, dict, list or None, optional (default: None)
matplotlib colormap. If None, uses `tab20` for discrete data and
`inferno` for continuous data. If a list, expects one color for every
unique value in `c`, otherwise interpolates between given colors for
continuous data. If a dictionary, expects one key
for every unique value in `c`, where values are valid matplotlib colors
        (hsv, rgb, rgba, or named colors)
cmap_scale : {'linear', 'log', 'symlog', 'sqrt'} or `matplotlib.colors.Normalize`,
optional (default: 'linear')
Colormap normalization scale. For advanced use, see
<https://matplotlib.org/users/colormapnorms.html>
s : float, optional (default: None)
Point size. If `None`, set to 200 / sqrt(n_samples)
mask : list-like, optional (default: None)
boolean mask to hide data points
discrete : bool or None, optional (default: None)
If True, the legend is categorical. If False, the legend is a colorbar.
If None, discreteness is detected automatically. Data containing
        non-numeric `c` is always discrete, and numeric data with 20 or fewer
unique values is discrete.
ax : `matplotlib.Axes` or None, optional (default: None)
axis on which to plot. If None, an axis is created
legend : bool, optional (default: None)
States whether or not to create a legend. If data is continuous,
the legend is a colorbar. If `None`, a legend is created where possible.
colorbar : bool, optional (default: None)
Synonym for `legend`
shuffle : bool, optional (default: True)
        If True, shuffles the order of points on the plot.
figsize : tuple, optional (default: None)
Tuple of floats for creation of new `matplotlib` figure. Only used if
`ax` is None.
ticks : True, False, or list-like (default: True)
If True, keeps default axis ticks. If False, removes axis ticks.
If a list, sets custom axis ticks
{x,y,z}ticks : True, False, or list-like (default: None)
If set, overrides `ticks`
ticklabels : True, False, or list-like (default: True)
        If True, keeps default axis tick labels. If False, removes axis tick labels.
        counts = Counter(flines)
for k in counts.keys():
if k != "N.N.N.N":
r = labels.index(k)
abmat[r,c] += counts[k]
df = pd.DataFrame(abmat, index=labels, columns=samples)
fname = os.path.join(aa_dir,'raw_counts.tsv')
df.to_csv(fname,sep='\t',index=True,header=True)
return 0
# Carnelian features
#def translateOne(argument):
# '''Subroutine for translating one sample on one cpu using transeq'''
# #print("in translate one")
# os.system('transeq -frame 6 ' + argument)
def translateSeqs(seq_dir, out_dir, fgsp_loc, args):
'''
    Find genes in the input reads and translate the coding sequences to ORFs
    with FragGeneScan, using n cpus.
seq_dir (string): must be a path to a directory with a nucleotide fasta file
out_dir (string): must be a path to an output directory where ORFs will be written
fgsp_loc(string): must be a path to the directory where FragGeneScan is installed
Unpacking args:
ncpus (int): number of cpus to be used to parallelize the translation
'''
ncpus = args.ncpus
#p=Pool(args.ncpus)
#my_env["PATH"]=(os.path.dirname(fgsp_loc) + ":" + my_env.get("PATH", ""))
os.environ["PATH"]=(fgsp_loc + ":" + my_env.get("PATH", ""))
try:
fpath = os.path.join(seq_dir,'*fasta')
fasta_file = [x for x in glob.glob(fpath)][0]
#name_path = [(name, seq_dir + '/' + name) for name in fasta_filelist]
        first_record = next(SeqIO.parse(fasta_file, "fasta"))
if not sequtil.check_if_nucl(str(first_record.seq)):
print("Could not find nucleotide fasta file in:" + seq_dir)
return(1)
except IndexError:
raise RuntimeError("Could not find fasta file in:" + seq_dir)
safe_makedirs(out_dir)
out_file = os.path.join(out_dir, os.path.basename(fasta_file).rsplit('.',1)[0])
starttime = datetime.now()
print('''================================================
Predicting ORFs from nucleotide fasta file
{:%Y-%m-%d %H:%M:%S}'''.format(starttime))
sys.stdout.flush()
#cmd = 'FGS+ -s ' + fasta_file + ' -o ' + out_file + ' -w 0 -r ' + os.path.join(fgsp_loc,'train') + ' -t illumina_1 ' + ' -m 20480'
print(os.path.join(fgsp_loc,'train/complete'))
cmd = 'run_FragGeneScan.pl -genome=' + fasta_file + ' -out='+out_file +' -train=complete'+ ' -complete=0'
if ncpus > 1:
#cmd += ' -p ' + str(ncpus)
cmd += ' -thread=' + str(ncpus)
os.system(cmd)
outfile = out_file + '.faa'
os.system('mv ' + outfile + ' ' + out_file + '.fasta')
print('''------------------------------------------------
Total wall clock runtime (sec): {}
================================================'''.format(
(datetime.now() - starttime).total_seconds()))
sys.stdout.flush()
return(0)
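# Hedged usage sketch (not part of the original module): translateSeqs only
# reads `args.ncpus`, so a minimal argparse-style namespace is enough. The
# directory and FragGeneScan paths below are illustrative placeholders.
#
#   from argparse import Namespace
#   translateSeqs('reads_dir', 'orfs_dir', '/opt/FragGeneScan', Namespace(ncpus=4))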
def frag(test_dir, frag_dir, args):
'''
Draw fragments of length l from the fasta file found in the test_dir with
coverage c. Note that there must be a label file of the same basename with
matching ids for each of the fasta lines.
test_dir (string): must be a path to a directory with a single fasta
and label file
frag_dir (string): must be a path to an output directory
Unpacking args:
frag_length (int): length of fragments to be drawn
coverage (float): fraction of times each location is to be covered
by drawn fragments
'''
# Unpack args
frag_length = args.frag_length
coverage = args.coverage
# Finish unpacking args
fasta, labels = get_fasta_and_label(test_dir)
safe_makedirs(frag_dir)
fasta_out = os.path.join(frag_dir, 'test.fragments.fasta')
gi2label_out = os.path.join(frag_dir, 'test.fragments.gi2label')
label_out = os.path.join(frag_dir, 'test.fragments.label')
starttime = datetime.now()
print('''================================================
Drawing fragments
{:%Y-%m-%d %H:%M:%S}
'''.format(starttime) + '''
frag_length = {frag_length}
coverage = {coverage}
------------------------------------------------
Fasta input: {fasta}
labels input: {labels}
Fasta output: {fasta_out}
gi2label output:{gi2label_out}
labels output: {label_out}'''.format(
frag_length=frag_length, coverage=coverage, fasta=fasta,
labels=labels, fasta_out=fasta_out, gi2label_out=gi2label_out,
label_out=label_out)
)
sys.stdout.flush()
# set seed (for reproducibility)
seed = 42
# draw fragments
subprocess.check_call(["drawfrag",
"-i", fasta,
"-t", labels,
"-l", str(frag_length),
"-c", str(coverage),
"-o", fasta_out,
"-g", gi2label_out,
"-s", str(seed)],
env=my_env)
# extract labels
extract_column_two(gi2label_out, label_out)
print('''------------------------------------------------
Total wall clock runtime (sec): {}
================================================'''.format(
(datetime.now() - starttime).total_seconds()))
sys.stdout.flush()
return 0
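# Hedged usage sketch (illustrative): frag() expects test_dir to hold one fasta
# file plus a matching label file, and an args object carrying frag_length and
# coverage. Paths and values below are placeholders.
#
#   from argparse import Namespace
#   frag('test_dir', 'frag_out_dir', Namespace(frag_length=100, coverage=1.0))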
def train(ref_dir, model_dir, args):
'''Draws fragments from the fasta file found in ref_dir. Note that
there must be a label file of the same basename with matching ids for
each of the fasta lines.
ref_dir (string): must be a path to a directory with a single fasta
and label file
model_dir (string): must be a path to an output directory
Unpacking args:
frag_length (int): length of fragments to be drawn
coverage (float): fraction of times each location is to be covered
by drawn fragments
kmer_length (int): size of k-mers used
rweight (int): how many positions will be randomly chosen in the
contiguous k-mer (k-mer length should be multiple
of row_weight)
num_hash (int): number of hashing functions
num_batches (int): number of times to run vowpal_wabbit
num_passes (int): number of passes within vowpal_wabbit
precise (flag): if set trained model will store probabilities for labels
'''
# Unpack args
frag_length = args.frag_length
coverage = args.coverage
kmer = args.kmer_length
row_weight = args.rweight
hierarchical = args.hweight # only comes into play if > 0
num_hash = args.num_hash
num_batches = args.num_batches
num_passes = args.num_passes
bits = args.bits
lambda1 = args.lambda1
lambda2 = args.lambda2
# Finish unpacking args
fasta, labels = get_fasta_and_label(ref_dir)
starttime = datetime.now()
if kmer % row_weight != 0:
raise ValueError("Row weight [{}] must divide into k-mer length [{}].".format(row_weight, kmer))
if (hierarchical > 0):
if kmer % hierarchical != 0:
raise ValueError("Hierarchy middle level [{}] must divide into k-mer length [{}].".format(hierarchical, kmer))
if hierarchical % row_weight != 0:
raise ValueError("Row weight[{}] must divide into middle hierarchical structure weight [{}].".format(row_weight, hierarchical))
print(
'''================================================
Training using Carnelian + vowpal-wabbit
{:%Y-%m-%d %H:%M:%S}
'''.format(starttime) + '''
frag_length = {frag_length}
coverage: {coverage}
k-mer length: {kmer}'''.format(
frag_length=frag_length,
coverage=coverage,
kmer=kmer))
if hierarchical > 0:
print('''hierarchical: {}'''.format(hierarchical))
print('''row weight: {row_weight}
num hashes: {num_hash}
num batches: {num_batches}
num passes: {num_passes}
------------------------------------------------
Fasta input: {fasta}
labels input: {labels}
------------------------------------------------'''.format(
row_weight=row_weight,
num_hash=num_hash,
num_batches=num_batches,
num_passes=num_passes,
fasta=fasta,
labels=labels)
)
sys.stdout.flush()
num_labels = unique_lines(labels)
print("Number labels: {}".format(num_labels))
sys.stdout.flush()
safe_makedirs(model_dir)
# define output "dictionary" : label <--> vw classes
dico = os.path.join(model_dir, "vw-dico.txt")
# define model prefix
model_prefix = os.path.join(model_dir, "vw-model")
# generate LDPC spaced pattern
pattern_file = os.path.join(model_dir, "patterns.txt")
ldpc.ldpc_write(k=kmer, t=row_weight, _m=num_hash, d=pattern_file)
seed = 42
for i in range(num_batches):
seed = seed + 1
batch_prefix = os.path.join(model_dir, "train.batch-{}".format(i))
fasta_batch = batch_prefix + ".fasta"
gi2label_batch = batch_prefix + ".gi2label"
label_batch = batch_prefix + ".label"
# draw fragments
subprocess.check_call(["drawfrag",
"-i", fasta,
"-t", labels,
"-l", str(frag_length),
"-c", str(coverage),
"-o", fasta_batch,
"-g", gi2label_batch,
"-s", str(seed)],
env=my_env)
# extract labels
extract_column_two(gi2label_batch, label_batch)
#cherry = [line.strip() for line in open(label_batch)]
#print(len(cherry))
print("calling fasta2skm for batch {}".format(i))
# learn model
fasta2skm_param_list = ["fasta2skm",
"-i", fasta_batch,
"-t", label_batch,
"-k", str(kmer),
"-d", dico,
"-p", pattern_file]
print("Getting training set ...")
sys.stdout.flush()
training_list = subprocess.check_output(
fasta2skm_param_list, env=my_env).splitlines()
print("Shuffling training set ...")
sys.stdout.flush()
random.shuffle(training_list)
curr_model = model_prefix + "_batch-{}.model".format(i)
prev_model = model_prefix + "_batch-{}.model".format(i-1) # May not exist if first run
vw_param_base = ["vw",
"--random_seed", str(seed),
"-f", curr_model,
"--cache_file", batch_prefix + ".cache",
"--passes", str(num_passes),
"--save_resume"]
if args.precise:
vw_param_base += ["--loss_function=logistic", "--probabilities"]
vw_param_firstrun = [
"--oaa", str(num_labels),
"--bit_precision", str(bits),
"--l1", str(lambda1),
"--l2", str(lambda2)]
if i > 0:
vw_param_list = vw_param_base + ["-i", prev_model]
else:
vw_param_list = vw_param_base + vw_param_firstrun
print(vw_param_list)
sys.stdout.flush()
vwps = subprocess.Popen(vw_param_list, env=my_env,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
gsp = vwps.communicate(input='\n'.join(training_list))
print(gsp)
while vwps.poll() is None:
l = vwps.stdout.readline()
sys.stdout.write(l)
sys.stdout.flush()
# thread.join() # This shouldn't be necessary, but just being safe.
if i > 0:
os.remove(prev_model)
if i == num_batches - 1:
os.rename(curr_model, model_prefix + "_final.model")
os.remove(batch_prefix + ".cache")
os.remove(fasta_batch)
os.remove(label_batch)
os.remove(gi2label_batch)
print("Finished batch {}".format(i))
print('''------------------------------------------------
Total wall clock runtime (sec): {}
================================================'''.format(
(datetime.now() - starttime).total_seconds()))
sys.stdout.flush()
return 0
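# Hedged usage sketch (illustrative): train() reads every tunable from `args`,
# so any object exposing the attributes below works; values are placeholders
# chosen to satisfy the divisibility checks (kmer_length % rweight == 0).
#
#   from argparse import Namespace
#   train('ref_dir', 'model_dir', Namespace(
#       frag_length=100, coverage=1.0, kmer_length=8, rweight=2, hweight=0,
#       num_hash=1, num_batches=1, num_passes=1, bits=31,
#       lambda1=0.0, lambda2=0.0, precise=False))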
def retrain(old_model_dir, new_model_dir, new_examples_dir, args):
    '''Draws fragments from the fasta file found in new_examples_dir. Note that
there must be a label file of the same basename with matching ids for
each of the fasta lines.
old_model_dir (string): must be a path to a directory with old vowpal wabbit model
new_model_dir (string): must be a path to a directory that will contain the new model
new_examples_dir (string): must be a path to a directory containing the new training samples and labels
Unpacking args:
frag_length (int): length of fragments to be drawn
coverage (float): fraction of times each location is to be covered
by drawn fragments
kmer_length (int): size of k-mers used
row_weight (int): how many positions will be randomly chosen in the
contiguous k-mer (k-mer length should be multiple
of row_weight)
num_hash (int): number of hashing functions
num_batches (int): number of times to run vowpal_wabbit
num_passes (int): number of passes within vowpal_wabbit
precise (flag): if set trained model will store probabilities for labels
'''
frag_length = args.frag_length
coverage = args.coverage
kmer = args.kmer_length
num_batches = args.num_batches
num_passes = args.num_passes
fasta, labels = get_fasta_and_label(new_examples_dir)
starttime = datetime.now()
print('''================================================
Retraining using Carnelian + vowpal-wabbit
{:%Y-%m-%d %H:%M:%S}
'''.format(starttime) +
'''num batches: {num_batches}
num passes: {num_passes}
------------------------------------------------
Fasta input: {fasta}
labels input: {labels}
------------------------------------------------'''.format(
num_batches=num_batches,
num_passes=num_passes,
fasta=fasta,
labels=labels)
)
sys.stdout.flush()
num_labels = unique_lines(labels)
print("Number labels: {}".format(num_labels))
sys.stdout.flush()
safe_makedirs(new_model_dir)
old_dico = os.path.join(old_model_dir,"vw-dico.txt")
dico = os.path.join(new_model_dir, "vw-dico.txt")
# define model prefix
prev_model = os.path.join(old_model_dir,"vw-model_final.model")
model_prefix = os.path.join(new_model_dir, "vw-model")
# copy previously used LDPC spaced pattern
old_pattern_file = os.path.join(old_model_dir, "patterns.txt")
pattern_file = os.path.join(new_model_dir, "patterns.txt")
copyfile(old_pattern_file, pattern_file)
seed = 42
for i in range(num_batches):
seed = seed + 1
batch_prefix = os.path.join(new_model_dir, "train.batch-{}".format(i))
fasta_batch = batch_prefix + ".fasta"
gi2label_batch = batch_prefix + ".gi2label"
label_batch = batch_prefix + ".label"
# draw fragments
subprocess.check_call(["drawfrag",
"-i", fasta,
"-t", labels,
"-l", str(frag_length),
"-c", str(coverage),
"-o", fasta_batch,
"-g", gi2label_batch,
"-s", str(seed)],
env=my_env)
# extract labels
extract_column_two(gi2label_batch, label_batch)
# learn model
fasta2skm_param_list = ["fasta2skm",
"-i", fasta_batch,
"-t", label_batch,
"-k", str(kmer),
"-d", dico,
"-p", pattern_file]
print("Getting new training examples ...")
sys.stdout.flush()
training_list = subprocess.check_output(
fasta2skm_param_list, env=my_env).splitlines()
#print(training_list)
print("Shuffling training set ...")
sys.stdout.flush()
random.shuffle(training_list)
curr_model = model_prefix + "_batch-{}.model".format(i)
if i > 0:
prev_model = model_prefix + "_batch-{}.model".format(i-1) # May not exist if first run
vw_param_base = ["vw",
"--random_seed", str(seed),
"-f", curr_model,
"--cache_file", batch_prefix + ".cache",
"--passes", str(num_passes),
"--save_resume"]
if args.precise:
vw_param_base += ["--loss_function=logistic", "--probabilities"]
vw_param_list = vw_param_base + ["-i", prev_model]
print(vw_param_list)
sys.stdout.flush()
vwps = subprocess.Popen(vw_param_list, env=my_env,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
gsp = vwps.communicate(input='\n'.join(training_list))
print(gsp)
while vwps.poll() is None:
l = vwps.stdout.readline()
sys.stdout.write(l)
sys.stdout.flush()
#thread.join() # This shouldn't be necessary, but just being safe.
if i > 0:
os.remove(prev_model)
if i == num_batches - 1:
os.rename(curr_model, model_prefix + "_final.model")
os.remove(batch_prefix + ".cache")
os.remove(fasta_batch)
os.remove(label_batch)
os.remove(gi2label_batch)
merge_dico(old_dico,dico)
print('''------------------------------------------------
Total wall clock runtime (sec): {}
================================================'''.format(
(datetime.now() - starttime).total_seconds()))
sys.stdout.flush()
return 0
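# Hedged usage sketch (illustrative): retrain() reuses the pattern file and
# final model from old_model_dir and continues training on the examples in
# new_examples_dir, taking the same kind of args namespace as train().
#
#   retrain('old_model_dir', 'new_model_dir', 'new_examples_dir', args)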
def predict(model_dir, test_dir, predict_dir, args):
'''Predicts functional | |
("prop", "1918:397"): "metadataonly",
("prop", "1918:398"): "metadataonly",
("prop", "1918:399"): "metadataonly",
("prop", "1918:4"): "metadataonly",
("prop", "1918:40"): "metadataonly",
("prop", "1918:400"): "metadataonly",
("prop", "1918:401"): "metadataonly",
("prop", "1918:402"): "metadataonly",
("prop", "1918:403"): "metadataonly",
("prop", "1918:404"): "metadataonly",
("prop", "1918:406"): "metadataonly",
("prop", "1918:407"): "metadataonly",
("prop", "1918:408"): "metadataonly",
("prop", "1918:409"): "metadataonly",
("prop", "1918:41"): "metadataonly",
("prop", "1918:410"): "metadataonly",
("prop", "1918:411"): "metadataonly",
("prop", "1918:412"): "metadataonly",
("prop", "1918:413"): "metadataonly",
("prop", "1918:414"): "metadataonly",
("prop", "1918:415"): "metadataonly",
("prop", "1918:416"): "metadataonly",
("prop", "1918:417"): "metadataonly",
("prop", "1918:418"): "metadataonly",
("prop", "1918:419"): "metadataonly",
("prop", "1918:42"): "metadataonly",
("prop", "1918:420"): "metadataonly",
("prop", "1918:421"): "metadataonly",
("prop", "1918:422"): "metadataonly",
("prop", "1918:423"): "metadataonly",
("prop", "1918:424"): "metadataonly",
("prop", "1918:425"): "metadataonly",
("prop", "1918:426"): "metadataonly",
("prop", "1918:427"): "metadataonly",
("prop", "1918:428"): "metadataonly",
("prop", "1918:429"): "metadataonly",
("prop", "1918:43"): "metadataonly",
("prop", "1918:430"): "metadataonly",
("prop", "1918:431"): "metadataonly",
("prop", "1918:432"): "metadataonly",
("prop", "1918:433"): "metadataonly",
("prop", "1918:435"): "metadataonly",
("prop", "1918:436"): "metadataonly",
("prop", "1918:437"): "metadataonly",
("prop", "1918:438"): "metadataonly",
("prop", "1918:439"): "metadataonly",
("prop", "1918:44"): "metadataonly",
("prop", "1918:440"): "metadataonly",
("prop", "1918:441"): "metadataonly",
("prop", "1918:442"): "metadataonly",
("prop", "1918:443"): "metadataonly",
("prop", "1918:444"): "metadataonly",
("prop", "1918:445"): "metadataonly",
("prop", "1918:446"): "metadataonly",
("prop", "1918:448"): "metadataonly",
("prop", "1918:449"): "metadataonly",
("prop", "1918:45"): "metadataonly",
("prop", "1918:450"): "metadataonly",
("prop", "1918:451"): "metadataonly",
("prop", "1918:452"): "metadataonly",
("prop", "1918:453"): "metadataonly",
("prop", "1918:454"): "metadataonly",
("prop", "1918:455"): "metadataonly",
("prop", "1918:456"): "metadataonly",
("prop", "1918:457"): "metadataonly",
("prop", "1918:458"): "metadataonly",
("prop", "1918:459"): "metadataonly",
("prop", "1918:46"): "metadataonly",
("prop", "1918:47"): "metadataonly",
("prop", "1918:48"): "metadataonly",
("prop", "1918:49"): "metadataonly",
("prop", "1918:5"): "metadataonly",
("prop", "1918:50"): "metadataonly",
("prop", "1918:51"): "metadataonly",
("prop", "1918:52"): "metadataonly",
("prop", "1918:53"): "metadataonly",
("prop", "1918:54"): "metadataonly",
("prop", "1918:55"): "metadataonly",
("prop", "1918:56"): "metadataonly",
("prop", "1918:57"): "metadataonly",
("prop", "1918:6"): "metadataonly",
("prop", "1918:60"): "metadataonly",
("prop", "1918:61"): "metadataonly",
("prop", "1918:62"): "metadataonly",
("prop", "1918:63"): "metadataonly",
("prop", "1918:64"): "metadataonly",
("prop", "1918:65"): "metadataonly",
("prop", "1918:66"): "metadataonly",
("prop", "1918:67"): "metadataonly",
("prop", "1918:68"): "metadataonly",
("prop", "1918:69"): "metadataonly",
("prop", "1918:7"): "metadataonly",
("prop", "1918:70"): "metadataonly",
("prop", "1918:71"): "metadataonly",
("prop", "1918:72"): "metadataonly",
("prop", "1918:73"): "metadataonly",
("prop", "1918:74"): "metadataonly",
("prop", "1918:75"): "metadataonly",
("prop", "1918:76"): "metadataonly",
("prop", "1918:77"): "metadataonly",
("prop", "1918:78"): "metadataonly",
("prop", "1918:79"): "metadataonly",
("prop", "1918:8"): "metadataonly",
("prop", "1918:81"): "metadataonly",
("prop", "1918:82"): "metadataonly",
("prop", "1918:83"): "metadataonly",
("prop", "1918:84"): "metadataonly",
("prop", "1918:85"): "metadataonly",
("prop", "1918:86"): "metadataonly",
("prop", "1918:87"): "metadataonly",
("prop", "1918:88"): "metadataonly",
("prop", "1918:89"): "metadataonly",
("prop", "1918:9"): "metadataonly",
("prop", "1918:90"): "metadataonly",
("prop", "1918:91"): "metadataonly",
("prop", "1918:92"): "metadataonly",
("prop", "1918:93"): "metadataonly",
("prop", "1918:94"): "metadataonly",
("prop", "1918:95"): "metadataonly",
("prop", "1918:97"): "metadataonly",
("prop", "1918:98"): "metadataonly",
("prop", "1918:99"): "metadataonly",
("prop", "1919:1"): "metadataonly",
("prop", "1919:10"): "metadataonly",
("prop", "1919:100"): "metadataonly",
("prop", "1919:101"): "metadataonly",
("prop", "1919:102"): "metadataonly",
("prop", "1919:103"): "metadataonly",
("prop", "1919:104"): "metadataonly",
("prop", "1919:105"): "metadataonly",
("prop", "1919:106"): "metadataonly",
("prop", "1919:107"): "metadataonly",
("prop", "1919:108"): "metadataonly",
("prop", "1919:109"): "metadataonly",
("prop", "1919:11"): "metadataonly",
("prop", "1919:110"): "metadataonly",
("prop", "1919:111"): "metadataonly",
("prop", "1919:112"): "metadataonly",
("prop", "1919:113"): "metadataonly",
("prop", "1919:114"): "metadataonly",
("prop", "1919:116"): "metadataonly",
("prop", "1919:117"): "metadataonly",
("prop", "1919:118"): "metadataonly",
("prop", "1919:119"): "metadataonly",
("prop", "1919:12"): "metadataonly",
("prop", "1919:120"): "metadataonly",
("prop", "1919:121"): "metadataonly",
("prop", "1919:122"): "metadataonly",
("prop", "1919:123"): "metadataonly",
("prop", "1919:124"): "metadataonly",
("prop", "1919:125"): "metadataonly",
("prop", "1919:126"): "metadataonly",
("prop", "1919:127"): "metadataonly",
("prop", "1919:128"): "metadataonly",
("prop", "1919:129"): "metadataonly",
("prop", "1919:13"): "metadataonly",
("prop", "1919:130"): "metadataonly",
("prop", "1919:131"): "metadataonly",
("prop", "1919:132"): "metadataonly",
("prop", "1919:133"): "metadataonly",
("prop", "1919:134"): "metadataonly",
("prop", "1919:135"): "metadataonly",
("prop", "1919:136"): "metadataonly",
("prop", "1919:137"): "metadataonly",
("prop", "1919:138"): "metadataonly",
("prop", "1919:139"): "metadataonly",
("prop", "1919:14"): "metadataonly",
("prop", "1919:140"): "metadataonly",
("prop", "1919:141"): "metadataonly",
("prop", "1919:142"): "metadataonly",
("prop", "1919:143"): "metadataonly",
("prop", "1919:144"): "metadataonly",
("prop", "1919:145"): "metadataonly",
("prop", "1919:146"): "metadataonly",
("prop", "1919:147"): "metadataonly",
("prop", "1919:148"): "metadataonly",
("prop", "1919:149"): "metadataonly",
("prop", "1919:15"): "metadataonly",
("prop", "1919:150"): "metadataonly",
("prop", "1919:152"): "metadataonly",
("prop", "1919:153"): "metadataonly",
("prop", "1919:154"): "metadataonly",
("prop", "1919:155"): "metadataonly",
("prop", "1919:156"): "metadataonly",
("prop", "1919:157"): "metadataonly",
("prop", "1919:158"): "metadataonly",
("prop", "1919:159"): "metadataonly",
("prop", "1919:16"): "metadataonly",
("prop", "1919:160"): "metadataonly",
("prop", "1919:162"): "metadataonly",
("prop", "1919:163"): "metadataonly",
("prop", "1919:164"): "metadataonly",
("prop", "1919:165"): "metadataonly",
("prop", "1919:166"): "metadataonly",
("prop", "1919:167"): "metadataonly",
("prop", "1919:168"): "metadataonly",
("prop", "1919:169"): "metadataonly",
("prop", "1919:17"): "metadataonly",
("prop", "1919:170"): "metadataonly",
("prop", "1919:171"): "metadataonly",
("prop", "1919:172"): "metadataonly",
("prop", "1919:173"): "metadataonly",
("prop", "1919:174"): "metadataonly",
("prop", "1919:176"): "metadataonly",
("prop", "1919:178"): "metadataonly",
("prop", "1919:179"): "metadataonly",
("prop", "1919:18"): "metadataonly",
("prop", "1919:180"): "metadataonly",
("prop", "1919:181"): "metadataonly",
("prop", "1919:182"): "metadataonly",
("prop", "1919:183"): "metadataonly",
("prop", "1919:184"): "metadataonly",
("prop", "1919:185"): "metadataonly",
("prop", "1919:186"): "metadataonly",
("prop", "1919:187"): "metadataonly",
("prop", "1919:188"): "metadataonly",
("prop", "1919:189"): "metadataonly",
("prop", "1919:19"): "metadataonly",
("prop", "1919:190"): "metadataonly",
("prop", "1919:191"): "metadataonly",
("prop", "1919:192"): "metadataonly",
("prop", "1919:194"): "metadataonly",
("prop", "1919:195"): "metadataonly",
("prop", "1919:197"): "metadataonly",
("prop", "1919:198"): "metadataonly",
("prop", "1919:199"): "metadataonly",
("prop", "1919:2"): "metadataonly",
("prop", "1919:20"): "metadataonly",
("prop", "1919:200"): "metadataonly",
("prop", "1919:201"): "metadataonly",
("prop", "1919:202"): "metadataonly",
("prop", "1919:203"): "metadataonly",
("prop", "1919:204"): "metadataonly",
("prop", "1919:205"): "metadataonly",
("prop", "1919:206"): "metadataonly",
("prop", "1919:207"): "metadataonly",
("prop", "1919:208"): "metadataonly",
("prop", "1919:209"): "metadataonly",
("prop", "1919:21"): "metadataonly",
("prop", "1919:210"): "metadataonly",
("prop", "1919:211"): "metadataonly",
("prop", "1919:212"): "metadataonly",
("prop", "1919:213"): "metadataonly",
("prop", "1919:214"): "metadataonly",
("prop", "1919:215"): "metadataonly",
("prop", "1919:216"): "metadataonly",
("prop", "1919:217"): "metadataonly",
("prop", "1919:218"): "metadataonly",
("prop", "1919:219"): "metadataonly",
("prop", "1919:22"): "metadataonly",
("prop", "1919:220"): "metadataonly",
("prop", "1919:221"): "metadataonly",
("prop", "1919:222"): "metadataonly",
("prop", "1919:223"): "metadataonly",
("prop", "1919:224"): "metadataonly",
("prop", "1919:225"): "metadataonly",
("prop", "1919:226"): "metadataonly",
("prop", "1919:227"): "metadataonly",
("prop", "1919:228"): "metadataonly",
("prop", "1919:229"): "metadataonly",
("prop", "1919:23"): "metadataonly",
("prop", "1919:231"): "metadataonly",
("prop", "1919:233"): "metadataonly",
("prop", "1919:234"): "metadataonly",
("prop", "1919:235"): "metadataonly",
("prop", "1919:236"): "metadataonly",
("prop", "1919:237"): "metadataonly",
("prop", "1919:238"): "metadataonly",
("prop", "1919:239"): "metadataonly",
("prop", "1919:24"): "metadataonly",
("prop", "1919:240"): "metadataonly",
("prop", "1919:241"): "metadataonly",
("prop", "1919:242"): "metadataonly",
("prop", "1919:244"): "metadataonly",
("prop", "1919:245"): "metadataonly",
("prop", "1919:246"): "metadataonly",
("prop", "1919:248"): "metadataonly",
("prop", "1919:249"): "metadataonly",
("prop", "1919:25"): "metadataonly",
("prop", "1919:250"): "metadataonly",
("prop", "1919:251"): "metadataonly",
("prop", "1919:252"): "metadataonly",
("prop", "1919:253"): "metadataonly",
("prop", "1919:254"): "metadataonly",
("prop", "1919:255"): "metadataonly",
("prop", "1919:256"): "metadataonly",
("prop", "1919:257"): "metadataonly",
("prop", "1919:258"): "metadataonly",
("prop", "1919:26"): "metadataonly",
("prop", "1919:260"): "metadataonly",
("prop", "1919:261"): "metadataonly",
("prop", "1919:262"): "metadataonly",
("prop", "1919:263"): "metadataonly",
("prop", "1919:264"): "metadataonly",
("prop", "1919:265"): "metadataonly",
("prop", "1919:266"): "metadataonly",
("prop", "1919:267"): "metadataonly",
("prop", "1919:268"): "metadataonly",
("prop", "1919:27"): "metadataonly",
("prop", "1919:270"): "metadataonly",
("prop", "1919:271"): "metadataonly",
("prop", "1919:272"): "metadataonly",
("prop", "1919:273"): "metadataonly",
("prop", "1919:274"): "metadataonly",
("prop", "1919:275"): "metadataonly",
("prop", "1919:276"): "metadataonly",
("prop", "1919:277"): "metadataonly",
("prop", "1919:278"): "metadataonly",
("prop", "1919:279"): "metadataonly",
("prop", "1919:28"): "metadataonly",
("prop", "1919:280"): "metadataonly",
("prop", "1919:281"): "metadataonly",
("prop", "1919:283"): "metadataonly",
("prop", "1919:284"): "metadataonly",
("prop", "1919:285"): "metadataonly",
("prop", "1919:286"): "metadataonly",
("prop", "1919:287"): "metadataonly",
("prop", "1919:288"): "metadataonly",
("prop", "1919:289"): "metadataonly",
("prop", "1919:29"): "metadataonly",
("prop", "1919:291"): "metadataonly",
("prop", "1919:292"): "metadataonly",
("prop", "1919:293"): "metadataonly",
("prop", "1919:294"): "metadataonly",
("prop", "1919:295"): "metadataonly",
("prop", "1919:296"): "metadataonly",
("prop", "1919:297"): "metadataonly",
("prop", "1919:298"): "metadataonly",
("prop", "1919:299"): "metadataonly",
("prop", "1919:3"): "metadataonly",
("prop", "1919:30"): "metadataonly",
("prop", "1919:300"): "metadataonly",
("prop", "1919:301"): "metadataonly",
("prop", "1919:302"): "metadataonly",
("prop", "1919:303"): "metadataonly",
("prop", "1919:304"): "metadataonly",
("prop", "1919:305"): "metadataonly",
("prop", "1919:306"): "metadataonly",
("prop", "1919:307"): "metadataonly",
("prop", "1919:308"): "metadataonly",
("prop", "1919:309"): "metadataonly",
("prop", "1919:31"): "metadataonly",
("prop", "1919:311"): "metadataonly",
("prop", "1919:312"): "metadataonly",
("prop", "1919:313"): "metadataonly",
("prop", "1919:314"): "metadataonly",
("prop", "1919:315"): "metadataonly",
("prop", "1919:316"): "metadataonly",
("prop", "1919:317"): "metadataonly",
("prop", "1919:318"): "metadataonly",
("prop", "1919:319"): "metadataonly",
("prop", "1919:320"): "metadataonly",
("prop", "1919:321"): "metadataonly",
("prop", "1919:322"): "metadataonly",
("prop", "1919:323"): "metadataonly",
("prop", "1919:324"): "metadataonly",
("prop", "1919:325"): "metadataonly",
("prop", "1919:326"): "metadataonly",
("prop", "1919:327"): "metadataonly",
("prop", "1919:328"): "metadataonly",
("prop", "1919:329"): "metadataonly",
("prop", "1919:330"): "metadataonly",
("prop", "1919:331"): "metadataonly",
("prop", "1919:332"): "metadataonly",
("prop", "1919:333"): "metadataonly",
("prop", "1919:334"): "metadataonly",
("prop", "1919:335"): "metadataonly",
("prop", "1919:336"): "metadataonly",
("prop", "1919:337"): "metadataonly",
("prop", "1919:338"): "metadataonly",
("prop", "1919:339"): "metadataonly",
("prop", "1919:34"): "metadataonly",
("prop", "1919:340"): "metadataonly",
("prop", "1919:341"): "metadataonly",
("prop", "1919:342"): "metadataonly",
("prop", "1919:343"): "metadataonly",
("prop", "1919:344"): "metadataonly",
("prop", "1919:345"): "metadataonly",
("prop", "1919:346"): "metadataonly",
("prop", "1919:347"): "metadataonly",
("prop", "1919:348"): "metadataonly",
("prop", "1919:349"): "metadataonly",
("prop", "1919:35"): "metadataonly",
("prop", "1919:351"): "metadataonly",
("prop", "1919:352"): "metadataonly",
("prop", "1919:353"): "metadataonly",
("prop", "1919:355"): "metadataonly",
("prop", "1919:356"): "metadataonly",
("prop", "1919:357"): "metadataonly",
("prop", "1919:359"): "metadataonly",
("prop", "1919:36"): "metadataonly",
("prop", "1919:360"): "metadataonly",
("prop", "1919:361"): "metadataonly",
("prop", "1919:362"): "metadataonly",
("prop", "1919:363"): "metadataonly",
("prop", "1919:365"): "metadataonly",
("prop", "1919:366"): "metadataonly",
("prop", "1919:367"): "metadataonly",
("prop", "1919:368"): "metadataonly",
("prop", "1919:369"): "metadataonly",
("prop", "1919:37"): "metadataonly",
("prop", "1919:370"): "metadataonly",
("prop", "1919:371"): "metadataonly",
("prop", "1919:372"): "metadataonly",
("prop", "1919:373"): "metadataonly",
("prop", "1919:374"): "metadataonly",
("prop", "1919:375"): "metadataonly",
("prop", "1919:376"): "metadataonly",
("prop", "1919:377"): "metadataonly",
("prop", "1919:378"): "metadataonly",
("prop", "1919:379"): "metadataonly",
("prop", "1919:38"): "metadataonly",
("prop", "1919:380"): "metadataonly",
("prop", "1919:381"): "metadataonly",
("prop", "1919:382"): "metadataonly",
("prop", "1919:383"): "metadataonly",
("prop", "1919:384"): "metadataonly",
("prop", "1919:385"): "metadataonly",
("prop", "1919:386"): "metadataonly",
("prop", "1919:387"): "metadataonly",
("prop", "1919:388"): "metadataonly",
("prop", "1919:389"): "metadataonly",
("prop", "1919:39"): "metadataonly",
("prop", "1919:390"): "metadataonly",
("prop", "1919:391"): "metadataonly",
("prop", "1919:392"): "metadataonly",
("prop", "1919:393"): | |
"""
Aqualink API documentation
The Aqualink public API documentation # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from aqualink_sdk.api_client import ApiClient, Endpoint as _Endpoint
from aqualink_sdk.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from aqualink_sdk.model.create_user_dto import CreateUserDto
from aqualink_sdk.model.inline_response404 import InlineResponse404
from aqualink_sdk.model.set_admin_level_dto import SetAdminLevelDto
from aqualink_sdk.model.site import Site
from aqualink_sdk.model.user import User
class UsersApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.users_controller_create_endpoint = _Endpoint(
settings={
'response_type': (User,),
'auth': [],
'endpoint_path': '/users',
'operation_id': 'users_controller_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'create_user_dto',
],
'required': [
'create_user_dto',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'create_user_dto':
(CreateUserDto,),
},
'attribute_map': {
},
'location_map': {
'create_user_dto': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.users_controller_delete_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'bearer'
],
'endpoint_path': '/users/{id}',
'operation_id': 'users_controller_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(float,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.users_controller_get_administered_sites_endpoint = _Endpoint(
settings={
'response_type': ([Site],),
'auth': [
'bearer'
],
'endpoint_path': '/users/current/administered-sites',
'operation_id': 'users_controller_get_administered_sites',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.users_controller_get_self_endpoint = _Endpoint(
settings={
'response_type': (User,),
'auth': [
'bearer'
],
'endpoint_path': '/users/current',
'operation_id': 'users_controller_get_self',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.users_controller_set_admin_level_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'bearer'
],
'endpoint_path': '/users/{id}/level',
'operation_id': 'users_controller_set_admin_level',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'id',
'set_admin_level_dto',
],
'required': [
'id',
'set_admin_level_dto',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(float,),
'set_admin_level_dto':
(SetAdminLevelDto,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
'set_admin_level_dto': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
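    # Hedged usage sketch (illustrative, not generated code): UsersApi builds a
    # default ApiClient when none is supplied, so a minimal synchronous call is
    # just instantiation plus a controller method. Bearer authentication setup
    # is omitted and would be required for the protected endpoints.
    #
    #   api = UsersApi()                       # uses ApiClient() defaults
    #   me = api.users_controller_get_self()   # GET /users/current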
def users_controller_create(
self,
create_user_dto,
**kwargs
):
"""Creates a new user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.users_controller_create(create_user_dto, async_req=True)
>>> result = thread.get()
Args:
create_user_dto (CreateUserDto):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
User
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['create_user_dto'] = \
create_user_dto
return self.users_controller_create_endpoint.call_with_http_info(**kwargs)
def users_controller_delete(
self,
id,
**kwargs
):
"""Deletes specified user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.users_controller_delete(id, async_req=True)
>>> result = thread.get()
Args:
id (float):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.users_controller_delete_endpoint.call_with_http_info(**kwargs)
def users_controller_get_administered_sites(
self,
**kwargs
):
"""Returns the administered sites of the signed in user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.users_controller_get_administered_sites(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Site]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
| |
elif which == 'tick1':
# for cartesian projection, this is bottom spine
return self.spines['bottom'].get_spine_transform()
elif which == 'tick2':
# for cartesian projection, this is top spine
return self.spines['top'].get_spine_transform()
else:
raise ValueError('unknown value for which')
def get_xaxis_text1_transform(self, pad_points):
"""
Returns
-------
transform : Transform
The transform used for drawing x-axis labels, which will add
*pad_points* of padding (in points) between the axes and the label.
The x-direction is in data coordinates and the y-direction is in
            axis coordinates
valign : {'center', 'top', 'bottom', 'baseline', 'center_baseline'}
The text vertical alignment.
halign : {'center', 'left', 'right'}
The text horizontal alignment.
Notes
-----
This transformation is primarily used by the `~matplotlib.axis.Axis`
class, and is meant to be overridden by new kinds of projections that
may need to place axis elements in different locations.
"""
labels_align = rcParams["xtick.alignment"]
return (self.get_xaxis_transform(which='tick1') +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72,
self.figure.dpi_scale_trans),
"top", labels_align)
def get_xaxis_text2_transform(self, pad_points):
"""
Returns
-------
transform : Transform
The transform used for drawing secondary x-axis labels, which will
add *pad_points* of padding (in points) between the axes and the
label. The x-direction is in data coordinates and the y-direction
            is in axis coordinates
valign : {'center', 'top', 'bottom', 'baseline', 'center_baseline'}
The text vertical alignment.
halign : {'center', 'left', 'right'}
The text horizontal alignment.
Notes
-----
This transformation is primarily used by the `~matplotlib.axis.Axis`
class, and is meant to be overridden by new kinds of projections that
may need to place axis elements in different locations.
"""
labels_align = rcParams["xtick.alignment"]
return (self.get_xaxis_transform(which='tick2') +
mtransforms.ScaledTranslation(0, pad_points / 72,
self.figure.dpi_scale_trans),
"bottom", labels_align)
def get_yaxis_transform(self, which='grid'):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
if which == 'grid':
return self._yaxis_transform
elif which == 'tick1':
            # for cartesian projection, this is left spine
return self.spines['left'].get_spine_transform()
elif which == 'tick2':
            # for cartesian projection, this is right spine
return self.spines['right'].get_spine_transform()
else:
raise ValueError('unknown value for which')
def get_yaxis_text1_transform(self, pad_points):
"""
Returns
-------
transform : Transform
The transform used for drawing y-axis labels, which will add
*pad_points* of padding (in points) between the axes and the label.
The x-direction is in axis coordinates and the y-direction is in
            data coordinates
valign : {'center', 'top', 'bottom', 'baseline', 'center_baseline'}
The text vertical alignment.
halign : {'center', 'left', 'right'}
The text horizontal alignment.
Notes
-----
This transformation is primarily used by the `~matplotlib.axis.Axis`
class, and is meant to be overridden by new kinds of projections that
may need to place axis elements in different locations.
"""
labels_align = rcParams["ytick.alignment"]
return (self.get_yaxis_transform(which='tick1') +
mtransforms.ScaledTranslation(-1 * pad_points / 72, 0,
self.figure.dpi_scale_trans),
labels_align, "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Returns
-------
transform : Transform
            The transform used for drawing secondary y-axis labels, which will
add *pad_points* of padding (in points) between the axes and the
label. The x-direction is in axis coordinates and the y-direction
            is in data coordinates
valign : {'center', 'top', 'bottom', 'baseline', 'center_baseline'}
The text vertical alignment.
halign : {'center', 'left', 'right'}
The text horizontal alignment.
Notes
-----
This transformation is primarily used by the `~matplotlib.axis.Axis`
class, and is meant to be overridden by new kinds of projections that
may need to place axis elements in different locations.
"""
labels_align = rcParams["ytick.alignment"]
return (self.get_yaxis_transform(which='tick2') +
mtransforms.ScaledTranslation(pad_points / 72, 0,
self.figure.dpi_scale_trans),
labels_align, "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
for line in getattr(self, "lines", []): # Not set during init.
try:
line._transformed_path.invalidate()
except AttributeError:
pass
def get_position(self, original=False):
"""
Get a copy of the axes rectangle as a `.Bbox`.
Parameters
----------
original : bool
If ``True``, return the original position. Otherwise return the
active position. For an explanation of the positions see
`.set_position`.
Returns
-------
pos : `.Bbox`
"""
if original:
return self._originalPosition.frozen()
else:
locator = self.get_axes_locator()
if not locator:
self.apply_aspect()
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position.
Axes have two position attributes. The 'original' position is the
position allocated for the Axes. The 'active' position is the
position the Axes is actually drawn at. These positions are usually
the same unless a fixed aspect is set to the Axes. See `.set_aspect`
for details.
Parameters
----------
pos : [left, bottom, width, height] or `~matplotlib.transforms.Bbox`
            The new position of the axes in `.Figure` coordinates.
which : {'both', 'active', 'original'}, optional
Determines which position variables to change.
"""
self._set_position(pos, which=which)
# because this is being called externally to the library we
# zero the constrained layout parts.
self._layoutbox = None
self._poslayoutbox = None
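    # Hedged usage sketch: positions are [left, bottom, width, height] in
    # figure-fraction coordinates, so this pins an axes to the lower-left
    # quadrant of its figure.
    #
    #   fig, ax = plt.subplots()
    #   ax.set_position([0.05, 0.05, 0.45, 0.45])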
def _set_position(self, pos, which='both'):
"""
private version of set_position. Call this internally
to get the same functionality of `get_position`, but not
to take the axis out of the constrained_layout
hierarchy.
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
for ax in self._twinned_axes.get_siblings(self):
if which in ('both', 'active'):
ax._position.set(pos)
if which in ('both', 'original'):
ax._originalPosition.set(pos)
self.stale = True
def reset_position(self):
"""
Reset the active position to the original position.
        This resets a possible position change due to aspect constraints.
For an explanation of the positions see `.set_position`.
"""
for ax in self._twinned_axes.get_siblings(self):
pos = ax.get_position(original=True)
ax.set_position(pos, which='active')
def set_axes_locator(self, locator):
"""
Set the axes locator.
Parameters
----------
locator : Callable[[Axes, Renderer], Bbox]
"""
self._axes_locator = locator
self.stale = True
def get_axes_locator(self):
"""
Return the axes_locator.
"""
return self._axes_locator
def _set_artist_props(self, a):
"""set the boilerplate props for artists added to axes"""
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.axes = self
if a.mouseover:
self._mouseover_set.add(a)
def _gen_axes_patch(self):
"""
Returns
-------
Patch
The patch used to draw the background of the axes. It is also used
as the clipping path for any data elements on the axes.
In the standard axes, this is a rectangle, but in other projections
it may not be.
Notes
-----
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
def _gen_axes_spines(self, locations=None, offset=0.0, units='inches'):
"""
Returns
-------
dict
Mapping of spine names to `Line2D` or `Patch` instances that are
used to draw axes spines.
In the standard axes, spines are single line segments, but in other
projections they may not be.
Notes
-----
Intended to be overridden by new projection types.
"""
return OrderedDict((side, mspines.Spine.linear_spine(self, side))
for side in ['left', 'right', 'bottom', 'top'])
def cla(self):
"""Clear the current axes."""
# Note: this is called by Axes.__init__()
# stash the current visibility state
if hasattr(self, 'patch'):
patch_visible = self.patch.get_visible()
else:
patch_visible = True
xaxis_visible = self.xaxis.get_visible()
yaxis_visible = self.yaxis.get_visible()
self.xaxis.cla()
self.yaxis.cla()
for name, spine in self.spines.items():
spine.cla()
self.ignore_existing_data_limits = True
self.callbacks = cbook.CallbackRegistry()
if self._sharex is not None:
# major and minor are axis.Ticker class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.minor = self._sharex.xaxis.minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False,
auto=self._sharex.get_autoscalex_on())
self.xaxis._scale = self._sharex.xaxis._scale
else:
self.xaxis._set_scale('linear')
try:
self.set_xlim(0, 1)
except TypeError:
pass
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.minor = self._sharey.yaxis.minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False,
auto=self._sharey.get_autoscaley_on())
self.yaxis._scale = self._sharey.yaxis._scale
else:
self.yaxis._set_scale('linear')
try:
self.set_ylim(0, 1)
except TypeError:
pass
# update the minor locator for x and y axis based on rcParams
if rcParams['xtick.minor.visible']:
self.xaxis.set_minor_locator(mticker.AutoMinorLocator())
if rcParams['ytick.minor.visible']:
self.yaxis.set_minor_locator(mticker.AutoMinorLocator())
if self._sharex is None:
self._autoscaleXon = True
if self._sharey is None:
self._autoscaleYon = True
self._xmargin = rcParams['axes.xmargin']
self._ymargin = rcParams['axes.ymargin']
self._tight = None
self._use_sticky_edges = True
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.images = []
self._mouseover_set = _OrderedSet()
self.child_axes = []
self._current_image = None # strictly for pyplot via _sci, _gci
self.legend_ = None
self.collections = [] # collection.Collection instances
self.containers = []
        self.grid(False)  # Disable grid on init to use rcParameter
<reponame>iagcl/data_pipeline
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
# Module: applier
# Purpose: Applies CDCs polled from Kafka queue to a target DB
#
# Notes:
#
###############################################################################
import confluent_kafka
import logging
import os
import sys
import time
import yaml
import data_pipeline.audit.connection_factory as audit_conn_factory
import data_pipeline.constants.const as const
import data_pipeline.logger.logging_loader as logging_loader
import data_pipeline.sql.utils as sql_utils
import data_pipeline.utils.dbuser as dbuser
import data_pipeline.utils.filesystem as filesystem_utils
import data_pipeline.utils.mailer as mailer
from .exceptions import ApplyError
from abc import ABCMeta, abstractmethod
from data_pipeline.audit.audit_dao import SourceSystemProfile
from data_pipeline.audit.factory import AuditFactory, get_audit_db
from data_pipeline.common import SignalHandler
from data_pipeline.stream.file_writer import FileWriter
from data_pipeline.processor.exceptions import UnsupportedSqlError
from data_pipeline.utils.args import get_program_args
LSN = "lsn"
SQL = "sql"
OFFSET = "offset"
def _is_start_of_batch(message):
return message.record_type == const.START_OF_BATCH
def _is_end_of_batch(message):
return message.record_type == const.END_OF_BATCH
def _is_kill(message):
return message.record_type == const.KILL
def get_inactive_applied_tables(audit_conn_details, argv, logger):
sql = """
SELECT profile_name, target_region, object_name
FROM {audit_schema}.source_system_profile
WHERE 1 = 1
AND profile_name = %s
AND version = %s
AND COALESCE(applied_ind, 'Y') = 'N'
ORDER BY object_seq""".format(audit_schema=argv.auditschema)
bind_values = (argv.profilename, argv.profileversion)
with get_audit_db(argv) as audit_db:
logger.debug("Executing: {sql}\nBind values = {bind_values}"
.format(sql=sql, bind_values=bind_values))
result = audit_db.execute_query(sql, argv.arraysize, bind_values)
tables = set()
for row in result:
schema = const.EMPTY_STRING
if row[1]:
schema = "{schema}.".format(schema=row[1])
if row[2]:
tables.add("{schema}{table}"
.format(schema=schema, table=row[2])
.lower())
logger.debug("Following tables will not be applied: {tables}"
.format(tables=tables))
return tables
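# Illustrative sketch (not part of the module): how the rows returned by the
# source_system_profile query above are folded into the set of skipped table
# names. The sample rows in the comment below are made up for illustration only.
def _build_inactive_table_set(rows):
    tables = set()
    for profile_name, target_region, object_name in rows:
        schema = "{schema}.".format(schema=target_region) if target_region else ""
        if object_name:
            tables.add("{schema}{table}".format(schema=schema, table=object_name).lower())
    return tables
# _build_inactive_table_set([("myprofile", "STAGE", "Customers"),
#                            ("myprofile", None, "Orders")])
# -> set(["stage.customers", "orders"])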
class CdcApplyRecord:
def __init__(self, executor_run_id, executor_status, status):
self.executor_run_id = executor_run_id
self.executor_status = executor_status
self.status = status
class BulkOperation(object):
def __init__(self):
self._buff = {}
self.reset()
def keys(self):
return self._buff.keys()
def items(self):
return self._buff.items()
def empty(self):
return self.max_count == 0
def __getitem__(self, key):
return self._buff[key]
def reset(self):
self.max_count = 0
self.max_lsn = 0
self.max_offset = 0
self.start_offset = 0
self.statement_type = None
def add(self, statement, commit_lsn, offset):
if (self.statement_type and
self.statement_type != statement.statement_type):
raise Exception("Attempting to add statement with operation: "
"{statement_op} but current bulk operation is for "
"opertion: {bulk_op}"
.format(statement_op=statement.statement_type,
bulk_op=self.statement_type))
self.statement_type = statement.statement_type
if self.empty():
self.start_offset = offset
statements = self._buff.setdefault(statement.table_name, [])
statements.append((statement, commit_lsn, offset))
self.max_count = max(self.max_count, len(statements))
self.max_lsn = max(self.max_lsn, commit_lsn)
self.max_offset = max(self.max_offset, offset)
def __str__(self):
return str(self._buff)
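# Illustrative sketch (not part of the module): how BulkOperation accumulates
# per-table statements and exposes the high-water marks an applier can use to
# decide when to flush. FakeStatement is a stand-in for the real processor
# statement objects, which only need .statement_type and .table_name here.
from collections import namedtuple

FakeStatement = namedtuple("FakeStatement", ["statement_type", "table_name"])

def _bulk_operation_demo():
    ops = BulkOperation()
    ops.add(FakeStatement("INSERT", "customers"), commit_lsn=101, offset=7)
    ops.add(FakeStatement("INSERT", "customers"), commit_lsn=105, offset=8)
    ops.add(FakeStatement("INSERT", "orders"), commit_lsn=103, offset=9)
    # max_count is the deepest per-table buffer; max_lsn/max_offset track the
    # latest values seen; start_offset is the offset of the first add.
    return ops.max_count, ops.max_lsn, ops.max_offset, ops.start_offset  # (2, 105, 9, 7)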
class Applier(SignalHandler):
__metaclass__ = ABCMeta
def __init__(self, mode, target_db, argv, audit_factory, source_processor):
super(Applier, self).__init__(mode, argv, audit_factory)
self._target_db = target_db
self._source_processor = source_processor
self._output_file = None
self._batch_started = False
self._target_conn_details = dbuser.get_dbuser_properties(
argv.targetuser)
self._target_conn_details.sslmode = self._argv.sslmode
self._target_conn_details.sslcert = self._argv.sslcert
self._target_conn_details.sslrootcert = self._argv.sslrootcert
self._target_conn_details.sslkey = self._argv.sslkey
self._target_conn_details.sslcrl = self._argv.sslcrl
self._init()
self._init_auditing()
self._init_output_file()
self._maxlsns_per_table = {}
self._delta_maxlsns_per_table = {}
self._get_max_lsn_source_system_profile()
self._last_apply_record = None
self._get_last_apply_record()
self._first_batch_received = False
self._skip_batches = int(self._argv.skipbatch)
stream = open(argv.datatypemap)
self._config = yaml.load(stream)
self._stream_message = None
self._bulk_ops = BulkOperation()
self._committed_state = {}
self._last_executed_state = {}
self._last_committed_state = None
@property
def recovery_offset(self):
if not self._recovery_offset:
return self.current_message_offset
return self._recovery_offset
@recovery_offset.setter
def recovery_offset(self, value):
self._logger.debug("Setting recovery offset = {}".format(value))
self._recovery_offset = value
@property
def at_auditcommitpoint(self):
return (self._received_count > 0 and
self._received_count % self._argv.auditcommitpoint == 0)
@property
def at_targetcommitpoint(self):
return (self._received_count > 0 and
self._received_count % self._argv.targetcommitpoint == 0)
@property
def next_offset_to_read(self):
# No previous apply record found
if self._last_apply_record is None:
return None
return self._last_apply_record.executor_run_id
@property
def last_status(self):
# No previous apply record found
if self._last_apply_record is None:
return None
return self._last_apply_record.status
@property
def current_message_offset(self):
return self._stream_message.offset()
@property
def next_message_offset(self):
return self._stream_message.offset() + 1
def apply(self, stream_message):
self._stream_message = stream_message
message = self._source_processor.deserialise(stream_message.value())
batch_committed = False
retries_remaining = self._argv.retry
while retries_remaining >= 0:
try:
if self._can_apply(message):
if _is_kill(message):
self._log_terminate()
return const.KILLED
elif _is_start_of_batch(message):
self._start_batch(message)
elif _is_end_of_batch(message):
batch_committed = self._end_batch(message)
else:
self._apply_data(message)
if not batch_committed:
self._audit_commit()
self._target_commit(message)
break
except Exception as e:
err_message = "{err}\n".format(err=str(e))
self.report_error(err_message)
if retries_remaining == 0:
return const.ERROR
time.sleep(self._argv.retrypause)
self._logger.info("Retrying apply... remaining retries = {r}"
.format(r=retries_remaining))
retries_remaining -= 1
# Update the next offset to read in case a reassignment is triggered
self._last_apply_record.executor_run_id = self.next_message_offset
return const.COMMITTED if batch_committed else const.UNCOMMITTED
def _log_terminate(self):
self._init_auditing()
warn_message = ("Termination message received. "
"Shutting down consumer.")
self._pc.comment = warn_message
self._pc.status = const.KILLED
self._pc.executor_run_id = self.next_message_offset
self._pc.executor_status = const.COMMITTED
self._pc.update()
self._logger.warn(warn_message)
def _can_apply(self, message):
t = sql_utils.TableName(self._argv.targetschema.lower(),
message.table_name.lower())
if t.fullname in self._inactive_applied_tables:
self._logger.warn("Table {t} marked as inactive for applies. "
"Message will not be applied."
.format(t=t.fullname))
return False
if t.fullname in self._maxlsns_per_table:
if not message.commit_lsn:
self._logger.warn("[{t}] Message LSN is not set for message. "
"Allowing message to be applied "
"to target: {message}"
.format(t=t.fullname,
message=str(message)))
return True
if not self._maxlsns_per_table[t.fullname]:
self._logger.warn("[{t}] Max LSN is not set in "
"source_system_profile table. Allowing "
"message to be applied to target: {message}"
.format(t=t.fullname,
message=str(message)))
return True
message_lsn = int(message.commit_lsn)
max_lsn = int(self._maxlsns_per_table[t.fullname])
self._logger.debug("[{t}] Making sure message LSN ({msglsn}) > "
"Max recorded LSN ({maxlsn})"
.format(t=t.fullname,
msglsn=message_lsn,
maxlsn=max_lsn))
if message_lsn <= max_lsn:
self._logger.warn("[{t}] Message LSN ({msglsn}) <= Max "
"recorded LSN ({maxlsn}). "
"Message will not be applied."
.format(msglsn=message_lsn,
maxlsn=max_lsn,
t=t.fullname))
return False
return True
def _get_last_apply_record(self):
if self._last_apply_record is None:
self._last_apply_record = CdcApplyRecord(
executor_run_id=None,
executor_status=const.SUCCESS,
status=const.SUCCESS
)
if self._argv.seektoend:
self._last_apply_record.executor_run_id = confluent_kafka.OFFSET_END
self._last_apply_record.executor_status = const.SUCCESS
self._last_apply_record.status = const.SUCCESS
return
sql = """
SELECT executor_run_id, executor_status, status
FROM {audit_schema}.process_control
WHERE id = (
SELECT MAX(id)
FROM process_control
WHERE executor_run_id > 0
AND profile_name = %s
AND profile_version = %s
AND process_code = %s
)
""".format(audit_schema=self._argv.auditschema,
committed=const.COMMITTED)
bind_variables = (self._argv.profilename,
self._argv.profileversion,
self._mode)
with get_audit_db(self._argv) as audit_db:
query_results = audit_db.execute_query(
sql, self._argv.arraysize, bind_variables)
row = query_results.fetchone()
if row:
self._last_apply_record.executor_run_id = row[0]
self._last_apply_record.executor_status = row[1]
self._last_apply_record.status = row[2]
self._logger.info("Last committed offset = {offset}"
.format(offset=self.next_offset_to_read))
def _get_max_lsn_source_system_profile(self):
sql = """
SELECT target_region, object_name, max_lsn
FROM {audit_schema}.source_system_profile
WHERE profile_name = %s
AND version = %s
""".format(audit_schema=self._argv.auditschema)
bind_variables = (self._argv.profilename,
self._argv.profileversion)
with get_audit_db(self._argv) as audit_db:
query_results = audit_db.execute_query(
sql, self._argv.arraysize, bind_variables)
for row in query_results:
target_region = row[0].lower()
table_name = row[1].lower()
max_lsn = row[2]
t = sql_utils.TableName(target_region, table_name)
self._logger.debug("Mapping table->max_lsns from "
"source_system_profile: {t}->{l}"
.format(t=t.fullname, l=max_lsn))
if max_lsn is not None:
self._maxlsns_per_table[t.fullname.lower()] = max_lsn
def _apply_data(self, message):
if self._skip_batches < 0:
raise Exception("Invalid state: Skip batches < 0")
elif self._skip_batches == 0:
self._received_count += 1
table = sql_utils.TableName(self._argv.targetschema,
message.table_name)
tablename = table.fullname.lower()
self._delta_maxlsns_per_table[tablename] = message.commit_lsn
if not self._batch_started:
# Insert an implicit start of batch
self._start_batch(message)
if message.table_name:
pcd = self.get_pcd(message.table_name)
pcd.source_row_count += 1
else:
raise ApplyError("table_name has not been "
"specified in message")
try:
statement = self._source_processor.process(message)
if statement:
# Create an entry in source_system_profile if the
# table_name doesn't already exist
self._ensure_table_name_in_ssp(
statement.statement_type, message)
self.execute_statement(statement, message.commit_lsn)
except UnsupportedSqlError as err:
self._logger.warn("Unsupported SQL in {msg}: {error}"
.format(msg=message, error=str(err)))
self._processed_count += 1
else:
self._logger.debug("Skipping message...")
def _ensure_table_name_in_ssp(self, statement_type, message):
if statement_type != const.CREATE:
return
sql = ("""
-- Insert a new source_system_profile record for object
-- '{table_name}' if it doesn't already exist for the current profile
INSERT INTO {schema}.source_system_profile
(profile_name, version, source_system_code, target_region,
object_name, min_lsn, max_lsn, active_ind,
last_process_code, last_status,
last_updated, last_applied, object_seq)
SELECT
%s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
CURRENT_TIMESTAMP, CURRENT_TIMESTAMP,
( -- Compute the next object_seq in the profile
SELECT MAX(object_seq)+1 FROM {schema}.source_system_profile
WHERE 1 = 1
AND profile_name = %s
AND version = %s
)
WHERE NOT EXISTS ( -- Make this operation idempotent
SELECT * FROM {schema}.source_system_profile
WHERE 1 = 1
AND profile_name = %s
AND version = %s
AND LOWER(target_region) = LOWER(%s)
AND LOWER(object_name) = LOWER(%s)
)""".format(schema=self._argv.auditschema,
table_name=message.table_name))
with get_audit_db(self._argv) as audit_db:
bind_values = (
self._argv.profilename,
self._argv.profileversion,
self._argv.sourcesystem,
self._argv.targetschema,
message.table_name.lower(),
message.commit_lsn,
message.commit_lsn,
'Y',
const.CDCAPPLY,
const.SUCCESS,
self._argv.profilename,
self._argv.profileversion,
self._argv.profilename,
self._argv.profileversion,
self._argv.targetschema,
message.table_name.lower(),
)
affected_rows = audit_db.execute(sql, bind_values)
audit_db.commit()
def report_error(self, err_message):
try:
self._pc.comment = err_message
self._pc.status = const.ERROR
self._pc.update()
subject = ("{source_system} applier has failed. Partial batch "
"(up to error) committed."
.format(source_system=self._argv.profilename))
# More detailed error message for email
import random
import numpy as np
import scipy
import time
import json
import os
import pdb
import pickle
import pandas
from progressbar import *
from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot
from keras.models import Sequential, load_model, Model
from keras.optimizers import RMSprop, Adam, SGD
from keras import backend as K
from keras import regularizers
from keras.utils.np_utils import to_categorical
from utils import convnet_vgg, convnet_mod, convnet_ori, convnet_com
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
#return x / np.linalg.norm(x)
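# Illustrative check (not part of the module): the max-subtraction above keeps
# softmax finite for large logits, where a naive exp() would overflow to inf.
def _softmax_stability_demo():
    big_logits = np.array([1000.0, 1001.0, 1002.0])
    probs = softmax(big_logits)                      # ~[0.090, 0.245, 0.665]
    assert np.isfinite(probs).all() and abs(probs.sum() - 1.0) < 1e-9
    return probs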
def makeFunc(x):
return lambda y:y[:,x]
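# Illustrative sketch (not part of the module): makeFunc presumably exists to
# freeze the column index per call. A bare `lambda y: y[:, i]` created inside a
# loop closes over the variable i, so every such lambda sees the final loop
# value if it is (re)invoked later (e.g. when a Lambda layer is rebuilt from a
# saved model), whereas makeFunc(i) binds the index at creation time.
def _late_binding_demo():
    data = np.arange(12).reshape(3, 4)
    late = [lambda y: y[:, i] for i in range(3)]   # all three slice column 2
    bound = [makeFunc(i) for i in range(3)]        # slice columns 0, 1 and 2
    return ([f(data)[0] for f in late],            # -> [2, 2, 2]
            [f(data)[0] for f in bound])           # -> [0, 1, 2]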
class BaseListenerNetwork(object):
def __init__(self, modelname, optfilename, lr, entropy_coefficient, config_dict):
self.modelname = modelname
self.optfilename = optfilename
self.lr = lr
self.entropy_coefficient = entropy_coefficient
assert config_dict, "config_dict does not exist"
self.config = config_dict
self.initialize_model()
self.build_train_fn()
def rebuild_train_fn(self, entropy_coefficient=None, lr=None):
if entropy_coefficient:
self.entropy_coefficient = entropy_coefficient
if lr:
self.lr = lr
self.build_train_fn()
def save(self):
self.listener_model.save(self.modelname)
def load(self):
self.listener_model = load_model(self.modelname)
def save_weights(self):
self.listener_model.save_weights(self.modelname)
def load_weights(self):
self.listener_model.load_weights(self.modelname)
def save_opt(self):
symbolic_weights = self.opt.weights
weight_values = K.batch_get_value(symbolic_weights)
with open(self.optfilename, 'wb') as f:
pickle.dump(weight_values, f)
def load_opt(self):
with open(self.optfilename, 'rb') as f:
weight_values = pickle.load(f)
self.opt.set_weights(weight_values)
def save_memory(self):
self.memory_model_weights = self.listener_model.get_weights()
def load_memory(self):
self.listener_model.set_weights(self.memory_model_weights)
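# Illustrative sketch (not part of the module): a minimal config_dict carrying
# the keys the listener classes below actually read. The numeric values are
# placeholders chosen for illustration only.
EXAMPLE_LISTENER_CONFIG = {
    "max_message_length": 5,   # longest speaker message, in symbols
    "alphabet_size": 10,       # size of the communication vocabulary
    "n_classes": 4,            # number of candidate targets per round
    "speaker_input_dim": 64,   # flat feature size of each candidate
    "speaker_input_w": 32,     # candidate image width (conv variants only)
    "speaker_input_h": 32,     # candidate image height (conv variants only)
    "listener_dim": 50,        # hidden size of the listener LSTM/Dense layers
}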
class PaperListenerNetwork(BaseListenerNetwork):
def __init__(self, modelname, optfilename, lr, entropy_coefficient, config_dict):
super(PaperListenerNetwork, self).__init__(modelname, optfilename, lr, entropy_coefficient, config_dict)
self.batch_speaker_message = []
self.batch_action = []
self.batch_candidates = []
self.batch_reward = []
def initialize_model(self):
"""
Batch input and output.
"""
if not os.path.exists(self.modelname):
## Define model
t_input = Input(shape=(self.config['max_message_length'],)) #Speakers Message, shape(bs, max_message_length)
c_inputs_all = Input(shape=(self.config['n_classes'], self.config['speaker_input_dim'])) #Candidates, shape(bs, n_class, speaker_input_dim)
inputs = [t_input, c_inputs_all]
z = Dense(self.config['speaker_input_dim'], activation='sigmoid')(t_input) #shape(bs, speaker_input_dim)
ts = []
us = []
for _ in range(self.config['n_classes']):
#c_input = Input(shape=(self.config['speaker_input_dim'],)) #shape(bs, speaker_input_dim)
c_input = Lambda(makeFunc(_))(c_inputs_all) #shape(bs, speaker_input_dim)
#t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x), axis=1)))(add([t_trans, Lambda(lambda x: -x)(c_input)])) #shape(bs, 1)
t = Dot(1, False)([z, c_input]) #shape(bs, 1)
ts.append(t)
us.append(c_input)
U = concatenate(ts) #shape(bs, n_classes)
us = concatenate(us)
final_output = Lambda(lambda x: K.softmax(x))(U) #shape(bs, n_classes)
#final_output = Dense(self.n_classes, activation='softmax', kernel_initializer='identity')(U)
#final_output = Dense(self.n_classes, activation='softmax')(U)
#f1 = Dense(50)(U)
#f2 = Lambda(lambda x: K.square(x))(f1)
#final_output = Dense(self.n_classes, activation='softmax')(f2)
self.listener_model = Model(inputs=inputs, outputs=[final_output, U, z, us])
#self.listener_model.compile(loss="categorical_crossentropy", optimizer=RMSprop(lr=self.config['listener_lr']))
else:
self.load()
#check!!!
def build_train_fn(self):
"""
Batch input and output.
"""
#direct prob input!!!
action_prob_placeholder = self.listener_model.output[0] #(bs, n_classes)
action_onehot_placeholder = K.placeholder(shape=(None, self.config['n_classes']), name="action_onehot") #(bs, n_classes)
reward_placeholder = K.placeholder(shape=(None,), name="reward") #(?)
action_prob = K.sum(action_prob_placeholder * action_onehot_placeholder, axis=1)
log_action_prob = K.log(action_prob)
loss = - log_action_prob * reward_placeholder
entropy = K.sum(action_prob_placeholder * K.log(action_prob_placeholder + 1e-10), axis=1)
#entropy = K.sum(entropy)
loss = loss + self.entropy_coefficient * entropy
loss = K.mean(loss)
self.opt = Adam(lr=self.lr)
self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights, loss=loss)
if os.path.exists(self.optfilename):
self.load_opt()
self.train_fn = K.function(
inputs = self.listener_model.input + [action_onehot_placeholder, reward_placeholder],
outputs=[loss, loss], updates=self.updates)
def reshape_message_candidates(self, speaker_message, candidates):
assert len(speaker_message.shape)==1 and speaker_message.shape[0]==self.config['max_message_length']
assert len(candidates.shape)==2 and candidates.shape[0]==self.config['n_classes'] and candidates.shape[1]==self.config['speaker_input_dim']
speaker_message = np.expand_dims(speaker_message, axis=0) #shape(1, max_message_length)
#X = [speaker_message] + [c.reshape([1,-1]) for c in candidates]
X = [speaker_message, np.expand_dims(candidates, axis=0)]
return X
def sample_from_listener_policy(self, speaker_message, candidates):
"""
Input and output are all just one instance. No batch-size dimension.
"""
X = self.reshape_message_candidates(speaker_message, candidates)
listener_output= self.listener_model.predict_on_batch(X)
y, U, z = listener_output[:3]
#us = listener_output[3]
listener_probs = y
listener_probs = np.squeeze(listener_probs) #shape(n_class)
listener_action = np.random.choice(np.arange(self.config['n_classes']), p=listener_probs) #int
U = np.squeeze(U)
return listener_action, listener_probs, U
def infer_from_listener_policy(self, speaker_message, candidates):
"""
Input and output are all just one instance. No batch-size dimension.
"""
X = self.reshape_message_candidates(speaker_message, candidates)
listener_output= self.listener_model.predict_on_batch(X)
y, U, z = listener_output[:3]
#us = listener_output[3]
listener_probs = y
listener_probs = np.squeeze(listener_probs) #shape(n_class)
listener_action = np.argmax(listener_probs) #int
U = np.squeeze(U)
return listener_action, listener_probs, U
def train_listener_policy_on_batch(self):
"""
Train as a batch. Loss is an float for a batch
"""
action_onehot = to_categorical(self.batch_action, num_classes=self.config['n_classes'])
#self.batch_candidates = np.array(self.batch_candidates).transpose([1, 0, 2]).tolist() #shape(num_classes, bs, speaker_input_dim)
#self.batch_candidates = np.swapaxes(np.array(self.batch_candidates), 0, 1).tolist() #shape(num_classes, bs, speaker_input_dim)
#self.batch_candidates = np.swapaxes(np.array(self.batch_candidates), 0, 1).astype('float32').tolist() #shape(num_classes, bs, speaker_input_dim)
#self.batch_candidates = [np.array(_) for _ in self.batch_candidates]
#_loss, _entropy = self.train_fn([self.batch_speaker_message] + self.batch_candidates + [action_onehot, self.batch_reward] )
_loss, _entropy = self.train_fn([np.array(self.batch_speaker_message), self.batch_candidates, action_onehot, self.batch_reward] )
#print("Listener loss: ", _loss)
self.batch_speaker_message = [] #shape(bs, max_message_length)
self.batch_action = [] #shape(bs)
self.batch_candidates = [] #shape(bs, n_classes, speaker_input_dim)
self.batch_reward = [] #shape(bs)
def remember_listener_training_details(self, speaker_message, action, action_probs, target, candidates, reward):
"""
Inputs are just one instance. No batch-size dimension.
"""
self.batch_speaker_message.append(speaker_message)
self.batch_action.append(action)
self.batch_candidates.append(candidates)
self.batch_reward.append(reward)
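# Illustrative sketch (not part of the module): one round of the referential
# game as the methods above appear intended to be used -- sample an action from
# the listener policy, score it against the target index, buffer the transition,
# and periodically run the REINFORCE update. The 0/1 reward and the calling
# pattern are assumptions; listener, speaker_message, candidates and target are
# expected to come from the surrounding training script.
def _listener_training_round(listener, speaker_message, candidates, target):
    action, probs, _scores = listener.sample_from_listener_policy(speaker_message, candidates)
    reward = 1.0 if action == target else 0.0
    listener.remember_listener_training_details(speaker_message, action, probs,
                                                target, candidates, reward)
    return reward
# After a batch of rounds: listener.train_listener_policy_on_batch()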
class PaperListenerNetwork_rnn(PaperListenerNetwork):
def reshape_message_candidates(self, speaker_message, candidates):
#if not self.config['fixed_length']:
# assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
#else:
# assert len(speaker_message.shape)==1 and speaker_message.shape[0]==self.config['max_message_length']
assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
assert len(candidates.shape)==2 and candidates.shape[0]==self.config['n_classes'] and candidates.shape[1]==self.config['speaker_input_dim']
speaker_message = np.expand_dims(to_categorical(speaker_message, self.config['alphabet_size']), axis=0) #shape(1, message_length, alphabet_size)
#X = [speaker_message] + [c.reshape([1,-1]) for c in candidates]
X = [speaker_message, np.expand_dims(candidates, axis=0)]
return X
def initialize_model(self):
"""
Batch input and output.
"""
## Define model
if not os.path.exists(self.modelname):
t_input = Input(shape=(None, self.config['alphabet_size'],)) #Speakers Message, shape(bs, message_length, alphabet_size)
#c_inputs_all = Input(shape=(self.config['n_classes'], self.config['speaker_input_dim'])) #Candidates, shape(bs, n_classes, speaker_input_dim)
c_inputs_all = Input(shape=(None, self.config['speaker_input_dim'])) #Candidates, shape(bs, n_classes, speaker_input_dim)
inputs = [t_input, c_inputs_all]
lstm = LSTM(self.config['listener_dim'], activation='tanh', return_sequences=False, return_state=True)
o, sh, sc = lstm(t_input)
z = Dense(self.config['listener_dim'], activation='sigmoid')(o) #shape(bs, listener_dim)
ts = []
us = []
u = Dense(self.config['listener_dim'], activation='sigmoid')
for _ in range(self.config['n_classes']):
#c_input = Input(shape=(self.config['speaker_input_dim'],)) #shape(bs, speaker_input_dim)
c_input = Lambda(makeFunc(_))(c_inputs_all)
uc = u(c_input)
t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x), axis=1)))(add([z, Lambda(lambda x: -x)(uc)])) #shape(bs, 1)
#t = Dot(1, False)([z,uc]) #shape(bs, 1)
ts.append(t)
us.append(uc)
U = concatenate(ts) #shape(bs, n_classes)
us = concatenate(us)
final_output = Lambda(lambda x: K.softmax(x))(U)
#shape(bs, n_classes)
self.listener_model = Model(inputs=inputs, outputs=[final_output, U, z, us])
#self.listener_model.compile(loss="categorical_crossentropy", optimizer=RMSprop(lr=self.config['listener_lr']))
else:
self.load()
#check!!!
def set_updates(self):
self.opt = Adam(lr=self.lr)
#adam = RMSprop(lr=self.lr)
self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights, loss=self.loss)
if os.path.exists(self.optfilename):
self.load_opt()
def build_train_fn(self):
"""
Batch input and output.
"""
#direct prob input!!!
action_prob_placeholder = self.listener_model.output[0] #(bs, n_classes)
#action_onehot_placeholder = K.placeholder(shape=(None, self.config['n_classes']), name="action_onehot") #(bs, n_classes)
action_onehot_placeholder = K.placeholder(shape=(None, None), name="action_onehot") #(bs, n_classes)
reward_placeholder = K.placeholder(shape=(None,), name="reward") #(?)
action_prob = K.sum(action_prob_placeholder*action_onehot_placeholder, axis=1)
log_action_prob = K.log(action_prob)
loss = - log_action_prob*reward_placeholder
entropy = K.sum(action_prob_placeholder * K.log(action_prob_placeholder + 1e-10), axis=1)
#entropy = K.sum(entropy)
loss = loss + self.entropy_coefficient * entropy
loss = K.mean(loss)
self.loss =loss
self.set_updates()
self.train_fn = K.function(
inputs = self.listener_model.input + [action_onehot_placeholder, reward_placeholder],
outputs=[loss, loss], updates=self.updates)
def remember_listener_training_details(self, speaker_message, action, action_probs, target, candidates, reward):
"""
Inputs are just one instance. No batch-size dimension.
"""
#if not self.config['fixed_length']:
toadd = self.config['max_message_length'] - len(speaker_message)
for _ in range(toadd):
speaker_message = np.append(speaker_message, -1)
speaker_message = to_categorical(speaker_message, self.config['alphabet_size']) #shape(message_length, alphabet_size)
self.batch_speaker_message.append(speaker_message)
self.batch_action.append(action)
self.batch_candidates.append(candidates)
self.batch_reward.append(reward)
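# Illustrative check (not part of the module): the -1 padding used above,
# combined with to_categorical, one-hot encodes the padding positions as the
# *last* alphabet index (via numpy's negative indexing), so the final symbol
# effectively doubles as the pad token.
def _padding_demo(alphabet_size=5, max_message_length=4):
    msg = np.array([2, 0])                                   # a 2-symbol message
    padded = np.append(msg, [-1] * (max_message_length - len(msg)))
    onehot = to_categorical(padded, alphabet_size)
    return onehot.argmax(axis=1)                             # -> array([2, 0, 4, 4])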
class PaperListenerNetwork_rnn_conv(PaperListenerNetwork_rnn):
def __init__(self, modelname, optfilename, lr, entropy_coefficient, pretrain_convmodel_file, traincnn, config):
self.pretrain_convmodel_file = pretrain_convmodel_file
self.traincnn = traincnn
super(PaperListenerNetwork_rnn_conv, self).__init__(modelname, optfilename, lr, entropy_coefficient, config)
def initialize_model(self):
"""
Batch input and output.
"""
if not os.path.exists(self.modelname):
## Define model
self.conv_model = convnet_com(self.config['speaker_input_w'], self.config['speaker_input_h'], 3, preloadfile=self.pretrain_convmodel_file, name='conv_model_l')
t_input = Input(shape=(None, self.config['alphabet_size'],)) #Speakers Message, shape(bs, message_length, alphabet_size)
c_inputs_all = Input(shape=(self.config['n_classes'], self.config['speaker_input_w'], self.config['speaker_input_h'], 3), name='image_l') #Candidates, shape(bs, speaker_input_w, speaker_input_h, 3)
inputs = [t_input, c_inputs_all]
lstm = LSTM(self.config['listener_dim'], activation='tanh', return_sequences=False, return_state=True)
o, sh, sc = lstm(t_input)
z = Dense(self.config['listener_dim'], activation='sigmoid')(o) #shape(bs, listener_dim)
#u = Dense(self.config['listener_dim'], activation='sigmoid',kernel_regularizer=regularizers.l2(0.01))
u = Dense(self.config['listener_dim'], activation='sigmoid')
ts = []
us = []
for _ in range(self.config['n_classes']):
#c_input = Input(shape=(self.config['speaker_input_w'],self.config['speaker_input_h'],3)) #speaker_model.input[0], shape(bs, speaker_input_w, speaker_input_h, 3)
#c_input = Lambda(lambda x: x[:, _])(c_inputs_all)
c_input = Lambda(makeFunc(_))(c_inputs_all)
conv_outputs = self.conv_model(c_input)
uc = u(conv_outputs)
t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x),axis=1)))(add([z, Lambda(lambda x: -x)(uc)])) #shape(bs, 1)
#t = Dot(1, False)([z,uc]) #shape(bs, 1)
ts.append(t)
us.append(uc)
U = concatenate(ts) #shape(bs, n_classes)
us = concatenate(us)
final_output = Lambda(lambda x: K.softmax(x))(U) #shape(bs, n_classes)
self.listener_model = Model(inputs=inputs, outputs=[final_output, U, z, us])
#self.listener_model.compile(loss="categorical_crossentropy", optimizer=RMSprop(lr=self.config['listener_lr']))
else:
self.load()
#check!!!
self.conv_model = [l for l in self.listener_model.layers if l.name=='conv_model_l'][0]
#self.listener_model.layers[6].kernel_regularizer = None
#self.internal_model = Model(inputs=self.listener_model.inputs, outputs=[self.listener_model.layers[7].get_output_at(_) for _ in range(2)] + [self.listener_model.layers[6].output, self.listener_model.layers[-2].output]) #dot
#self.internal_model = Model(inputs=self.listener_model.inputs, outputs=[self.listener_model.layers[6].get_output_at(_) for _ in range(2)] + [self.listener_model.layers[7].output, self.listener_model.layers[-2].output]) #euc
self.trainable_weights_others = []
self.trainable_weights_conv = []
for layer in self.listener_model.layers:
if layer.name!='conv_model_l':
self.trainable_weights_others.extend(layer.trainable_weights)
else:
self.trainable_weights_conv.extend(layer.trainable_weights)
def set_updates(self):
self.opt = Adam(lr=self.lr)
#self.opt = RMSprop(lr=self.lr)
#opt = SGD(lr=self.lr, momentum=0.9, decay=1e-6, nesterov=True)
if not self.traincnn:
#self.updates = self.opt.get_updates(params=self.trainable_weights_others+self.trainable_weights_rnn, loss=self.loss)
self.updates = self.opt.get_updates(params=self.trainable_weights_others, loss=self.loss)
else:
self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights, loss=self.loss)
if os.path.exists(self.optfilename):
self.load_opt()
def reshape_message_candidates(self, speaker_message, candidates):
#if not self.config['fixed_length']:
# assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
#else:
# assert len(speaker_message.shape)==1 and speaker_message.shape[0]==self.config['max_message_length']
assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
assert len(candidates.shape)==4 and candidates.shape[0]==self.config['n_classes'] and candidates.shape[1]==self.config['speaker_input_w'] and candidates.shape[2]==self.config['speaker_input_h']
speaker_message = np.expand_dims(to_categorical(speaker_message, self.config['alphabet_size']), axis=0) #shape(1, ?, alphabet_size)
X = [speaker_message, np.expand_dims(candidates, axis=0)]
return X
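# Illustrative sketch (not part of the module): the two candidate-scoring rules
# used by the classes above, written in plain numpy. PaperListenerNetwork scores
# a candidate embedding against the message embedding z with a dot product,
# while the RNN/conv variants use the negative squared Euclidean distance; in
# both cases the scores are softmaxed over the candidates.
def _candidate_scores(z, candidate_embeddings):
    dot_scores = candidate_embeddings.dot(z)                         # shape (n_classes,)
    neg_sq_dist = -np.sum((candidate_embeddings - z) ** 2, axis=1)   # shape (n_classes,)
    return softmax(dot_scores), softmax(neg_sq_dist)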
'''
class PaperListenerNetwork_rnn_conv_color(PaperListenerNetwork_rnn):
def initialize_model(self):
"""
Batch input and output.
"""
if not os.path.exists(self.modelname):
## Define model
t_input = Input(shape=(None, self.config['alphabet_size'],)) #Speakers Message, shape(bs, message_length, alphabet_size)
c_inputs_all = Input(shape=(self.config['n_classes'], 8))
inputs = [t_input, c_inputs_all]
lstm = LSTM(self.config['listener_dim'], activation='tanh', return_sequences=False, return_state=True)
o, sh, sc = lstm(t_input)
z = Dense(self.config['listener_dim'], activation='sigmoid')(o) #shape(bs, listener_dim)
u = Dense(self.config['listener_dim'], activation='sigmoid')
ts = []
for _ in range(self.config['n_classes']):
#c_input = Input(shape=(self.config['speaker_input_w'],self.config['speaker_input_h'],3)) #speaker_model.input[0], shape(bs, speaker_input_w, speaker_input_h, 3)
#c_input = Lambda(lambda x: x[:, _])(c_inputs_all)
c_input = Lambda(makeFunc(_))(c_inputs_all)
#conv_outputs = conv_model(c_input)
#conv_outputs = c_input
uc = u(c_input)
t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x),axis=1)))(add([z, Lambda(lambda x: -x)(uc)])) #shape(bs, 1)
ts.append(t)
U = concatenate(ts) #shape(bs, n_classes)
final_output = Lambda(lambda x: K.softmax(x))(U) #shape(bs, n_classes)
self.listener_model = Model(inputs=inputs, outputs=[final_output, z, U])
#self.listener_model.compile(loss="categorical_crossentropy", optimizer=RMSprop(lr=self.config['listener_lr']))
else:
self.load()
#check!!!
self.trainable_weights_rnn = self.listener_model.trainable_weights[:3]
self.trainable_weights_others = self.listener_model.trainable_weights[3:]
def set_updates(self):
self.opt = Adam(lr=self.lr)
#opt = RMSprop(lr=self.lr)
#opt = SGD(lr=self.lr, momentum=0.9, decay=1e-6, nesterov=True)
self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights, loss=self.loss)
if os.path.exists(self.optfilename):
self.load_opt()
def reshape_message_candidates(self, speaker_message, candidates):
#if not self.config['fixed_length']:
# assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
#else:
# assert len(speaker_message.shape)==1 and speaker_message.shape[0]==self.config['max_message_length']
#pdb.set_trace()
assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
assert len(candidates.shape)==2 and candidates.shape[0]==self.config['n_classes'] and candidates.shape[1]==8
speaker_message = np.expand_dims(to_categorical(speaker_message, self.config['alphabet_size']), axis=0) #shape(1, ?, alphabet_size)
X = [speaker_message, np.expand_dims(candidates, axis=0)]
return X
class PaperListenerNetwork_direct(BaseListenerNetwork):
def __init__(self, modelname, config_dict):
assert False #TOMODIFY
super(PaperListenerNetwork_direct, self).__init__(modelname, config_dict)
self.batch_speaker_message = []
self.batch_action = []
self.batch_candidates = []
self.batch_reward = []
def initialize_model(self):
"""
Batch input and output.
"""
if not os.path.exists(self.modelname):
## Define model
## Speakers Message
t_input = Input(shape=(self.config['max_message_length'],)) #shape(bs, max_message_length)
t_trans = Dense(self.config['speaker_input_dim'],
#kernel_initializer=keras.initializers.Identity(gain=1.0),
#bias_initializer='zeros',
activation='sigmoid')(t_input) #shape(bs, speaker_input_dim)
inputs = [t_input]
ts = []
for _ in range(self.config['n_classes']):
c_input = Input(shape=(self.config['speaker_input_dim'],)) #shape(bs, speaker_input_dim)
t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x),axis=1)))(add([t_trans, Lambda(lambda x: -x)(c_input)])) #shape(bs, 1)
inputs.append(c_input)
ts.append(t)
U = concatenate(ts) #shape(bs, n_classes)
listener_probs = U
#listener_probs = Lambda(lambda x: K.softmax(x))(U) #shape(bs, n_classes)
listener_infer_action = Lambda(lambda x: K.argmax(x))(U) #shape(bs)
target_onehot_placeholder = Input(shape=(self.config['n_classes'],), name="action_onehot") #(bs, n_classes)
listener_prob_2 = dot([listener_probs, target_onehot_placeholder], axes=1)
listener_prob_2 = Lambda(lambda x:K.squeeze(x, axis=1))(listener_prob_2)
self.listener_model = Model(inputs=inputs + [target_onehot_placeholder], outputs=[listener_probs, listener_infer_action, t_trans, listener_prob_2])
else:
self.load()
#check!!!
def build_train_fn(self):
"""
Batch input and output.
"""
#direct prob input!!!
#reward_placeholder = K.placeholder(shape=(None,), name="reward") #(?)
action_prob = self.listener_model.output[3]
#loss = K.log(-action_prob)*reward_placeholder
#loss = - action_prob * reward_placeholder
loss = - action_prob
loss = K.mean(loss)
self.opt = Adam(lr=self.config['listener_lr'])
self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights,loss=loss)
#if os.path.exists(self.optfilename):
# self.load_opt()
self.train_fn = K.function(
#inputs = self.listener_model.input + [reward_placeholder],
inputs = self.listener_model.input,
outputs=[loss, loss], updates=self.updates)
def sample_from_listener_policy(self, speaker_message, candidates):
"""
Input and output are all just one instance. No batch-size dimension.
"""
X = self.reshape_message_candidates(speaker_message, candidates) + [np.zeros([1, self.config['n_classes']])]
listener_probs, listener_infer_action, _t_trans, _lp2 = self.listener_model.predict_on_batch(X)
listener_probs = np.squeeze(listener_probs) #shape(n_class)
#listener_probs = scipy.special.softmax(listener_probs)
listener_probs = softmax(listener_probs)
#pdb.set_trace() #???norm???
listener_action = np.random.choice(np.arange(self.config['n_classes']), p=listener_probs) #int
return listener_action, listener_probs
def infer_from_listener_policy(self, speaker_message, candidates):
"""
Input and output are all just one instance. No batch-size dimension.
components[1][:-2]
return components[1]
#isConstructorOrDestructor: string, list
#A variant of the constructor/destructor check designed to simplify nesting issues.
#This requires less strict matching, but I find it difficult to think of non-contrived
#examples in the data where this would fail.
def isConstructorOrDestructorWithList(self, line, classContextList):
result = False
for nextClass in classContextList:
result = result or self.isConstructorOrDestructor(line, nextClass)
if(result):
return result
return result
#Given a string of text and a name of a surrounding class, decide if this is a constructor
#or destructor for the class.
def isConstructorOrDestructor(self, line, classContext):
if(not self.langSwitch.isValidClassName(classContext)):
return False
temp = self.langSwitch.cleanConstructorOrDestructorLine(line)
constructPatt = self.langSwitch.getConstructorOrDestructorRegex(classContext)
if(self.config_info.DEBUG):
print(("Class context: " + classContext))
try:
print(("Checking if a constructor/destructor: " + temp))
except:
print(("Checking if a constructor/destructor: " + str(temp, 'utf-8', errors='ignore')))
return re.search(constructPatt, temp,flags=re.IGNORECASE)
def getBlockPattern(self,line,keywords):
for keyword in keywords:
(k, matched) = self.keywordMatch(keyword[0], line)
if(matched):
return k
return None
#There are many structures that can be mistaken for a function. We'll try to
#ignore as many of them as possible.
#To start, lets use a regex expression with "<return type> <name> (<0+ parameters>) {"
#Also, we should handle template methods like: "template <class type> <return type> <name<type>>(<0+ parameters>) {""
#Returns a string matching the function pattern or "" if no pattern match found.
def getFunctionPattern(self, line):
#Remove potentially problematic structures
temp = self.langSwitch.cleanFunctionLine(line)
if(self.config_info.DEBUG):
try:
print(("Checking if function: \'" + temp + "\'"))
except:
print(("Checking if function: \'" + str(temp, 'utf-8', errors='ignore') + "\'"))
#Select patterns for our language and check against them
funcPatterns = self.langSwitch.getFunctionRegexes()
if(self.config_info.DEBUG):
print(("Checking " + str(len(funcPatterns)) + " patterns."))
for p in funcPatterns:
result = re.search(p, temp)
if(result != None):
if(self.config_info.DEBUG):
print(("Found match with pattern: " + p))
return result.group(0)
return ""
def isFunction(self, line):
return (self.getFunctionPattern(line) != "")
#Determine if the given line is an assignment block using the {
def isAssignment(self, line):
return re.search(assignPattern, line)
#String -> String
#Given a line of code from a diff statement, return the line with any
#string literals removed.
def removeStrings(self, line):
return self.langSwitch.removeStrings(line)
#String, Boolean, String, String, String -> (String, String, Boolean, String, String)
#Given a line of code from a diff statement, a marker if prior lines were a multiblock
#comment, the marker for the type of line, a marker for the type of comment, and
#the current running function name, and returns a 6-tuple containing
#The modified line, the modified line type, the changed commentFlag, the commentType,
#the running function name, and any changes if inside a function (this forces a continue)
def removeComments(self, line, commentFlag, lineType, commentType, functionName, phase):
#Thoughts: if inside block comment and we've added or deleted that line, it can be ignored
#If it exists as code and has been commented out or added back in, it must have a corresponding line.
#However, if inside a comment and the line is unmodified, we need to find if /* has been added or removed
#When it is removed, we should consider the unmodified code as a block of added code. When it is added
#We should consider it as a block of deleted code. (The /* and */ can be ignored, as if they contain code
#They must also have a corresponding section of added or deleted code.)
fChange = UNMARKED
#Remove single line multi block comments...
#line = re.sub(commentPattern, "", line)
line = self.langSwitch.cleanSingleLineBlockComment(line)
if(self.langSwitch.isBlockCommentStart(line)):
commentFlag = True
#We need to consider the content of the line before the /*
line = self.langSwitch.beforeBlockCommentStart(line)
commentType = lineType
if(line.strip() == ""):
if(phase == LOOKFOREND): #Make sure to count this line if inside function before continuing
if(lineType == ADD):
fChange = COMADD
elif(lineType == REMOVE):
fChange = COMDEL
else:
fChange = UNCHANGED
else:
if(lineType == ADD):
fChange = TOTALADD
elif(lineType == REMOVE):
fChange = TOTALDEL
else:
fChange = UNCHANGED
line = ""
elif(self.langSwitch.isBlockCommentEnd(line)):
if(commentFlag): #Normal case were whole /* ... */ comment is changed
commentFlag = False
elif(phase == LOOKFORNAME): #Case where only bottom part of comment is changed and looking for function name.
functionName = "" #Clear the function name
index = self.langSwitch.getBlockCommentEnd(line)
if(len(line) > index + 2): #Case where there is code after comment end.
line = line[index + 2:]
else:
if(phase == LOOKFOREND): #Make sure to count this line if inside function before continuing
if(lineType == ADD):
fChange = COMADD
elif(lineType == REMOVE):
fChange = COMDEL
else:
fChange = UNCHANGED
else:
if(lineType == ADD):
fChange = TOTALADD
elif(lineType == REMOVE):
fChange = TOTALDEL
else:
fChange = UNCHANGED
line = ""
elif(commentFlag): #Inside a block comment
if(lineType == ADD):
line = ""
if(phase == LOOKFOREND): #Make sure to count this line if inside function before continuing
fChange = COMADD
else: #Otherwise, just add it to the total count of lines seen...
fChange = TOTALADD
elif(lineType == REMOVE):
line = ""
if(phase == LOOKFOREND): #Make sure to count this line if inside function before continuing
fChange = COMDEL
else:
fChange = TOTALDEL
if(lineType == OTHER): #If the line is unmodified
if(commentType == ADD): #This line has been commented out, with no corresponding block
lineType = REMOVE
elif(commentType == REMOVE): #This line was commented out, but is now part of code again.
lineType = ADD
else: #Unmodified line in an unmodified comment can be skipped
fChange = UNCHANGED
line = ""
#Remove single line comments
#line = re.sub(commentPattern2, "", line)
line = self.langSwitch.cleanSingleLineComment(line)
return (line,lineType, commentFlag, commentType, functionName, fChange)
#If we have made changes to the comment structure, we want to count changes to the current
#logChunk, function, and blocks separately so we can skip the rest of the changes.
def modifyCountForComment(self, fChange, lineType, keywordDict, keywords, ftotal_add, ftotal_del):
includedKeywords = [k for k in keywords if k[1] == INCLUDED]
if(fChange == COMADD):
if(self.sT.getBlockContext(lineType) != []):
keywordDict = self.incrementBlockContext(keywordDict, lineType, includedKeywords, self.sT.getBlockContext(lineType))
if(self.sT.getFuncContext(lineType) != []):
ftotal_add += 1
self.total_add += 1
elif(fChange == COMDEL):
if(self.sT.getBlockContext(lineType) != []):
keywordDict = self.incrementBlockContext(keywordDict, lineType, includedKeywords, self.sT.getBlockContext(lineType))
if(self.sT.getFuncContext(lineType) != []):
ftotal_del += 1
self.total_add += 1
elif(fChange == TOTALADD):
self.total_add += 1
elif(fChange == TOTALDEL):
self.total_del += 1
elif(fChange != UNCHANGED):
assert("Not a valid fChange type.")
return (keywordDict, ftotal_add, ftotal_del)
#Update the counts of the total log chunk and function in the case of a normal, non comment
#line.
def updateCounts(self, lineType, ftotal_add, ftotal_del, phase, startFlag):
if(lineType == ADD):
self.total_add += 1 #This tracks + for whole chunks.
if(phase == LOOKFOREND):
if(startFlag==0):
ftotal_add += 1
elif(lineType == REMOVE):
self.total_del += 1
if(phase == LOOKFOREND):
if(startFlag==0):
ftotal_del += 1
else:
assert(lineType==OTHER)
return (ftotal_add, ftotal_del)
#String -> [lineType, String]
#Given a line in the diff, return a list of 2 with the first line being ADD/REMOVE/OTHER and the second being
#the line with the +/- removed, if applicable
def markLine(self, line):
if(line.startswith("+")):
return [ADD, line[1:]]
elif(line.startswith("-")):
return [REMOVE, line[1:]]
else:
if(len(line) > 0 and line[0] == " "):
return [OTHER, line[1:]] #Remove whitespace from +/- row, important for languages like python
elif(len(line) > 0 and (line[0] == "/" or line[0] == "\\")):
return [META, line]
else:
return [OTHER, line]
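# Illustrative standalone sketch (separate from the class): the same diff-line
# classification markLine performs, with plain string labels standing in for
# the module's ADD/REMOVE/OTHER/META constants.
def _classify_diff_line(line):
    if line.startswith("+"):
        return ("ADD", line[1:])
    elif line.startswith("-"):
        return ("REMOVE", line[1:])
    elif line[:1] == " ":
        return ("OTHER", line[1:])   # strip the leading space of context lines
    elif line[:1] in ("/", "\\"):
        return ("META", line)        # e.g. "\\ No newline at end of file"
    return ("OTHER", line)
# _classify_diff_line("+x = 1") -> ("ADD", "x = 1")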
#A Check to see if our regexes match class name
def checkForClassName(self, searchString, classContext):
if(self.langSwitch.isObjectOrientedLanguage()):
className = self.getClassPattern(searchString) #Would C++ constructors outside class A start with A::?
if(className != ""):
if(self.config_info.DEBUG):
try:
print(("Class:" + className))
except:
print(("Class:" + str(className, 'utf-8', errors='ignore')))
classContext.append(self.extractClassName(className)) #Push onto the class list
return classContext
#When we've seen an increase in scope, this function handles the preparation for checking the regex,
#updates the scope stacks, and maintains any additional information necessary (such as whether we've entered a class)
def checkForFunctionName(self, phase, line, lineType, lineNum, functionName, classContext, funcStart, startFlag, ftotal_add, ftotal_del):
if(self.config_info.DEBUG):
print("Scope increase while searching for function.")
if(self.sT.scopeIncreaseCount(line, lineType) > 1):
if(self.config_info.DEBUG):
print("Parsing of multiscope increases like: ")
print(line)
print("is not yet supported.")
raise UnsupportedScopeException("This ordering of scope changes is not yet supported.")
#Check for class context first in these cases
if(self.sT.changeScopeFirst()):
classContext | |
tey ]
# # [ 0 0 1 ][ 1 ] = [ 1 ]
# # or
# # [ x y 1 0 0 0 ] [ A11 ] = [ tex ]
# # [ 0 0 0 x y 1 ] [ A12 ] = [ tey ]
# # [ A13 ]
# # [ A21 ]
# # [ A22 ]
# # [ A23 ]
# # With rows repeated for each point.
# # Solve for Axx values from the known coordinates
# # Then substitute the 2D intersection coordinates as (x,y)
# # and multiply to get (tex,tey), the desired texture coordinates.
# numpoints=np.count_nonzero(polysurf.vertexids[polysurf_polynum,:] >= 0)
# centroid = np.mean(polysurf.vertices[polysurf.vertexids[polysurf_polynum,:numpoints],:],axis=0)
# coordvals = (polysurf.vertices[polysurf.vertexids[polysurf_polynum,:numpoints],:]-centroid.reshape(1,3)).T # coordvals is the coordinates relative to centroid, 3 x numpoints
# texcoordvals = self.texcoord[polysurf_polynum,:numpoints].T # texcoordvals is the texture coordinates, 2 rows by numpoints cols... # Note that textures are in range 0...1 by convention
#
# # calculate SVD
# (U,s,Vt)=scipy.linalg.svd(coordvals,full_matrices=True,compute_uv=True)
#
# # extract columns for 2d coordinate basis vectors
# # want columns that correspond to the largest two
# # singular values
# xcolindex=0
# ycolindex=1
#
# if abs(s[0]) < abs(s[1]) and abs(s[0]) < abs(s[2]):
# # element 0 is smallest s.v.
# xcolindex=2
# pass
# if abs(s[1]) < abs(s[2]) and abs(s[1]) < abs(s[0]):
# # element 1 is smallest s.v.
# ycolindex=2
# pass
#
# To2D=U[:,np.array((xcolindex,ycolindex))].T # 2x3... Rows of To2D are x and y basis vectors, respectively
#
# coordvals2d = np.dot(To2D,coordvals) # 2 rows by numpoints cols... in 2D basis relative to centroid
#
# TexXformMtx=np.zeros((2*numpoints,6),dtype='d')
# TexXformMtx[:(2*numpoints):2,0]=coordvals2d[0,:] # assign 'x' elements
# TexXformMtx[:(2*numpoints):2,1]=coordvals2d[1,:] # assign 'y' elements
# TexXformMtx[:(2*numpoints):2,2]=1 # assign '1' entries
# TexXformMtx[1:(2*numpoints):2,3]=coordvals2d[0,:] # assign 'x' elements
# TexXformMtx[1:(2*numpoints):2,4]=coordvals2d[1,:] # assign 'y' elements
# TexXformMtx[1:(2*numpoints):2,5]=1 # assign '1' entries
#
# TexCoordVec=np.zeros((2*numpoints),dtype='d')
# TexCoordVec[:(2*numpoints):2] = texcoordvals[0,:] # assign tex
# TexCoordVec[1:(2*numpoints):2] = texcoordvals[1,:] # assign tey
#
# (AijVals,residuals,rank,lstsq_s) = np.linalg.lstsq(TexXformMtx,TexCoordVec)
# AijMat=AijVals.reshape(2,3) # reshape to 2x3
# AijMatExt = np.concatenate((AijMat,np.array((0.0,0.0,1.0),dtype='d').reshape(1,3)),axis=0) # Add 0.0, 0.0, 1.0 row to bottom of matrix
#
# AijMatInv=np.linalg.inv(AijMatExt)
#
# return (centroid,s,xcolindex,ycolindex,To2D, AijMat,AijMatInv)
def eval_texcoord_polygonvertex(self,polysurf,polysurf_polynum,polysurf_vertexnum):
# Can supply vectors as polysurf_polynum and/or polysurf_vertexnum
#texcoords = self.texcoord[polysurf_polynum,polysurf_vertexnum,:]
firstidx=polysurf.vertexidx_indices[polysurf_polynum]
texcoords = self.texcoord[self.texcoordidx[firstidx+polysurf_vertexnum],:]
return texcoords
def invalidateprojinfo(self):
self.inplane2texcoords = None
self.texcoords2inplane = None
pass
def buildprojinfo(self,polysurf):
# See also scope_coin3d.cpp:DetermineTexXform
# see also polygonalsurface_intrinsicparameterization.py/_determine_tex_xform()
# and preceding steps in polygonalsurface.py:buildprojinfo()
# 5. Evaluate a transform
# [ A11 A12 A13 ][ x ] = [ tex ]
# [ A21 A22 A23 ][ y ] = [ tey ]
# [ 0 0 1 ][ 1 ] = [ 1 ]
# or
# [ x y 1 0 0 0 ] [ A11 ] = [ tex ]
# [ 0 0 0 x y 1 ] [ A12 ] = [ tey ]
# [ A13 ]
# [ A21 ]
# [ A22 ]
# [ A23 ]
# With rows repeated for each point.
# Solve for Axx values from the known coordinates
# Then substitute the 2D intersection coordinates as (x,y)
# and multiply to get (tex,tey), the desired texture coordinates.
if self.inplane2texcoords is not None:
return # already built
numpolys=polysurf.vertexidx_indices.shape[0]
self.inplane2texcoords = np.zeros((numpolys,2,3),dtype='d')
self.texcoords2inplane = np.zeros((numpolys,3,3),dtype='d')
for polynum in range(numpolys):
firstidx=polysurf.vertexidx_indices[polynum]
numpoints=polysurf.numvertices[polynum]
centroid = polysurf.refpoints[polynum,:]
coordvals = (polysurf.vertices[polysurf.vertexidx[firstidx:(firstidx+numpoints)],:]-centroid.reshape(1,3)).T # coordvals is the coordinates relative to centroid, 3 x numpoints
To2D = polysurf.inplanemats[polynum,:,:]
coordvals2d = np.dot(To2D,coordvals) # 2 rows by numpoints cols... in 2D basis relative to centroid
texcoordvals = self.texcoord[self.texcoordidx[firstidx:(firstidx+numpoints)],:].T # texcoordvals is the texture coordinates, 2 rows by numpoints cols... # Note that textures are in range 0...1 by convention
TexXformMtx=np.zeros((2*numpoints,6),dtype='d')
TexXformMtx[:(2*numpoints):2,0]=coordvals2d[0,:] # assign 'x' elements
TexXformMtx[:(2*numpoints):2,1]=coordvals2d[1,:] # assign 'y' elements
TexXformMtx[:(2*numpoints):2,2]=1 # assign '1' entries
TexXformMtx[1:(2*numpoints):2,3]=coordvals2d[0,:] # assign 'x' elements
TexXformMtx[1:(2*numpoints):2,4]=coordvals2d[1,:] # assign 'y' elements
TexXformMtx[1:(2*numpoints):2,5]=1 # assign '1' entries
TexCoordVec=np.zeros((2*numpoints),dtype='d')
TexCoordVec[:(2*numpoints):2] = texcoordvals[0,:] # assign tex
TexCoordVec[1:(2*numpoints):2] = texcoordvals[1,:] # assign tey
(AijVals,residuals,rank,lstsq_s) = np.linalg.lstsq(TexXformMtx,TexCoordVec,rcond=-1)
AijMat=AijVals.reshape(2,3) # reshape to 2x3
AijMatExt = np.concatenate((AijMat,np.array((0.0,0.0,1.0),dtype='d').reshape(1,3)),axis=0) # Add 0.0, 0.0, 1.0 row to bottom of matrix
# NOTE: Possible bug: This matrix inversion (next line) will
# fail if the polygon has zero area in texture space due to
# (for example) limited precision in writing down the
# texture coordinates in the data file.
#
# Not sure what to do in this case...
AijMatInv=np.linalg.inv(AijMatExt)
# Assign AijMat
self.inplane2texcoords[polynum,:,:]=AijMat
self.texcoords2inplane[polynum,:,:]=AijMatInv
pass
pass
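# Illustrative standalone sketch (separate from the class): the least-squares
# fit buildprojinfo sets up, for a single facet. Each in-plane point (x, y)
# contributes the rows [x y 1 0 0 0] and [0 0 0 x y 1], and the solved 2x3
# matrix A maps centroid-relative in-plane coordinates to texture coordinates.
def _fit_inplane_to_texcoords(coords2d, texcoords):
    # coords2d: (numpoints, 2) in-plane vertex coordinates relative to centroid
    # texcoords: (numpoints, 2) texture coordinates of the same vertices
    n = coords2d.shape[0]
    M = np.zeros((2 * n, 6))
    M[0::2, 0:2] = coords2d
    M[0::2, 2] = 1.0
    M[1::2, 3:5] = coords2d
    M[1::2, 5] = 1.0
    rhs = texcoords.reshape(-1)                       # interleaved (tex, tey) pairs
    aij = np.linalg.lstsq(M, rhs, rcond=-1)[0]
    return aij.reshape(2, 3)                          # [[A11 A12 A13], [A21 A22 A23]]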
def _evaluate_curvature(self,polysurf,polynum,u,v):
# Evaluate the curvature, within polygon # polynum
# at (u,v) coordinates... (u,v) in texture coordinate
# range [0...1]
# ... C accelerated version available
if polynum >= polysurf.vertexidx_indices.shape[0]:
# This polynum corresponds to a redundant texture
polysurf_polynum=self.texcoordredundant_polystartpolynum[polynum]
pass
else:
polysurf_polynum=polynum
pass
To2D=polysurf.inplanemats[polysurf_polynum,:,:] # To2D is 2x3
#AijMat=self.inplane2texcoords[polynum,:,:]
AijMatInv=self.texcoords2inplane[polynum,:,:]
# Note Capital UV represent the texture parameterization
# of the in-plane 3D space of this facet.
TexUVExt = np.inner(AijMatInv,np.array((u,v,1.0)))
TexUVExt /= TexUVExt[2] # normalize inhomogeneous coordinates
# These coordinates of this (u,v) of this facet are relative to its centroid,
# and are in terms of the basis vectors in To2D
TexUV = TexUVExt[:2]
# Get 3D coordinates relative to centroid
Tex3D = np.inner(To2D.T,TexUV)
# Need to evaluate 3D vertex coords, relative to centroid,
# Use them to weight the vertex curvatures
# according to distance from our point.
centroid = polysurf.refpoints[polysurf_polynum,:] # Centroied in 3d coords
firstidx=polysurf.vertexidx_indices[polysurf_polynum]
numpoints=polysurf.numvertices[polysurf_polynum]
# Check to see if we have curvatures at all vertices:
if np.isnan(polysurf.principal_curvatures[polysurf.vertexidx[firstidx:(firstidx+numpoints)],0]).any():
# abort if we are missing a curvature
#curvmat[vcnt,ucnt,:,:]=np.NaN
return np.array(((np.NaN,np.NaN),(np.NaN,np.NaN)),dtype='d')
# For this facet, the 3D coords of the vertices are
coordvals = (polysurf.vertices[polysurf.vertexidx[firstidx:(firstidx+numpoints)],:]-centroid.reshape(1,3)).T # coordvals is the coordinates relative to centroid, 3 x numpoints
# Now coordvals is 3 x numvertices, coordinates of the vertices
# relative to centroid
# Tex3D is 3 vector, coordinates of our (u,v) location
# relative to centroid.
#
# Perform weighted average
dists = vecnorm(Tex3D.reshape(3,1) - coordvals,axis=0)
eps = np.max(dists)/10000.0 # small number, so we don't divide by 0
rawweights=1.0/(dists+eps)
totalweights=np.sum(rawweights)
weights=rawweights/totalweights
## The 2D coords of the vertices are
#coordvals2d = np.dot(To2D,coordvals) # 2 rows by numpoints cols... in 2D basis relative to centroid
# Likewise 2D coords of the curvature_tangent_axes
CTA_2D = np.inner(To2D,polysurf.curvature_tangent_axes[polysurf.vertexidx[firstidx:(firstidx+numpoints)],:,:]).transpose(1,0,2) # Transpose to keep broadcast axis to the left. Pre-transpose axes lengths are: 2 (2D axes) by # of vertices by 2 (principal curvature)
# CTA_2D axes: # of vertices by 2 (2D axes) by 2 (principal curvature)
# Normalize curvature_tangent_axes (should be unit length)
CTA_2D /= vecnormkeepshape(CTA_2D,1) # Axis is axis 0 because it came from To2D
# Construct curvature matrices ...
# Need to construct V*K*V', broadcasting over which vertex
curvmatrices=np.einsum('...ij,...j,...jk->...ik', CTA_2D,polysurf.principal_curvatures[polysurf.vertexidx[firstidx:(firstidx+numpoints)],:],CTA_2D.transpose(0,2,1)) # result is # of vertices by 2x2 curvature matrix
# Weighting of vertices relative to our point (u,v)
weightedcurvmatrices = weights.reshape(numpoints,1,1)*curvmatrices
# meancurvmatrix (weighted average)
meancurvmatrix = weightedcurvmatrices.sum(axis=0)
# meancurvmatrix is a 2x2 which should be close to symmetric
asymmetry = meancurvmatrix[1,0]-meancurvmatrix[0,1]
if abs(asymmetry) > 0.1*np.linalg.norm(meancurvmatrix):
sys.stderr.write("_evaluate_curvature: WARNING Large asymmetry in mean curvature matrix at (u,v) = (%g,%g). Matrix = %s\n" % (u,v,str(meancurvmatrix)))
pass
# correct asymmetry
meancurvmatrix[1,0] -= asymmetry/2.0
meancurvmatrix[0,1] += asymmetry/2.0
## Determine principal curvatures
#(princcurvs,evects) = np.linalg.eig(meancurvmatrix)
# curvtangentaxes3d = np.dot(To2D.T,evects)
#
# # We don't want the eigenframe to be mirrored relative to the (U,V)
# # frame, for consistency in interpreting positive vs. negative curvature.
# # ... so if the dot/inner product of (UxV) with (TANGENT0xTANGENT1)
# is negative, that indicates mirroring
# Negating one of the eigenvectors will un-mirror it.
#if np.inner(np.cross(To2D[0,:],To2D[1,:]),np.cross(curvtangentaxes[:,0],curvtangentaxes[:,1])) < 0.0:
# curvtangentaxes3d[:,0]=-curvtangentaxes3d[:,0]
#
#!/usr/bin/env python
import struct
from .gdsPrimitives import *
class Gds2reader:
"""Class to read in a file in GDSII format and populate a layout class with it"""
## Based on info from http://www.rulabinsky.com/cavd/text/chapc.html
#Running byte offset into the GDS file, shared across the reader methods as a module-level global
global offset
offset=0
def __init__(self,layoutObject,debugToTerminal = 0):
self.fileHandle = None
self.layoutObject = layoutObject
self.debugToTerminal=debugToTerminal
#do we dump debug data to the screen
def print64AsBinary(self,number):
for index in range(0,64):
print((number >> (63 - index)) & 0x1, end='')
print("\n")
def stripNonASCII(self,bytestring):
string = bytestring.decode('utf-8')
return string
def ieeeDoubleFromIbmData(self,ibmData):
#the GDS double is in IBM 370 format like this:
#(1)sign (7)exponent (56)mantissa
#exponent is excess 64, mantissa has no implied 1
#a normal IEEE double is like this:
#(1)sign (11)exponent (52)mantissa
data = struct.unpack('>q',ibmData)[0]
sign = (data >> 63)&0x01
exponent = (data >> 56) & 0x7f
mantissa = data<<8 #chop off sign and exponent
if mantissa == 0:
newFloat = 0.0
else:
exponent = ((exponent-64)*4)+1023 #convert to double exponent
#re normalize
while mantissa & 0x8000000000000000 == 0:
mantissa<<=1
exponent-=1
mantissa<<=1 #remove the assumed high bit
exponent-=1
#check for underflow error -- should handle these properly!
if(exponent<=0):
print("Underflow Error")
elif(exponent == 2047):
print("Overflow Error")
#re assemble
newFloat=(sign<<63)|(exponent<<52)|((mantissa>>12)&0xfffffffffffff)
asciiDouble = struct.pack('>q',newFloat)
#convert back to double
newFloat = struct.unpack('>d',asciiDouble)[0]
return newFloat
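# Illustrative standalone sketch (separate from the class): the IEEE 754 field
# layout the converter above targets (1 sign / 11-bit excess-1023 exponent /
# 52-bit mantissa with an implicit leading 1). The IBM 370 source format uses a
# 7-bit excess-64 exponent that counts powers of 16, which is why the code
# multiplies (exponent - 64) by 4 before rebasing to 1023.
def _ieee_fields(value):
    bits = struct.unpack(">Q", struct.pack(">d", value))[0]
    sign = bits >> 63
    exponent = ((bits >> 52) & 0x7FF) - 1023
    mantissa = bits & ((1 << 52) - 1)
    return sign, exponent, mantissa
# _ieee_fields(1.0)  -> (0, 0, 0)
# _ieee_fields(-2.5) -> (1, 1, 0x4000000000000)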
def ieeeFloatCheck(self,aFloat):
asciiDouble = struct.pack('>d',aFloat)
data = struct.unpack('>q',asciiDouble)[0]
sign = data >> 63
exponent = ((data >> 52) & 0x7ff)-1023
# BINWU: Cleanup
#print(exponent+1023)
mantissa = data << 12 #chop off sign and exponent
# BINWU: Cleanup
#self.print64AsBinary((sign<<63)|((exponent+1023)<<52)|(mantissa>>12))
asciiDouble = struct.pack('>q',(sign<<63)|(exponent+1023<<52)|(mantissa>>12))
newFloat = struct.unpack('>d',asciiDouble)[0]
print("Check:"+str(newFloat))
def readNextRecord(self):
global offset
recordLengthAscii = self.fileHandle.read(2) #first 2 bytes tell us the length of the record
if len(recordLengthAscii)==0:
return
recordLength = struct.unpack(">h",recordLengthAscii) #gives us a tuple with a short int inside
offset_int = int(recordLength[0]) # extract length
offset += offset_int # count offset
#print(offset) #print out the record numbers for de-bugging
record = self.fileHandle.read(recordLength[0]-2) #read the rest of it (first 2 bytes were already read)
return record
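    # Example (illustrative bytes, not taken from a real file): a 6-byte HEADER
    # record b'\x00\x06\x00\x02\x02\x58' starts with the length 0x0006, so
    # readNextRecord() reads the remaining 4 bytes and returns
    # b'\x00\x02\x02\x58' (record id 0x0002 followed by GDS version 0x0258 = 600).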
def readHeader(self):
self.layoutObject.info.clear()
## Header
record = self.readNextRecord()
idBits = record[0:2]
if(idBits==b'\x00\x02' and len(record)==4):
gdsVersion = struct.unpack(">h",record[2:4])[0]
self.layoutObject.info["gdsVersion"]=gdsVersion
if(self.debugToTerminal==1):
print("GDS II Version "+str(gdsVersion))
else:
print("Invalid GDSII Header")
return -1
#read records until we hit the UNITS section... this is the last part of the header
while 1:
record = self.readNextRecord()
idBits = record[0:2]
## Modified Date
if idBits==b'\x01\x02' and len(record)==26:
modYear = struct.unpack(">h",record[2:4])[0]
modMonth = struct.unpack(">h",record[4:6])[0]
modDay = struct.unpack(">h",record[6:8])[0]
modHour = struct.unpack(">h",record[8:10])[0]
modMinute = struct.unpack(">h",record[10:12])[0]
modSecond = struct.unpack(">h",record[12:14])[0]
lastAccessYear = struct.unpack(">h",record[14:16])[0]
lastAccessMonth = struct.unpack(">h",record[16:18])[0]
lastAccessDay = struct.unpack(">h",record[18:20])[0]
lastAccessHour = struct.unpack(">h",record[20:22])[0]
lastAccessMinute = struct.unpack(">h",record[22:24])[0]
lastAccessSecond = struct.unpack(">h",record[24:26])[0]
self.layoutObject.info["dates"]=(modYear,modMonth,modDay,modHour,modMinute,modSecond,\
lastAccessYear,lastAccessMonth,lastAccessDay,lastAccessHour,lastAccessMinute,lastAccessSecond)
if(self.debugToTerminal==1):
print("Date Modified:"+str(modYear)+","+str(modMonth)+","+str(modDay)+","+str(modHour)+","+str(modMinute)+","+str(modSecond))
print("Date Last Accessed:"+str(lastAccessYear)+","+str(lastAccessMonth)+","+str(lastAccessDay)+\
","+str(lastAccessHour)+","+str(lastAccessMinute)+","+str(lastAccessSecond))
## LibraryName
elif(idBits==b'\x02\x06'):
libraryName = record[2::].decode("utf-8")
self.layoutObject.info["libraryName"]=libraryName
if(self.debugToTerminal==1):
print("Library: "+libraryName)
## reference libraries
elif(idBits==b'\x1F\x06'):
referenceLibraryA = record[2:46]
referenceLibraryB = record[47:91]
self.layoutObject.info["referenceLibraries"]=(referenceLibraryA,referenceLibraryB)
if(self.debugToTerminal==1):
print( "Reference Libraries:"+referenceLibraryA+","+referenceLibraryB)
elif(idBits==b'\x20\x06'):
fontA = record[2:45]
fontB = record[46:89]
fontC = record[90:133]
fontD = record[134:177]
self.layoutObject.info["fonts"]=(fontA,fontB,fontC,fontD)
if(self.debugToTerminal==1):
print("Fonts:"+fontA+","+fontB+","+fontC+","+fontD)
elif(idBits==b'\x23\x06'):
attributeTable = record[2:45]
self.layoutObject.info["attributeTable"]=attributeTable
if(self.debugToTerminal==1):
print("Attributes:"+attributeTable)
elif(idBits==b'\x22\x02'):
                generations = struct.unpack(">h",record[2:4])[0]
                self.layoutObject.info["generations"]=generations
                if(self.debugToTerminal==1):
                    print("Generations:"+str(generations))
elif(idBits==b'\x36\x02'):
                fileFormat = struct.unpack(">h",record[2:4])[0]
                self.layoutObject.info["fileFormat"]=fileFormat
                if(self.debugToTerminal==1):
                    print("File Format:"+str(fileFormat))
elif(idBits==b'\x37\x06'):
mask = record[2::]
self.layoutObject.info["mask"] = mask
if(self.debugToTerminal==1):
print("Mask: "+mask)
elif(idBits==b'\x03\x05'): #this is also wrong b/c python doesn't natively have an 8 byte float
userUnits=self.ieeeDoubleFromIbmData(record[2:10])
                dbUnits=self.ieeeDoubleFromIbmData(record[10:18])
self.layoutObject.info["units"] = (userUnits,dbUnits)
if(self.debugToTerminal==1):
print("Units: 1 user unit="+str(userUnits)+" database units, 1 database unit="+str(dbUnits)+" meters.")
break;
if(self.debugToTerminal==1):
print("End of GDSII Header Found")
return 1
def readBoundary(self):
##reads in a boundary type structure = a filled polygon
thisBoundary=GdsBoundary()
while 1:
record = self.readNextRecord()
idBits = record[0:2]
if(idBits==b'\x26\x01'): #ELFLAGS
elementFlags = struct.unpack(">h",record[2:4])[0]
thisBoundary.elementFlags=elementFlags
if(self.debugToTerminal==1):
print("\t\tElement Flags: "+str(elementFlags))
elif(idBits==b'\x2F\x03'): #PLEX
plex = struct.unpack(">i",record[2:6])[0]
thisBoundary.plex=plex
if(self.debugToTerminal==1):
print("\t\tPLEX: "+str(plex))
elif(idBits==b'\x0D\x02'): #Layer
drawingLayer = struct.unpack(">h",record[2:4])[0]
thisBoundary.drawingLayer=drawingLayer
if drawingLayer not in self.layoutObject.layerNumbersInUse:
self.layoutObject.layerNumbersInUse += [drawingLayer]
if(self.debugToTerminal==1):
print("\t\tDrawing Layer: "+str(drawingLayer))
elif(idBits==b'\x16\x02'): #Purpose
purposeLayer = struct.unpack(">h",record[2:4])[0]
thisBoundary.purposeLayer=purposeLayer
if(self.debugToTerminal==1):
print("\t\tPurpose Layer: "+str(purposeLayer))
elif(idBits==b'\x0E\x02'): #DataType
dataType = struct.unpack(">h",record[2:4])[0]
thisBoundary.dataType=dataType
if(self.debugToTerminal==1):
print("\t\t\tData Type: "+str(dataType))
elif(idBits==b'\x10\x03'): #XY Data Points
numDataPoints = len(record)-2 #packed as XY coordinates 4 bytes each
thisBoundary.coordinates=[]
for index in range(2,numDataPoints+2,8): #incorporate the 2 byte offset
x=struct.unpack(">i",record[index:index+4])[0]
y=struct.unpack(">i",record[index+4:index+8])[0]
thisBoundary.coordinates+=[(x,y)]
if(self.debugToTerminal==1):
print("\t\t\tXY Point: "+str(x)+","+str(y))
elif(idBits==b'\x11\x00'): #End Of Element
break;
return thisBoundary
def readPath(self): #reads in a path structure
thisPath=GdsPath()
while 1:
record = self.readNextRecord()
idBits = record[0:2]
if(idBits==b'\x26\x01'): #ELFLAGS
elementFlags = struct.unpack(">h",record[2:4])[0]
thisPath.elementFlags=elementFlags
if(self.debugToTerminal==1):
print("\t\tElement Flags: "+str(elementFlags))
elif(idBits==b'\x2F\x03'): #PLEX
plex = struct.unpack(">i",record[2:6])[0]
thisPath.plex=plex
if(self.debugToTerminal==1):
print("\t\tPLEX: "+str(plex))
elif(idBits==b'\x0D\x02'): #Layer
drawingLayer = struct.unpack(">h",record[2:4])[0]
thisPath.drawingLayer=drawingLayer
if drawingLayer not in self.layoutObject.layerNumbersInUse:
self.layoutObject.layerNumbersInUse += [drawingLayer]
if(self.debugToTerminal==1):
print("\t\t\tDrawing Layer: "+str(drawingLayer))
elif(idBits==b'\x16\x02'): #Purpose
purposeLayer = struct.unpack(">h",record[2:4])[0]
thisPath.purposeLayer=purposeLayer
if(self.debugToTerminal==1):
print("\t\tPurpose Layer: "+str(purposeLayer))
elif(idBits==b'\x21\x02'): #Path type
pathType = struct.unpack(">h",record[2:4])[0]
thisPath.pathType=pathType
if(self.debugToTerminal==1):
print("\t\t\tPath Type: "+str(pathType))
elif(idBits==b'\x0F\x03'): #Path width
pathWidth = struct.unpack(">i",record[2:6])[0]
thisPath.pathWidth=pathWidth
if(self.debugToTerminal==1):
print("\t\t\tPath Width: "+str(pathWidth))
elif(idBits==b'\x10\x03'): #XY Data Points
                numDataPoints = len(record)-2 #packed as XY coordinates 4 bytes each
thisPath.coordinates=[]
for index in range(2,numDataPoints+2,8): #incorporate the 2 byte offset
x=struct.unpack(">i",record[index:index+4])[0]
y=struct.unpack(">i",record[index+4:index+8])[0]
thisPath.coordinates+=[(x,y)]
if(self.debugToTerminal==1):
print("\t\t\tXY Point: "+str(x)+","+str(y))
elif(idBits==b'\x11\x00'): #End Of Element
break;
return thisPath
def readSref(self): #reads in a reference to another structure
thisSref=GdsSref()
while 1:
record = self.readNextRecord()
idBits = record[0:2]
if(idBits==b'\x26\x01'): #ELFLAGS
elementFlags = struct.unpack(">h",record[2:4])[0]
thisSref.elementFlags=elementFlags
if(self.debugToTerminal==1):
print("\t\tElement Flags: "+str(elementFlags))
elif(idBits==b'\x2F\x03'): #PLEX
plex = struct.unpack(">i",record[2:6])[0]
thisSref.plex=plex
if(self.debugToTerminal==1):
print("\t\tPLEX: "+str(plex))
elif(idBits==b'\x12\x06'): #Reference Name
sName = self.stripNonASCII(record[2::])
thisSref.sName=sName.rstrip()
if(self.debugToTerminal==1):
print("\t\tReference Name:"+sName)
elif(idBits==b'\x1A\x01'): #Transformation
transFlags = struct.unpack(">H",record[2:4])[0]
mirrorFlag = bool(transFlags&0x8000) ##these flags are a bit sketchy
rotateFlag = bool(transFlags&0x0002)
magnifyFlag = bool(transFlags&0x0004)
thisSref.transFlags=[mirrorFlag,magnifyFlag,rotateFlag]
if(self.debugToTerminal==1):
print("\t\t\tMirror X:"+str(mirrorFlag))
print( "\t\t\tRotate:"+str(rotateFlag))
print("\t\t\tMagnify:"+str(magnifyFlag))
elif(idBits==b'\x1B\x05'): #Magnify
magFactor=self.ieeeDoubleFromIbmData(record[2:10])
thisSref.magFactor=magFactor
if(self.debugToTerminal==1):
print("\t\t\tMagnification:"+str(magFactor))
elif(idBits==b'\x1C\x05'): #Rotate Angle
rotateAngle=self.ieeeDoubleFromIbmData(record[2:10])
thisSref.rotateAngle=rotateAngle
if(self.debugToTerminal==1):
print("\t\t\tRotate Angle (CCW):"+str(rotateAngle))
elif(idBits==b'\x10\x03'): #XY Data Points
index=2
x=struct.unpack(">i",record[index:index+4])[0]
y=struct.unpack(">i",record[index+4:index+8])[0]
thisSref.coordinates=(x,y)
if(self.debugToTerminal==1):
print("\t\t\tXY Point: "+str(x)+","+str(y))
elif(idBits==b'\x11\x00'): #End Of Element
break;
return thisSref
def readAref(self): #an array of references
thisAref = GdsAref()
while 1:
record = self.readNextRecord()
idBits = record[0:2]
if(idBits==b'\x26\x01'): #ELFLAGS
elementFlags = struct.unpack(">h",record[2:4])[0]
thisAref.elementFlags=elementFlags
if(self.debugToTerminal==1):
print("\t\tElement Flags: "+str(elementFlags))
elif(idBits==b'\x2F\x03'): #PLEX
plex = struct.unpack(">i",record[2:6])[0]
thisAref.plex=plex
if(self.debugToTerminal==1):
print("\t\tPLEX: "+str(plex))
elif(idBits==b'\x12\x06'): #Reference Name
                aName = self.stripNonASCII(record[2::])
                thisAref.aName=aName.rstrip()
if(self.debugToTerminal==1):
print("\t\tReference Name:"+aName)
elif(idBits==b'\x1A\x01'): #Transformation
transFlags = struct.unpack(">H",record[2:4])[0]
mirrorFlag = bool(transFlags&0x8000) ##these flags are a bit sketchy
rotateFlag = bool(transFlags&0x0002)
magnifyFlag = bool(transFlags&0x0004)
thisAref.transFlags=[mirrorFlag,magnifyFlag,rotateFlag]
if(self.debugToTerminal==1):
print("\t\t\tMirror X:"+str(mirrorFlag))
print("\t\t\tRotate:"+str(rotateFlag))
print("\t\t\tMagnify:"+str(magnifyFlag))
elif(idBits==b'\x1B\x05'): #Magnify
magFactor=self.ieeeDoubleFromIbmData(record[2:10])
thisAref.magFactor=magFactor
if(self.debugToTerminal==1):
print("\t\t\tMagnification:"+str(magFactor))
elif(idBits==b'\x1C\x05'): #Rotate Angle
rotateAngle=self.ieeeDoubleFromIbmData(record[2:10])
thisAref.rotateAngle=rotateAngle
if(self.debugToTerminal==1):
print("\t\t\tRotate Angle (CCW):"+str(rotateAngle))
elif(idBits==b'\x10\x03'): #XY Data Points
index=2
topLeftX=struct.unpack(">i",record[index:index+4])[0]
topLeftY=struct.unpack(">i",record[index+4:index+8])[0]
rightMostX=struct.unpack(">i",record[index+8:index+12])[0]
bottomMostY=struct.unpack(">i",record[index+12:index+16])[0]
thisAref.coordinates=[(topLeftX,topLeftY),(rightMostX,topLeftY),(topLeftX,bottomMostY)]
if(self.debugToTerminal==1):
print("\t\t\tTop Left Point: "+str(topLeftX)+","+str(topLeftY))
print("\t\t\t\tArray Width: "+str(rightMostX-topLeftX))
print("\t\t\t\tArray Height: "+str(topLeftY-bottomMostY))
elif(idBits==b'\x11\x00'): #End Of Element
break;
return thisAref
def readText(self):
##reads in a text structure
thisText=GdsText()
while 1:
record = self.readNextRecord()
idBits = record[0:2]
if(idBits==b'\x26\x01'): #ELFLAGS
elementFlags = struct.unpack(">h",record[2:4])[0]
thisText.elementFlags=elementFlags
if(self.debugToTerminal==1):
print("\t\tElement Flags: "+str(elementFlags))
elif(idBits==b'\x2F\x03'): #PLEX
plex = struct.unpack(">i",record[2:6])[0]
thisText.plex=plex
if(self.debugToTerminal==1):
print("\t\tPLEX: "+str(plex))
elif(idBits==b'\x0D\x02'): #Layer
drawingLayer = struct.unpack(">h",record[2:4])[0]
thisText.drawingLayer=drawingLayer
if drawingLayer not in self.layoutObject.layerNumbersInUse:
self.layoutObject.layerNumbersInUse += [drawingLayer]
if(self.debugToTerminal==1):
print("\t\tDrawing Layer: "+str(drawingLayer))
elif(idBits==b'\x16\x02'): #Purpose
purposeLayer = struct.unpack(">h",record[2:4])[0]
thisText.purposeLayer=purposeLayer
if(self.debugToTerminal==1):
print("\t\tPurpose Layer: "+str(purposeLayer))
elif(idBits==b'\x1A\x01'): #Transformation
transFlags = struct.unpack(">H",record[2:4])[0]
mirrorFlag = bool(transFlags&0x8000) ##these flags are a bit sketchy
rotateFlag = bool(transFlags&0x0002)
magnifyFlag = bool(transFlags&0x0004)
thisText.transFlags=[mirrorFlag,magnifyFlag,rotateFlag]
if(self.debugToTerminal==1):
print("\t\t\tMirror X:"+str(mirrorFlag))
print("\t\t\tRotate:"+str(rotateFlag))
print("\t\t\tMagnify:"+str(magnifyFlag))
elif(idBits==b'\x1B\x05'): #Magnify
magFactor=self.ieeeDoubleFromIbmData(record[2:10])
thisText.magFactor=magFactor
if(self.debugToTerminal==1):
print("\t\t\tMagnification:"+str(magFactor))
elif(idBits==b'\x1C\x05'): #Rotate Angle
rotateAngle=self.ieeeDoubleFromIbmData(record[2:10])
thisText.rotateAngle=rotateAngle
if(self.debugToTerminal==1):
print("\t\t\tRotate Angle (CCW):"+str(rotateAngle))
elif(idBits==b'\x21\x02'): #Path type
pathType = struct.unpack(">h",record[2:4])[0]
thisText.pathType=pathType
if(self.debugToTerminal==1):
print("\t\t\tPath Type: "+str(pathType))
elif(idBits==b'\x0F\x03'): #Path width
pathWidth = struct.unpack(">i",record[2:6])[0]
thisText.pathWidth=pathWidth
if(self.debugToTerminal==1):
print("\t\t\tPath Width: "+str(pathWidth))
            elif(idBits==b'\x17\x01'): #Text Presentation (0x17; 0x1A is the STRANS record handled above)
presentationFlags = struct.unpack(">H",record[2:4])[0]
font = (presentationFlags&0x0030)>>4 ##these flags are a bit sketchy
verticalFlags = (presentationFlags&0x000C)
horizontalFlags = (presentationFlags&0x0003)
thisText.presentationFlags=(font,verticalFlags,horizontalFlags)
if(self.debugToTerminal==1):
print("\t\t\tFont:"+str(font))
if(verticalFlags==0):
if(self.debugToTerminal==1):
print("\t\t\tVertical: Top")
elif(verticalFlags==1):
if(self.debugToTerminal==1):
print("\t\t\tVertical: Middle")
elif(verticalFlags==2):
if(self.debugToTerminal==1):
print("\t\t\tVertical: Bottom")
if(horizontalFlags==0):
if(self.debugToTerminal==1):
print("\t\t\tHorizontal: Left")
elif(horizontalFlags==1):
if(self.debugToTerminal==1):
print("\t\t\tHorizontal: Center")
elif(horizontalFlags==2):
if(self.debugToTerminal==1):
print("\t\t\tHorizontal: Right")
elif(idBits==b'\x10\x03'): #XY Data Points
index=2
x=struct.unpack(">i",record[index:index+4])[0]
y=struct.unpack(">i",record[index+4:index+8])[0]
thisText.coordinates=[(x,y)]
if(self.debugToTerminal==1):
print("\t\t\tXY Point: "+str(x)+","+str(y))
elif(idBits==b'\x19\x06'): #Text String - also the last record in this element
textString = record[2::].decode('utf-8')
thisText.textString=textString
if(self.debugToTerminal==1):
print("\t\t\tText String: "+textString)
elif(idBits==b'\x11\x00'): #End Of Element
break;
return thisText
def readNode(self):
##reads in a node type structure = an electrical net
thisNode = GdsNode()
while 1:
record = self.readNextRecord()
idBits = record[0:2]
if(idBits==b'\x26\x01'): #ELFLAGS
elementFlags = struct.unpack(">h",record[2:4])[0]
thisNode.elementFlags=elementFlags
if(self.debugToTerminal==1):
print("\t\tElement Flags: "+str(elementFlags))
elif(idBits==b'\x2F\x03'): #PLEX
plex = struct.unpack(">i",record[2:6])[0]
thisNode.plex=plex
if(self.debugToTerminal==1):
print("\t\tPLEX: "+str(plex))
elif(idBits==b'\x0D\x02'): #Layer
drawingLayer = struct.unpack(">h",record[2:4])[0]
thisNode.drawingLayer=drawingLayer
if drawingLayer not in self.layoutObject.layerNumbersInUse:
self.layoutObject.layerNumbersInUse += [drawingLayer]
if(self.debugToTerminal==1):
print("\t\tDrawing Layer: "+str(drawingLayer))
elif(idBits==b'\x2A\x02'): #Node Type
nodeType = struct.unpack(">h",record[2:4])[0]
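                # The remainder of readNode is truncated in the source; the lines
                # below are a sketch completing it by analogy with readBoundary
                # above (the GdsNode attribute names are assumptions).
                thisNode.nodeType=nodeType
                if(self.debugToTerminal==1):
                    print("\t\t\tNode Type: "+str(nodeType))
            elif(idBits==b'\x10\x03'): #XY Data Points
                numDataPoints = len(record)-2 #packed as XY coordinates 4 bytes each
                thisNode.coordinates=[]
                for index in range(2,numDataPoints+2,8): #incorporate the 2 byte offset
                    x=struct.unpack(">i",record[index:index+4])[0]
                    y=struct.unpack(">i",record[index+4:index+8])[0]
                    thisNode.coordinates+=[(x,y)]
                if(self.debugToTerminal==1):
                    print("\t\t\tXY Point: "+str(x)+","+str(y))
            elif(idBits==b'\x11\x00'): #End Of Element
                break;
        return thisNode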
<reponame>vinc3PO/PubChemPy<filename>pubchempy/compound.py<gh_stars>1-10
import json
from .functions import get_json, request, _parse_prop, request_SDS
from .decorators import deprecated, memoized_property
from .mapper import ELEMENTS, CoordinateType, BondType
from .errors import ResponseParseError, NotFoundError
from itertools import zip_longest
from .logger import createLogger
log = createLogger(__name__)
class Atom(object):
"""Class to represent an atom in a :class:`~pubchempy.Compound`."""
def __init__(self, aid, number, x=None, y=None, z=None, charge=0):
"""Initialize with an atom ID, atomic number, coordinates and optional change.
:param int aid: Atom ID
:param int number: Atomic number
:param float x: X coordinate.
:param float y: Y coordinate.
:param float z: (optional) Z coordinate.
:param int charge: (optional) Formal charge on atom.
"""
self.aid = aid
"""The atom ID within the owning Compound."""
self.number = number
"""The atomic number for this atom."""
self.x = x
"""The x coordinate for this atom."""
self.y = y
"""The y coordinate for this atom."""
self.z = z
"""The z coordinate for this atom. Will be ``None`` in 2D Compound records."""
self.charge = charge
"""The formal charge on this atom."""
def __repr__(self):
return 'Atom(%s, %s)' % (self.aid, self.element)
def __eq__(self, other):
return (isinstance(other, type(self)) and self.aid == other.aid and self.element == other.element and
self.x == other.x and self.y == other.y and self.z == other.z and self.charge == other.charge)
@deprecated('Dictionary style access to Atom attributes is deprecated')
def __getitem__(self, prop):
"""Allow dict-style access to attributes to ease transition from when atoms were dicts."""
if prop in {'element', 'x', 'y', 'z', 'charge'}:
return getattr(self, prop)
raise KeyError(prop)
@deprecated('Dictionary style access to Atom attributes is deprecated')
def __setitem__(self, prop, val):
"""Allow dict-style setting of attributes to ease transition from when atoms were dicts."""
setattr(self, prop, val)
@deprecated('Dictionary style access to Atom attributes is deprecated')
def __contains__(self, prop):
"""Allow dict-style checking of attributes to ease transition from when atoms were dicts."""
if prop in {'element', 'x', 'y', 'z', 'charge'}:
return getattr(self, prop) is not None
return False
@property
def element(self):
"""The element symbol for this atom."""
return ELEMENTS.get(self.number, None)
def to_dict(self):
"""Return a dictionary containing Atom data."""
data = {'aid': self.aid, 'number': self.number, 'element': self.element}
for coord in {'x', 'y', 'z'}:
if getattr(self, coord) is not None:
data[coord] = getattr(self, coord)
        if self.charge != 0:
data['charge'] = self.charge
return data
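    # Example output (sketch): Atom(aid=1, number=6, x=2.0, y=1.5).to_dict()
    # returns {'aid': 1, 'number': 6, 'element': 'C', 'x': 2.0, 'y': 1.5};
    # 'z' is omitted because it is None and 'charge' because it is 0.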
def set_coordinates(self, x, y, z=None):
"""Set all coordinate dimensions at once."""
self.x = x
self.y = y
self.z = z
@property
def coordinate_type(self):
"""Whether this atom has 2D or 3D coordinates."""
return '2d' if self.z is None else '3d'
class Bond(object):
"""Class to represent a bond between two atoms in a :class:`~pubchempy.Compound`."""
def __init__(self, aid1, aid2, order=BondType.SINGLE, style=None):
"""Initialize with begin and end atom IDs, bond order and bond style.
:param int aid1: Begin atom ID.
:param int aid2: End atom ID.
:param int order: Bond order.
"""
self.aid1 = aid1
"""ID of the begin atom of this bond."""
self.aid2 = aid2
"""ID of the end atom of this bond."""
self.order = order
"""Bond order."""
self.style = style
"""Bond style annotation."""
def __repr__(self):
return 'Bond(%s, %s, %s)' % (self.aid1, self.aid2, self.order)
def __eq__(self, other):
return (isinstance(other, type(self)) and self.aid1 == other.aid1 and self.aid2 == other.aid2 and
self.order == other.order and self.style == other.style)
@deprecated('Dictionary style access to Bond attributes is deprecated')
def __getitem__(self, prop):
"""Allow dict-style access to attributes to ease transition from when bonds were dicts."""
if prop in {'order', 'style'}:
return getattr(self, prop)
raise KeyError(prop)
@deprecated('Dictionary style access to Bond attributes is deprecated')
def __setitem__(self, prop, val):
"""Allow dict-style setting of attributes to ease transition from when bonds were dicts."""
setattr(self, prop, val)
    @deprecated('Dictionary style access to Bond attributes is deprecated')
def __contains__(self, prop):
"""Allow dict-style checking of attributes to ease transition from when bonds were dicts."""
if prop in {'order', 'style'}:
return getattr(self, prop) is not None
return False
    @deprecated('Dictionary style access to Bond attributes is deprecated')
    def __delitem__(self, prop):
        """Allow dict-style deletion of attributes to ease transition from when bonds were dicts."""
        if not hasattr(self, prop):
            raise KeyError(prop)
        delattr(self, prop)
def to_dict(self):
"""Return a dictionary containing Bond data."""
data = {'aid1': self.aid1, 'aid2': self.aid2, 'order': self.order}
if self.style is not None:
data['style'] = self.style
return data
class Compound(object):
"""Corresponds to a single record from the PubChem Compound database.
The PubChem Compound database is constructed from the Substance database using a standardization and deduplication
process. Each Compound is uniquely identified by a CID.
"""
def __init__(self, record):
"""Initialize with a record dict from the PubChem PUG REST service.
For most users, the ``from_cid()`` class method is probably a better way of creating Compounds.
:param dict record: A compound record returned by the PubChem PUG REST service.
"""
self._record = None
self._atoms = {}
self._bonds = {}
self.record = record
@property
def record(self):
"""The raw compound record returned by the PubChem PUG REST service."""
return self._record
@record.setter
def record(self, record):
self._record = record
#log.debug('Created %s' % self)
self._setup_atoms()
self._setup_bonds()
def _setup_atoms(self):
"""Derive Atom objects from the record."""
# Delete existing atoms
self._atoms = {}
# Create atoms
aids = self.record['atoms']['aid']
elements = self.record['atoms']['element']
if not len(aids) == len(elements):
raise ResponseParseError('Error parsing atom elements')
for aid, element in zip(aids, elements):
self._atoms[aid] = Atom(aid=aid, number=element)
# Add coordinates
if 'coords' in self.record:
coord_ids = self.record['coords'][0]['aid']
xs = self.record['coords'][0]['conformers'][0]['x']
ys = self.record['coords'][0]['conformers'][0]['y']
zs = self.record['coords'][0]['conformers'][0].get('z', [])
if not len(coord_ids) == len(xs) == len(ys) == len(self._atoms) or (zs and not len(zs) == len(coord_ids)):
raise ResponseParseError('Error parsing atom coordinates')
for aid, x, y, z in zip_longest(coord_ids, xs, ys, zs):
self._atoms[aid].set_coordinates(x, y, z)
# Add charges
if 'charge' in self.record['atoms']:
for charge in self.record['atoms']['charge']:
self._atoms[charge['aid']].charge = charge['value']
def _setup_bonds(self):
"""Derive Bond objects from the record."""
self._bonds = {}
if 'bonds' not in self.record:
return
# Create bonds
aid1s = self.record['bonds']['aid1']
aid2s = self.record['bonds']['aid2']
orders = self.record['bonds']['order']
if not len(aid1s) == len(aid2s) == len(orders):
raise ResponseParseError('Error parsing bonds')
for aid1, aid2, order in zip(aid1s, aid2s, orders):
self._bonds[frozenset((aid1, aid2))] = Bond(aid1=aid1, aid2=aid2, order=order)
# Add styles
if 'coords' in self.record and 'style' in self.record['coords'][0]['conformers'][0]:
aid1s = self.record['coords'][0]['conformers'][0]['style']['aid1']
aid2s = self.record['coords'][0]['conformers'][0]['style']['aid2']
styles = self.record['coords'][0]['conformers'][0]['style']['annotation']
for aid1, aid2, style in zip(aid1s, aid2s, styles):
self._bonds[frozenset((aid1, aid2))].style = style
@classmethod
def from_cid(cls, cid, **kwargs):
"""Retrieve the Compound record for the specified CID.
Usage::
c = Compound.from_cid(6819)
:param int cid: The PubChem Compound Identifier (CID).
"""
record = json.loads(request(cid, **kwargs).read().decode())['PC_Compounds'][0]
return cls(record)
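    # Continuing the docstring example above (6819 is the CID shown there; the
    # call needs network access to PubChem):
    #   c = Compound.from_cid(6819)
    #   c.elements   # list of element symbols, e.g. ['C', 'C', ...]
    #   c.atoms      # Atom objects sorted by aid
    #   c.bonds      # Bond objects between those atoms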
def __repr__(self):
return 'Compound(%s)' % self.cid if self.cid else 'Compound()'
def __eq__(self, other):
return isinstance(other, type(self)) and self.record == other.record
def to_dict(self, properties=None):
"""Return a dictionary containing Compound data. Optionally specify a list of the desired properties.
synonyms, aids and sids are not included unless explicitly specified using the properties parameter. This is
because they each require an extra request.
"""
if not properties:
skip = {'aids', 'sids', 'synonyms'}
properties = [p for p in dir(Compound) if isinstance(getattr(Compound, p), property) and p not in skip]
return {p: [i.to_dict() for i in getattr(self, p)] if p in {'atoms', 'bonds'} else getattr(self, p) for p in properties}
def to_series(self, properties=None):
"""Return a pandas :class:`~pandas.Series` containing Compound data. Optionally specify a list of the desired
properties.
synonyms, aids and sids are not included unless explicitly specified using the properties parameter. This is
because they each require an extra request.
"""
import pandas as pd
return pd.Series(self.to_dict(properties))
@property
def cid(self):
"""The PubChem Compound Identifier (CID).
.. note::
When searching using a SMILES or InChI query that is not present in the PubChem Compound database, an
automatically generated record may be returned that contains properties that have been calculated on the
fly. These records will not have a CID property.
"""
if 'id' in self.record and 'id' in self.record['id'] and 'cid' in self.record['id']['id']:
return self.record['id']['id']['cid']
@property
def elements(self):
"""List of element symbols for atoms in this Compound."""
return [a.element for a in self.atoms]
@property
def atoms(self):
"""List of :class:`Atoms <pubchempy.Atom>` in this Compound."""
return sorted(self._atoms.values(), key=lambda x: x.aid)
@property
def bonds(self):
"""List of :class:`Bonds <pubchempy.Bond>` between :class:`Atoms <pubchempy.Atom>` in this Compound."""
return sorted(self._bonds.values(), key=lambda x: (x.aid1, x.aid2))
@memoized_property
def synonyms(self):
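        # Body truncated in the source; a sketch based on the upstream PubChemPy
        # implementation, using the get_json helper imported above:
        if self.cid:
            results = get_json(self.cid, operation='synonyms')
            return results['InformationList']['Information'][0]['Synonym'] if results else []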
<reponame>JacobMSD/ef_python<filename>SpatialMesh.py
import sys
import h5py
import numpy as np
from math import ceil
from Vec3d import Vec3d
from common import production_assert
class SpatialMesh():
def __init__( self ):
pass
@classmethod
def init_from_config( cls, conf ):
new_obj = cls()
new_obj.check_correctness_of_related_config_fields( conf )
new_obj.init_x_grid( conf )
new_obj.init_y_grid( conf )
new_obj.init_z_grid( conf )
new_obj.allocate_ongrid_values()
new_obj.fill_node_coordinates()
new_obj.set_boundary_conditions( conf )
return new_obj
@classmethod
def init_from_h5( cls, h5group ):
new_obj = cls()
new_obj.x_volume_size = h5group.attrs["x_volume_size"]
new_obj.y_volume_size = h5group.attrs["y_volume_size"]
new_obj.z_volume_size = h5group.attrs["z_volume_size"]
new_obj.x_cell_size = h5group.attrs["x_cell_size"]
new_obj.y_cell_size = h5group.attrs["y_cell_size"]
new_obj.z_cell_size = h5group.attrs["z_cell_size"]
new_obj.x_n_nodes = h5group.attrs["x_n_nodes"]
new_obj.y_n_nodes = h5group.attrs["y_n_nodes"]
new_obj.z_n_nodes = h5group.attrs["z_n_nodes"]
#
# todo: don't allocate. read into flat arrays. then reshape
new_obj.allocate_ongrid_values()
#
dim = new_obj.node_coordinates.size
tmp_x = np.empty( dim, dtype = 'f8' )
tmp_y = np.empty_like( tmp_x )
tmp_z = np.empty_like( tmp_x )
#
tmp_x = h5group["./node_coordinates_x"]
tmp_y = h5group["./node_coordinates_y"]
tmp_z = h5group["./node_coordinates_z"]
for global_idx, (vx, vy, vz) in enumerate( zip( tmp_x, tmp_y, tmp_z ) ):
# todo: highly nonoptimal; make view or reshape?
i, j, k = new_obj.global_idx_to_node_ijk( global_idx )
new_obj.node_coordinates[i][j][k] = Vec3d( vx, vy, vz )
#
tmp_rho = h5group["./charge_density"]
tmp_phi = h5group["./potential"]
for global_idx, (rho, phi) in enumerate( zip( tmp_rho, tmp_phi ) ):
i, j, k = new_obj.global_idx_to_node_ijk( global_idx )
new_obj.charge_density[i][j][k] = rho
new_obj.potential[i][j][k] = phi
#
tmp_x = h5group["./electric_field_x"]
tmp_y = h5group["./electric_field_y"]
tmp_z = h5group["./electric_field_z"]
for global_idx, (vx, vy, vz) in enumerate( zip( tmp_x, tmp_y, tmp_z ) ):
i, j, k = new_obj.global_idx_to_node_ijk( global_idx )
new_obj.electric_field[i][j][k] = Vec3d( vx, vy, vz )
#
return new_obj
def allocate_ongrid_values( self ):
nx = self.x_n_nodes
ny = self.y_n_nodes
nz = self.z_n_nodes
self.node_coordinates = np.empty( (nx, ny, nz), dtype=object )
self.charge_density = np.zeros( (nx, ny, nz), dtype='f8' )
self.potential = np.zeros( (nx, ny, nz), dtype='f8' )
self.electric_field = np.full( (nx, ny, nz), Vec3d.zero(), dtype=object )
def check_correctness_of_related_config_fields( self, conf ):
self.grid_x_size_gt_zero( conf )
self.grid_x_step_gt_zero_le_grid_x_size( conf )
self.grid_y_size_gt_zero( conf )
self.grid_y_step_gt_zero_le_grid_y_size( conf )
self.grid_z_size_gt_zero( conf )
self.grid_z_step_gt_zero_le_grid_z_size( conf )
def init_x_grid( self, conf ):
spat_mesh_conf = conf["Spatial mesh"]
self.x_volume_size = spat_mesh_conf.getfloat("grid_x_size")
self.x_n_nodes = ceil( spat_mesh_conf.getfloat("grid_x_size") /
spat_mesh_conf.getfloat("grid_x_step") ) + 1
self.x_cell_size = self.x_volume_size / ( self.x_n_nodes - 1 )
if ( self.x_cell_size != spat_mesh_conf.getfloat("grid_x_step") ):
print( "X_step was shrinked to {:.3f} from {:.3f} "
"to fit round number of cells".format(
self.x_cell_size, spat_mesh_conf.getfloat("grid_x_step") ) )
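        # Numeric sketch of the rounding rule above (illustrative values only):
        # grid_x_size = 1.0, grid_x_step = 0.3 -> x_n_nodes = ceil(1.0/0.3) + 1 = 5
        # and x_cell_size = 1.0 / (5 - 1) = 0.25, so the step is reported as
        # shrunk from 0.300 to 0.250 to fit a whole number of cells.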
def init_y_grid( self, conf ):
spat_mesh_conf = conf["Spatial mesh"]
self.y_volume_size = spat_mesh_conf.getfloat("grid_y_size")
self.y_n_nodes = ceil( spat_mesh_conf.getfloat("grid_y_size") /
spat_mesh_conf.getfloat("grid_y_step") ) + 1
self.y_cell_size = self.y_volume_size / ( self.y_n_nodes - 1 )
if ( self.y_cell_size != spat_mesh_conf.getfloat("grid_y_step") ):
print( "Y_step was shrinked to {:.3f} from {:.3f} "
"to fit round number of cells".format(
self.y_cell_size, spat_mesh_conf.getfloat("grid_y_step") ) )
def init_z_grid( self, conf ):
spat_mesh_conf = conf["Spatial mesh"]
self.z_volume_size = spat_mesh_conf.getfloat("grid_z_size")
self.z_n_nodes = ceil( spat_mesh_conf.getfloat("grid_z_size") /
spat_mesh_conf.getfloat("grid_z_step") ) + 1
self.z_cell_size = self.z_volume_size / ( self.z_n_nodes - 1 )
if ( self.z_cell_size != spat_mesh_conf.getfloat("grid_z_step") ):
print( "Z_step was shrinked to {:.3f} from {:.3f} "
"to fit round number of cells".format(
self.z_cell_size, spat_mesh_conf.getfloat("grid_z_step") ) )
def fill_node_coordinates( self ):
for i in range( self.x_n_nodes ):
for j in range( self.y_n_nodes ):
for k in range( self.z_n_nodes ):
self.node_coordinates[i][j][k] = Vec3d(
i * self.x_cell_size, j * self.y_cell_size, k * self.z_cell_size )
def clear_old_density_values( self ):
self.charge_density.fill( 0 )
def set_boundary_conditions( self, conf ):
phi_left = conf["Boundary conditions"].getfloat("boundary_phi_left")
phi_right = conf["Boundary conditions"].getfloat("boundary_phi_right")
phi_top = conf["Boundary conditions"].getfloat("boundary_phi_top")
phi_bottom = conf["Boundary conditions"].getfloat("boundary_phi_bottom")
phi_near = conf["Boundary conditions"].getfloat("boundary_phi_near")
phi_far = conf["Boundary conditions"].getfloat("boundary_phi_far")
#
nx = self.x_n_nodes
ny = self.y_n_nodes
nz = self.z_n_nodes
for i in range( nx ):
for k in range( nz ):
self.potential[i][0][k] = phi_bottom
self.potential[i][ny-1][k] = phi_top
for j in range( ny ):
for k in range( nz ):
self.potential[0][j][k] = phi_right
self.potential[nx-1][j][k] = phi_left
for i in range( nx ):
for j in range( ny ):
self.potential[i][j][0] = phi_near
self.potential[i][j][nz-1] = phi_far
def is_potential_equal_on_boundaries( self ):
nx = self.x_n_nodes
ny = self.y_n_nodes
nz = self.z_n_nodes
return \
( self.potential[0][2][2] == self.potential[nx-1][2][2] == \
self.potential[2][0][2] == self.potential[2][ny-1][2] == \
self.potential[2][2][0] == self.potential[2][2][nz-1] )
def print( self ):
self.print_grid()
self.print_ongrid_values()
def print_grid( self ):
print( "Grid:" )
print( "Length: x = {:.3f}, y = {:.3f}, z = {:.3f}".format(
self.x_volume_size, self.y_volume_size, self.z_volume_size ) )
print( "Cell size: x = {:.3f}, y = {:.3f}, z = {:.3f}".format(
self.x_cell_size, self.y_cell_size, self.z_cell_size ) )
print( "Total nodes: x = {:d}, y = {:d}, z = {:d}".format(
self.x_n_nodes, self.y_n_nodes, self.z_n_nodes ) )
def print_ongrid_values( self ):
nx = self.x_n_nodes
ny = self.y_n_nodes
nz = self.z_n_nodes
print( "x_node y_node z_node | "
"charge_density | potential | electric_field(x,y,z)" )
for i in range( nx ):
for j in range( ny ):
for k in range( nz ):
"{:8d} {:8d} {:8d} | "
"{:14.3f} | {:14.3f} | "
"{:14.3f} {:14.3f} {:14.3f}".format(
i, j, k,
self.charge_density[i][j][k],
self.potential[i][j][k],
self.electric_field[i][j][k].x,
self.electric_field[i][j][k].y,
self.electric_field[i][j][k].z )
def write_to_file( self, h5file ):
groupname = "/Spatial_mesh";
h5group = h5file.create_group( groupname )
self.write_hdf5_attributes( h5group )
self.write_hdf5_ongrid_values( h5group )
def write_hdf5_attributes( self, h5group ):
h5group.attrs.create( "x_volume_size", self.x_volume_size )
h5group.attrs.create( "y_volume_size", self.y_volume_size )
h5group.attrs.create( "z_volume_size", self.z_volume_size )
h5group.attrs.create( "x_cell_size", self.x_cell_size )
h5group.attrs.create( "y_cell_size", self.y_cell_size )
h5group.attrs.create( "z_cell_size", self.z_cell_size )
h5group.attrs.create( "x_n_nodes", self.x_n_nodes )
h5group.attrs.create( "y_n_nodes", self.y_n_nodes )
h5group.attrs.create( "z_n_nodes", self.z_n_nodes )
def write_hdf5_ongrid_values( self, h5group ):
# todo: without compound datasets
# there is this copying problem.
dim = self.node_coordinates.size
tmp_x = np.empty( dim, dtype = 'f8' )
tmp_y = np.empty_like( tmp_x )
tmp_z = np.empty_like( tmp_x )
# todo: make view instead of copy
flat_node_coords = self.node_coordinates.ravel( order = 'C' )
print( len( flat_node_coords ), dim )
for i, v in enumerate( flat_node_coords ):
tmp_x[i] = v.x
tmp_y[i] = v.y
tmp_z[i] = v.z
h5group.create_dataset( "./node_coordinates_x", data = tmp_x )
h5group.create_dataset( "./node_coordinates_y", data = tmp_y )
h5group.create_dataset( "./node_coordinates_z", data = tmp_z )
# C (C-order): index along the first axis varies slowest
# in self.node_coordinates.flat above default order is C
flat_phi = self.potential.ravel( order = 'C' )
h5group.create_dataset( "./potential", data = flat_phi )
flat_rho = self.charge_density.ravel( order = 'C' )
h5group.create_dataset( "./charge_density", data = flat_rho )
#
flat_field = self.electric_field.ravel( order = 'C' )
for i, v in enumerate( flat_field ):
tmp_x[i] = v.x
tmp_y[i] = v.y
tmp_z[i] = v.z
h5group.create_dataset( "./electric_field_x", data = tmp_x )
h5group.create_dataset( "./electric_field_y", data = tmp_y )
h5group.create_dataset( "./electric_field_z", data = tmp_z )
def grid_x_size_gt_zero( self, conf ):
production_assert( conf["Spatial mesh"].getfloat("grid_x_size") > 0,
"grid_x_size < 0" )
def grid_x_step_gt_zero_le_grid_x_size( self, conf ):
production_assert(
( conf["Spatial mesh"].getfloat("grid_x_step") > 0 ) and
( conf["Spatial mesh"].getfloat("grid_x_step") <=
conf["Spatial mesh"].getfloat("grid_x_size") ),
"grid_x_step < 0 or grid_x_step >= grid_x_size" )
def grid_y_size_gt_zero( self, conf ):
production_assert(
conf["Spatial mesh"].getfloat("grid_y_size") > 0,
"grid_y_size < 0" )
def grid_y_step_gt_zero_le_grid_y_size( self, conf ):
production_assert(
( conf["Spatial mesh"].getfloat("grid_y_step") > 0 ) and
( conf["Spatial mesh"].getfloat("grid_y_step") <=
conf["Spatial mesh"].getfloat("grid_y_size") ),
"grid_y_step < 0 or grid_y_step >= grid_y_size" )
def grid_z_size_gt_zero( self, conf ):
production_assert( conf["Spatial mesh"].getfloat("grid_z_size") > 0,
"grid_z_size < 0" )
def grid_z_step_gt_zero_le_grid_z_size( self, conf ):
production_assert(
( conf["Spatial mesh"].getfloat("grid_z_step") > 0 ) and
( conf["Spatial mesh"].getfloat("grid_z_step") <=
conf["Spatial mesh"].getfloat("grid_z_size") ),
"grid_z_step < 0 or grid_z_step >= grid_z_size" )
def node_number_to_coordinate_x( self, i ):
if i >= 0 and i < self.x_n_nodes:
return i * self.x_cell_size
else:
print( "invalid node number i={:d} "
"at node_number_to_coordinate_x".format( i ) )
sys.exit( -1 )
def node_number_to_coordinate_y( self, j ):
if j >= 0 and j < self.y_n_nodes:
return j * self.y_cell_size
else:
print( "invalid node number j={:d} "
"at node_number_to_coordinate_y".format( j ) )
sys.exit( -1 )
def node_number_to_coordinate_z( self, k ):
if k >= 0 and k < self.z_n_nodes:
return k * self.z_cell_size
else:
print( "invalid node number k={:d} "
"at node_number_to_coordinate_z".format( k ) )
sys.exit( -1 )
def global_idx_to_node_ijk( self, global_idx ):
# In row-major order: (used to save on disk)
# global_index = i * nz * ny +
# j * nz +
# k
#
nx = self.x_n_nodes
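        ny = self.y_n_nodes
        nz = self.z_n_nodes
        # The rest of this method is truncated in the source; the lines below are
        # a sketch of the inverse mapping implied by the row-major comment above.
        i = global_idx // ( ny * nz )
        j = ( global_idx % ( ny * nz ) ) // nz
        k = global_idx % nz
        return ( i, j, k )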
bot = ty[i][0]
if top-bot < 0.1:
frac = 0.5
else:
frac = 0.8
arrowheights.append(top - (top-bot)*frac)
for i in range(2*numpops-1):
period = lastperiod[i]
arrowheight = max(popbox[i][0][1],arrowheights[period] -periodposcount[period]*2*arrowheightinc)
head = [confint[i][0],arrowheight]
tail = [popbox[i][1][0],arrowheight]
arrowa(head,tail,2,color,graylevel)
head = [confint[i][1],arrowheight]
tail = [popbox[i][1][0],arrowheight]
arrowa(head,tail,0, color, graylevel)
periodposcount[period] += 1
if scaledpop != [] :
ane = scaledpop[rootpop]/1000
anes = fround(ane)
dotext([0.15,0.05]," Ancestral Ne (thousands): " + anes,0, False)
else :
dotext([0.15,0.05]," Ancestral 4Nu: " + str(slist[4][4][rootpop][1]),0, False)
if simplecolor:
w("0 0 0 setrgbcolor")
return popbox
def set_tlines(ty,numpops,scaledtime, lastt_lower_y):
"""
line0y - default height of time 0
eventimes - if True, space split times evenly
lastt_lower_y - height of oldest split time, by default is 1/(numpops+1), else can be set by user
"""
tmax = tlowest = slist[5][4][numpops-2][3] ## bottom of confidence interval of lowest t
t = []
for i in range(numpops-1):
t.append([slist[5][4][i][1],slist[5][4][i][2],slist[5][4][i][3]]) ## [time, upper ci, lower ci]
ty = []
if localyscale == -1:
yint = line0y - lastt_lower_y
for i in range(numpops-1):
ty.append([])
if eventimes == False:
for j in range(3):
ty[i].append(line0y - (t[i][j]*yint)/tmax)
else:
ty[i].append(line0y - ((i+1)/float(numpops+1)*yint)/tmax)
else :
timeumean = slist[7][4][1]
scaleumean = slist[7][4][2]
for i in range(numpops-1):
ty.append([])
for j in range(3):
ty[i].append(line0y - (t[i][j] * (scaleumean/timeumean/1e6)* localyscale))
if ty[i][j] < lineINFy :
print " time line too low in graph, reduce local y scale (-y value) "
lastt_lower_y = ty[numpops-2][2]
## print "ty : ",ty
return ty, lastt_lower_y
def print_tlines(ty,numpops,scaledtime, farright):
""" print the split time lines and confidence interval lines """
xinc = 0.005
if(scaledtime != []):
if max(scaledtime)/1e6 < 1.0:
yearscaler = 1e3
yearscalestring = " KYR"
else:
yearscaler = 1e6
yearscalestring = " MYR"
if eventimes == False:
for i in range(numpops-1):
if (ty[i][1] > ty[i][0]):
yline(ty[i][1],farright,1,2,graylevel)
yline(ty[i][0],farright,1,0,0)
if (ty[i][2] < ty[i][0]):
yline(ty[i][2],farright,1,2,graylevel)
if(scaledtime != []):
scaledtime[i] /= yearscaler
mtime = round(scaledtime[i],-int(math.log10(scaledtime[i])-2))
nstr = str(mtime) + yearscalestring
## str(int(round(scaledtime[i],-int(math.log10(scaledtime[i])-2)))) + " yrs"
dotext([xinc*(i+2),ty[i][0]+0.001],nstr,0, False)
else :
nstr = fround(slist[5][4][i][1]) + "tu"
dotext([xinc*(i+2),ty[i][0]+0.001],nstr,0, False)
if (ty[i][1] > ty[i][0]):
arrowa([xinc*(i+1),ty[i][1]],[xinc*(i+1),ty[i][0]],1, black, graylevel)
if (ty[i][2] < ty[i][0]):
arrowa([xinc*(i+1),ty[i][2]],[xinc*(i+1),ty[i][0]],3, black, graylevel)
else:
for i in range(numpops-1):
yline(ty[i][0],farright,1,0,0)
if(scaledtime != []):
scaledtime[i] /= yearscaler
mtime = round(scaledtime[i],-int(math.log10(scaledtime[i])-2))
nstr = str(mtime) + yearscalestring
## str(int(round(scaledtime[i],-int(math.log10(scaledtime[i])-2)))) + " yrs"
dotext([xinc*(i+2),ty[i][0]+0.001],nstr,0, False)
else :
nstr = fround(slist[5][4][i][1]) + "tu"
dotext([xinc*(i+2),ty[i][0]+0.001],nstr,0, False)
return ty
def print_mcurves(slist,numpops, popbox, plist, color):
"""migration arrows:
note - migration arrows are drawn in the forward direction!!
likelihood ratio=ratio of the highest probability to the probability at 2NM = 0
Sinficant likelihood ratios:
2.70554 at p=0.05 The ratio of probabilities (as opposed to twice the log ratio) is 3.86813
5.41189 at p = 0.01 the ratio of prbabilities is 14.9685
9.54954 at p = 0.001 the ration of probabilities is 118.483
3.86813 <= ratio <= 14.9685 upper arrow is a dash (0.95 on chi square 50% 0.0 and 50% 1df)
14.9685 <= ratio <= 118.483 upper arrow is a dotted (0.99 on chi square 50% 0.0 and 50% 1df)
118.483 <= ratio upper arrow is a solid line (0.999 on chi square 50% 0.0 and 50% 1df)
list of things in miginfo[i]
0 topop
1 frompop
2 direction
3 period
4 the number in this period
5 2NM est
6 log likelihood ratio stat
also save # events to print in the period"""
def checkm(val2NM, llr):
return (moption == 'a' and val2NM > min2NM) or \
(moption == 's' and llr >= 2.74) or \
val2NM > moption
mperiodnum = [0]*(numpops-1)
if len(slist[6]) > 4:
sml = slist[6][4]
miginfo = []
mi = 0
for i in range(len(sml)):
## pratio = sml[i][3]/sml[i][2]
## llr = 2*math.log(pratio)
## alternate code to get values from Marginal peak location tables
llr = sml[i][2]
if checkm(sml[i][1],llr) :
miginfo.append([])
                c1 = max(sml[i][0].find("M"),sml[i][0].find("m")) ## either upper or lower case
c2 = sml[i][0].find(">")
miginfo[mi].append(int(sml[i][0][c2+1:len(sml[i][0])]))
miginfo[mi].append(int(sml[i][0][c1+1:c2]))
found1 = False
found2 = False
p = 0
while 1 :
for j in range(len(plist[p])):
if plist[p][j] == miginfo[mi][0]:
found1 = True
if found2 :
direction = 2
else:
direction = 0
if plist[p][j] == miginfo[mi][1]:
found2 = True
if found1 and found2 :
break
else:
p += 1
miginfo[mi].append(direction)
miginfo[mi].append(p)
miginfo[mi].append(mperiodnum[p])
mperiodnum[p] += 1
miginfo[mi].append(sml[i][1])
miginfo[mi].append(llr)
mi += 1
mboxfrac = 0.3
## set height of curves
y = []
for i in range(len(miginfo)):
frompop = miginfo[i][0]
period = miginfo[i][3]
hi = popbox[frompop][1][1]
for j in range (len(plist[period])):
if hi > popbox[plist[period][j]][1][1] :
hi = popbox[plist[period][j]][1][1]
lo = 0
for j in range (len(plist[period])):
if lo < popbox[plist[period][j]][0][1] :
lo = popbox[plist[period][j]][0][1]
y.append(hi - (hi - lo)*(miginfo[i][4]+1)/(mperiodnum[miginfo[i][3]]+1))
for i in range(len(miginfo)):
frompop = miginfo[i][0]
topop = miginfo[i][1]
period = miginfo[i][3]
direc = miginfo[i][2]
val2NM = fround(miginfo[i][5])
if miginfo[i][6] >= 2.70554 and miginfo[i][6] < 5.41189 :
val2NM += "*"
if miginfo[i][6] >= 5.41189 and miginfo[i][6] < 9.54954 :
val2NM += "**"
if miginfo[i][6] >= 9.54954 :
val2NM += "***"
text2NMwidth = textwide(val2NM,2.5)
if direc == 0 :
tailx = popbox[frompop][1][0] - (popbox[frompop][1][0]-popbox[frompop][0][0])*mboxfrac
headx = popbox[topop][0][0] + (popbox[topop][1][0] - popbox[topop][0][0]) * mboxfrac
if (text2NMwidth > abs(tailx-headx)):
tailx -= (text2NMwidth - abs(tailx-headx))/2
headx += (text2NMwidth - abs(tailx-headx))/2
if direc == 2:
tailx = popbox[frompop][0][0] + (popbox[frompop][1][0] - popbox[frompop][0][0]) * mboxfrac
headx = popbox[topop][1][0] - (popbox[topop][1][0]-popbox[topop][0][0])* mboxfrac
if (text2NMwidth > abs(tailx-headx)):
tailx += (text2NMwidth - abs(tailx-headx))/2
headx -= (text2NMwidth - abs(tailx-headx))/2
migrationcurvearrow(val2NM,[headx,y[i]],[tailx,y[i]],direc,red)
##***********************************************************************************
##////////////// Command line use ///////////////////////////////////////////////////
##***********************************************************************************
def scancommandline():
""" command line consists of flags, each with a dash, '-', followed immediately by a letter
some flags should be followed by a value, depending on the flag. The value can be placed
immediately after the flag or spaces can be inserted """
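    # Example invocation (hypothetical file names; the flags are defined below):
    #   -i myrun.out -o myrun.eps -g 1.5 -a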
def aflag ():
global label_a_pops
label_a_pops = True
def bflag (tempval):
global popboxspaceadj
popboxspaceadj = float(tempval)
def dflag ():
global skipdemographicscaling
skipdemographicscaling = True
def eflag():
global eventimes
eventimes = True
def iflag (tempname):
global imfilename
imfilename = tempname
def oflag (tempname):
global outputfilename
outputfilename= tempname
def gflag (tempval):
global globalscale
globalscale = float(tempval)
def xflag (tempval):
global localxscale
localxscale = float(tempval)
def yflag (tempval):
global localyscale
localyscale = float(tempval)
def hflag (tempval):
global arrowheightadj
arrowheightadj = float(tempval)
def fflag(tempval):
global font, bifont
font = tempval
bifont = font + "-BoldItalic"
def mflag(tempval):
global moption
if tempval[0].isdigit():
moption = float(tempval)
else:
moption = tempval
def pflag(tempval):
global fontsize
global fontfixed
fontsize = float(tempval)
fontfixed = True
def tflag(tempval):
global lastt_lower_y
global set_lastt_lower_y
lastt_lower_y = float(tempval)
set_lastt_lower_y = False
def sflag ():
global dosquare
global maximumxpoint
dosquare = True
maximumxpoint = 576.1
def uflag ():
global simplecolor
simplecolor = True
def vflag ():
global rgbcolor
rgbcolor = True
def removewhitespace(temps):
return "".join(temps.split())
def cleanarglist(arglist,flags_with_values,flags_without_values):
newarg = []
if arglist[0][0] != "-": # skip program name at beginning of list
arglist = arglist[1:]
ai = 0
while ai < len(arglist):
if removewhitespace(arglist[ai]) != "":
arglist[ai] = removewhitespace(arglist[ai])
else:
print "bad whitespace in command line: ",repr(" ",join(arglist))
sys.exit(1)
if arglist[ai][0] == '-' :
if arglist[ai][1] in flags_with_values and len(arglist[ai])==2: ## found a space in the command line
arglist[ai] = arglist[ai] + arglist[ai+1]
newarg.append(arglist[ai])
ai += 1
else:
newarg.append(arglist[ai])
else:
print "error on command line, \"-\" not found:",arglist[ai]
sys.exit(1)
ai += 1
return newarg
def checkallflags(flags_with_values,flags_withoutvalues,cldic):
"""
checks that flags_with_values,flags_withoutvalues and cldic all make use of the appropriate flags
"""
if len(set(flags_with_values).intersection(set(flags_without_values))) > 0:
print "error some flags appear in two lists of flags, with and without required values:",set(flags_with_values).intersection(set(flags_without_values))
sys.exit(1)
for flag in set(flags_with_values).union(set(flags_withoutvalues)):
if flag not in cldic:
print "error some flag mismatch between strings of flags and dictionary of flags:",flag
sys.exit(1)
return
cldic = {'a':aflag,'b':bflag,'d':dflag,'e':eflag,'f':fflag,\
'g':gflag,'h':hflag,'i':iflag,'m':mflag,'o':oflag,\
'p':pflag, 's':sflag, 't':tflag,'u':uflag,'v':vflag,\
'x':xflag,'y':yflag}
flags_with_values = "bfghimoptxy"
flags_without_values = "adesuv"
checkallflags(flags_with_values,flags_without_values,cldic)
argv = cleanarglist(sys.argv,flags_with_values,flags_without_values)
for i in range(0,len(argv)):
if argv[i][0] == '-' :
flaglet = argv[i][1].lower()
## print i, flaglet
if len(argv[i]) == 2 :
if i == (len(argv)-1):
cldic[flaglet]()
else :
if argv[i+1][0] == '-' :
cldic[flaglet]()
else :
cldic[flaglet](argv[i+1])
i += 1
else :
if (len(argv[i]) < 2):
print "problem on command line | |
<filename>Converse/dialog_tree/tree_manager.py
# Copyright (c) 2020, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
import logging
from collections import deque
from Converse.dialog_tree.dial_tree import TaskTree
from Converse.config.task_config import TaskConfig
log = logging.getLogger(__name__)
class TreeManager:
"""Handles all tree related operations including tree building and traversal.
For dial_state_manager.StateManager to take tree related operations.
After updating entity info on the tree, StateManager should call
TreeManager.traverse() to get the updated cur_node and cur_entity.
If there's new task, StateManager should call TreeManager.set_task(task_name) to
set the task and call TreeManager.traverse() to initialize the cur_node
and cur_entity.
If cur_task exceeds max turn, StateManager can force it to finish by calling
TreeManager.force_finish_task(). No need to call traverse() again after
calling force_finish_task().
StateManager should call reset_prev_task() each time after receiving information
from dial_policy.DialPolicy.
Attributes:
task_config (str): File path to task configuration yaml file.
task_tree (dial_tree.TaskTree): Task Tree built by the task file.
task_stack (:obj:'list' of :obj: 'str'): List of tasks in the order that
they are created.
cur_task (str): Current task name.
Can be modified by functions set_task, _switch_task, _check_task
cur_node (dial_tree.Leaf): Current node under the current task.
cur_entity (str): Current entity name under the current node.
finish (bool): True if current task tree is finished, false otherwise.
            Can be modified by functions _check_task, set_task
        finished_node (:obj:'set' of :obj: 'dial_tree.AndNode'
            /'dial_tree.OrNode'/'dial_tree.Leaf'): Set of finished tree nodes.
            Can be modified by functions _unfinished_node, _switch_node, _set_entity
        parent_dicts (:obj:'dict' of :obj: 'dial_tree.AndNode'
            /'dial_tree.OrNode'/'dial_tree.Leaf'): Key is child node,
            value is parent node.
prev_tasks (list): Previously finished tasks in the order that they finished in.
prev_tasks_success (list): A list that is the same length as self.prev_tasks
where an entry is True when the task at the same index in self.prev_tasks
finished successfully and False otherwise.
"""
def __init__(self, task_config: TaskConfig):
self.task_path = task_config
self.task_tree = TaskTree(task_config)
self.task_stack = []
self.cur_task = None
self.cur_node = None
self.cur_entity = None
self.finish = False
self.finished_node = set()
self.parent_dicts = {}
self.prev_node = None
self.prev_tasks = []
self.prev_tasks_success = []
def update_entity(self, entity_value, status=True):
"""
Update entity value for the current entity.
"""
if not self.cur_node:
return
self.cur_node.info[self.cur_entity] = entity_value
if self.cur_node.cnt:
for en in self.cur_node.info:
self.cur_node.expand.add(en)
else:
self.cur_node.expand.add(self.cur_entity)
if status:
self.cur_node.verified.add(self.cur_entity)
if self.cur_entity in self.cur_node.wrong:
self.cur_node.wrong.remove(self.cur_entity)
elif entity_value is not None:
self.cur_node.wrong.add(self.cur_entity)
def set_current_entity(self):
"""For visualization"""
if not self.cur_node:
return
self.update_entity(None, False)
if self.prev_node:
self.prev_node.current = None
self.cur_node.current = self.cur_entity
self.prev_node = self.cur_node
else:
self.cur_node.current = self.cur_entity
self.prev_node = self.cur_node
@property
def prev_task_finished(self):
"""bool: True if the zeroth element in prev_tasks is in self.finished_node
and False otherwise.
"""
if not self.prev_tasks:
return False
prev_task = self.prev_tasks[0]
return (
prev_task in self.task_tree.root.child
and self.task_tree.root.child[prev_task] in self.finished_node
)
def reset_states(self):
"""
Reset all of the states in the tree manager.
"""
self.task_tree = TaskTree(self.task_path)
self.task_stack = []
self.cur_task = None
self.cur_node = None
self.cur_entity = None
self.finish = False
self.finished_node = set()
self.parent_dicts = {}
self.reset_prev_task()
def reset_prev_task(self):
"""
Resets the instance variables associated with the previous task.
"""
self.prev_tasks = []
self.prev_tasks_success = []
def next(self, asr_out):
"""
only for unit test
"""
self.unit_test_leaf_handler(asr_out)
res = self.traverse()
if res:
log.info(
"cur_task: %s,cur_node: %s, cur_entity: %s",
self.cur_task,
self.cur_node,
self.cur_entity,
)
log.info("task_stack %s", self.task_stack)
# print(self.parent_dicts)
def unit_test_leaf_handler(self, asr_out):
"""
only for unit test
"""
assert self.cur_task
assert self.cur_node
assert self.cur_entity
if asr_out == "yes":
self.update_entity("True")
if asr_out == "no":
self.update_entity("False", False)
if asr_out == "new task":
pass
def traverse(self):
"""The traversal function for dialogue manager.
Must use after setting cur_task.
You can use self.set_task to initialize cur_task.
Returns:
(
dial_tree.Leaf: cur_node,
str: cur_entity
)
"""
assert not self.finish, "current task is finished!"
assert self.cur_task, "no current task!"
if not self.cur_node:
self.cur_node = self.task_tree.root.child[self.cur_task]
self._set_entity()
self.set_current_entity()
return (self.cur_node, self.cur_entity)
else:
if self._unfinished_node(self.cur_node):
self._next_entity()
self.set_current_entity()
return (self.cur_node, self.cur_entity)
else:
self._check_task()
if self.finish:
self._empty_task_handler()
return None
else:
if self.cur_node:
self._switch_node()
else:
self.cur_node = self.task_tree.root.child[self.cur_task]
self._set_entity()
self.set_current_entity()
return (self.cur_node, self.cur_entity)
def _switch_node(self):
"""Switch self.cur_node from a leaf node to another leaf node.
In each call, either set self.cur_node to another node under
the same parent node, or set self.cur_node to current parent
node if there's no avaliable leaf node under the current parent node.
After using this function, should use _set_entity to set the first
entity under this node.
"""
self._check_task()
assert self.cur_task
assert self.cur_node
if self.cur_task not in self.parent_dicts:
self._parent_helper(self.cur_task)
p = self.parent_dicts[self.cur_task][self.cur_node]
p_type = p.__class__.__name__
if p_type == self.task_tree.and_node:
for c in p.child:
c_node = p.child[c]
                if c_node(): # c_node succeeded
continue
else: # c_node is failed
if c_node in self.finished_node: # c_node is seen
self.finished_node.add(p)
p.success = False
self.cur_node = p
self._switch_node()
return
else: # c_node is unseen
self.cur_node = c_node
return
# no avaliable node under current parent node
self.finished_node.add(p)
p.success = True
self.cur_node = p
if p.name == self.cur_task:
self.task_stack.pop()
self._switch_node()
if p_type == self.task_tree.or_node:
for c in p.child:
c_node = p.child[c]
if c_node():
self.finished_node.add(p)
p.success = True
self.cur_node = p
self._switch_node()
return
else:
if c_node in self.finished_node:
continue
else:
self.cur_node = c_node
return
self.finished_node.add(p)
p.success = False
self.cur_node = p
if p.name == self.cur_task:
self.task_stack.pop()
self._switch_node()
def _set_entity(self):
"""A recursive function to find next leaf node, set the next entity under current node.
Use after _switch_node unless initialize cur_entity.
If no available node/entity,
will return False and add related nodes to self.finished_node.
Return:
True if set cur_entity successfully;
False if there's no available node or entity.
"""
assert self.cur_node
node_type = self.cur_node.__class__.__name__
if (
(self.cur_node.name in self.task_tree.tasks)
and (self.task_stack[-1] != self.cur_node.name)
and (not self.cur_node())
and self.cur_node not in self.finished_node
):
self.task_stack.append(self.cur_node.name)
self.cur_task = self.task_stack[-1]
# and node
if node_type == self.task_tree.and_node:
rec_flag = False
for c in self.cur_node.child:
c_node = self.cur_node.child[c]
c_type = self.cur_node.child[c].__class__.__name__
if not c_node():
if c_node not in self.finished_node: # unseen and false
if c_type == self.task_tree.leaf_node:
self.cur_node = c_node
self._next_entity()
return True
else: # complex structure
rec_flag = True
self.cur_node = c_node
self._set_entity()
break
else: # seen and false
self.cur_node.success = False
self.finished_node.add(self.cur_node)
return False
else:
continue
if not rec_flag:
self.finished_node.add(self.cur_node)
return False
# or node
elif node_type == self.task_tree.or_node:
rec_flag = False
for c in self.cur_node.child:
c_node = self.cur_node.child[c]
c_type = self.cur_node.child[c].__class__.__name__
if not c_node():
if c_node not in self.finished_node: # unseen and false
if c_type == self.task_tree.leaf_node:
self.cur_node = c_node
self._next_entity()
return True
else: # complex structure
rec_flag = True
self.cur_node = c_node
self._set_entity()
break
else: # seen and false
continue
else:
self.cur_node.success = True
self.finished_node.add(self.cur_node)
return False
if not rec_flag:
self.finished_node.add(self.cur_node)
return False
# leaf node
elif node_type == self.task_tree.leaf_node:
self._next_entity()
return True
def _unfinished_node(self, node):
"""Check whether a leaf node is finished.
Used on leaf node.
If all the entities are seen:
the node is finished, return False
if some entities are seen,
some entities are wrong, and we need to verify all the entities:
the node is finished, return False
some entities are wrong, and the wrong count is greater than allowed:
the node is finished, return False
some entities are wrong, and the wrong count is smaller than allowed:
the node is unfinished, stay in the same node and go to next entity,
return True
if no entity is seen:
the node is unfinished,
stay in the same node and go to next entity,
return True
Return:
            True if we should stay in the current node;
            False if we should go to the next node.
"""
assert node.__class__.__name__ == self.task_tree.leaf_node
# when we have to verify all the entities in the group
if node.cnt == 0:
for en in node.info:
if en in node.wrong:
self.finished_node.add(node)
return False
else:
                    if not node.info[en]:
return True
self.finished_node.add(node)
return False
# when we don't have to verify all the entities,
# we may stay at the current node and go to next entity
else:
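            # node.cnt is treated here as the minimum number of entities that must
            # be verified, so allow_cant_verify is how many may remain unverified or
            # wrong (an assumption inferred from the checks below).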
            allow_cant_verify = len(node.info) - node.cnt
unseen_entity_flag = False
verified_entity = 0
for en in node.info:
if not node.info[en]:
if en in node.wrong:
                        allow_cant_verify -=
"""
Copyright 2019 Johns Hopkins University (Author: <NAME>)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import logging
import numpy as np
import torch
import torch.nn as nn
from torch.nn import Conv1d, Linear, BatchNorm1d
from ..layers import ActivationFactory as AF
from ..layers import NormLayer2dFactory as NLF
from ..layer_blocks import (
ResNetInputBlock,
ResNetBasicBlock,
ResNetBNBlock,
SEResNetBasicBlock,
SEResNetBNBlock,
Res2NetBasicBlock,
Res2NetBNBlock,
)
from ..layer_blocks import ResNetEndpointBlock
from .net_arch import NetArch
class ResNet(NetArch):
"""ResNet2D base class
Attributes:
      block: resnet block type in ['basic', 'bn', 'sebasic', 'sebn', 'res2basic', 'res2bn', 'seres2bn', 'tseres2bn'],
        i.e. basic resnet block, bottleneck resnet block, their squeeze-excitation variants, and the
        corresponding Res2Net variants (see the block mapping in __init__)
      num_layers: list with the number of layers in each of the 4 layer blocks that we find in
        resnets; after each layer block, feature maps are downsampled by 2 in each dimension
        and the number of channels is doubled.
in_channels: number of input channels
conv_channels: number of output channels in first conv layer (stem)
base_channels: number of channels in the first layer block
out_units: number of logits in the output layer, if 0 there is no output layer and resnet is used just
as feature extractor, for example for x-vector encoder.
in_kernel_size: kernels size of first conv layer
hid_act: str or dictionary describing hidden activations.
out_act: output activation
      zero_init_residual: initializes batchnorm weights to zero so each residual block behaves as the identity at
        the beginning. We observed worse results when using this option in x-vectors
      groups: number of groups in convolutions
      replace_stride_with_dilation: use dilated convolutions instead of downsampling; we never tested this.
      dropout_rate: dropout rate
      norm_layer: norm_layer object or str indicating the normalization-layer type; if None it uses BatchNorm2d
      do_maxpool: if False, removes the maxpooling layer at the stem of the network.
      in_norm: if True, adds another batch norm layer at the input
      se_r: squeeze-excitation dimension compression
      time_se: if True, the squeeze-excitation embedding is obtained by averaging only over the time dimension,
        instead of the time-freq or HxW dimensions
      in_feats: input feature size (number of components in dimension 2 of the input tensor), this is only
        required when time_se=True to calculate the size of the squeeze-excitation matrices.
"""
def __init__(
self,
block,
num_layers,
in_channels,
conv_channels=64,
base_channels=64,
out_units=0,
hid_act={"name": "relu6", "inplace": True},
out_act=None,
in_kernel_size=7,
in_stride=2,
zero_init_residual=False,
multilevel=False,
endpoint_channels=64,
groups=1,
replace_stride_with_dilation=None,
dropout_rate=0,
norm_layer=None,
norm_before=True,
do_maxpool=True,
in_norm=True,
se_r=16,
time_se=False,
in_feats=None,
res2net_scale=4,
res2net_width_factor=1,
):
super().__init__()
logging.info("{}".format(locals()))
self.block = block
self.has_se = False
self.is_res2net = False
if isinstance(block, str):
if block == "basic":
self._block = ResNetBasicBlock
elif block == "bn":
self._block = ResNetBNBlock
elif block == "sebasic":
self._block = SEResNetBasicBlock
self.has_se = True
elif block == "sebn":
self._block = SEResNetBNBlock
self.has_se = True
elif block == "res2basic":
self._block = Res2NetBasicBlock
self.is_res2net = True
elif block == "res2bn":
self._block = Res2NetBNBlock
self.is_res2net = True
elif block == "seres2bn" or block == "tseres2bn":
self._block = Res2NetBNBlock
self.has_se = True
self.is_res2net = True
else:
self._block = block
self.num_layers = num_layers
self.in_channels = in_channels
self.conv_channels = conv_channels
self.base_channels = base_channels
self.out_units = out_units
self.in_kernel_size = in_kernel_size
self.in_stride = in_stride
self.hid_act = hid_act
self.groups = groups
self.norm_before = norm_before
self.do_maxpool = do_maxpool
self.in_norm = in_norm
self.dropout_rate = dropout_rate
# self.width_per_group = width_per_group
self.se_r = se_r
self.time_se = time_se
self.in_feats = in_feats
self.res2net_scale = res2net_scale
self.res2net_width_factor = res2net_width_factor
self.multilevel = multilevel
self.endpoint_channels = endpoint_channels
self.norm_layer = norm_layer
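        # For group norm, derive the number of groups from the base channel width
        # (capped at 32), but never use fewer groups than the convolution groups.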
norm_groups = None
if norm_layer == "group-norm":
norm_groups = min(base_channels // 2, 32)
norm_groups = max(norm_groups, groups)
self._norm_layer = NLF.create(norm_layer, norm_groups)
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.replace_stride_with_dilation = replace_stride_with_dilation
self.groups = groups
# self.width_per_group = width_per_group
if in_norm:
self.in_bn = norm_layer(in_channels)
self.in_block = ResNetInputBlock(
in_channels,
conv_channels,
kernel_size=in_kernel_size,
stride=in_stride,
activation=hid_act,
norm_layer=self._norm_layer,
norm_before=norm_before,
do_maxpool=do_maxpool,
)
self._context = self.in_block.context
self._downsample_factor = self.in_block.downsample_factor
self.cur_in_channels = conv_channels
self.layer1 = self._make_layer(self._block, base_channels, num_layers[0])
self.layer2 = self._make_layer(
self._block,
2 * base_channels,
num_layers[1],
stride=2,
dilate=replace_stride_with_dilation[0],
)
self.layer3 = self._make_layer(
self._block,
4 * base_channels,
num_layers[2],
stride=2,
dilate=replace_stride_with_dilation[1],
)
self.layer4 = self._make_layer(
self._block,
8 * base_channels,
num_layers[3],
stride=2,
dilate=replace_stride_with_dilation[2],
)
if self.multilevel:
self.endpoint2 = ResNetEndpointBlock(
2 * base_channels * self._block.expansion,
self.endpoint_channels,
1,
activation=self.hid_act,
norm_layer=self._norm_layer,
norm_before=self.norm_before,
)
self.endpoint3 = ResNetEndpointBlock(
4 * base_channels * self._block.expansion,
self.endpoint_channels,
2,
activation=self.hid_act,
norm_layer=self._norm_layer,
norm_before=self.norm_before,
)
self.endpoint4 = ResNetEndpointBlock(
8 * base_channels * self._block.expansion,
self.endpoint_channels,
4,
activation=self.hid_act,
norm_layer=self._norm_layer,
norm_before=self.norm_before,
)
self.with_output = False
self.out_act = None
if out_units > 0:
self.with_output = True
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.output = nn.Linear(self.cur_in_channels, out_units)
self.out_act = AF.create(out_act)
for m in self.modules():
if isinstance(m, nn.Conv2d):
act_name = "relu"
if isinstance(hid_act, str):
act_name = hid_act
if isinstance(hid_act, dict):
act_name = hid_act["name"]
if act_name == "swish":
act_name = "relu"
try:
nn.init.kaiming_normal_(
m.weight, mode="fan_out", nonlinearity=act_name
)
except:
nn.init.kaiming_normal_(
m.weight, mode="fan_out", nonlinearity="relu"
)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
self.zero_init_residual = zero_init_residual
if zero_init_residual:
for m in self.modules():
if isinstance(m, ResNetBNBlock):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, ResNetBasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, channels, num_blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
kwargs = {}
if self.has_se:
if self.time_se:
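                # Time-SE pools only over time, so it needs the frequency-dimension
                # size at this depth of the network to build its excitation layers.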
num_feats = int(self.in_feats / (self._downsample_factor * stride))
kwargs = {"se_r": self.se_r, "time_se": True, "num_feats": num_feats}
else:
kwargs = {"se_r": self.se_r}
if self.is_res2net:
kwargs["scale"] = self.res2net_scale
kwargs["width_factor"] = self.res2net_width_factor
layers = []
layers.append(
block(
self.cur_in_channels,
channels,
activation=self.hid_act,
stride=stride,
dropout_rate=self.dropout_rate,
groups=self.groups,
dilation=previous_dilation,
norm_layer=self._norm_layer,
norm_before=self.norm_before,
**kwargs
)
)
self._context += layers[0].context * self._downsample_factor
self._downsample_factor *= layers[0].downsample_factor
self.cur_in_channels = channels * block.expansion
for _ in range(1, num_blocks):
layers.append(
block(
self.cur_in_channels,
channels,
activation=self.hid_act,
dropout_rate=self.dropout_rate,
groups=self.groups,
dilation=self.dilation,
norm_layer=self._norm_layer,
norm_before=self.norm_before,
**kwargs
)
)
self._context += layers[-1].context * self._downsample_factor
return nn.Sequential(*layers)
def _compute_out_size(self, in_size):
"""Computes output size given input size.
Output size is not the same as input size because of
downsampling steps.
Args:
in_size: input size of the H or W dimensions
Returns:
output_size
"""
out_size = int((in_size - 1) // self.in_stride + 1)
if self.do_maxpool:
out_size = int((out_size - 1) // 2 + 1)
for i in range(3):
if not self.replace_stride_with_dilation[i]:
out_size = int((out_size - 1) // 2 + 1)
return out_size
def in_context(self):
"""
Returns:
Tuple (past, future) context required to predict one frame.
"""
return (self._context, self._context)
def in_shape(self):
"""
Returns:
Tuple describing input shape for the network
"""
return (None, self.in_channels, None, None)
def out_shape(self, in_shape=None):
"""Computes the output shape given the input shape
Args:
in_shape: input shape
Returns:
Tuple describing output shape for the network
"""
if self.with_output:
return (None, self.out_units)
if in_shape is None:
return (None, self.layer4[-1].out_channels, None, None)
assert len(in_shape) == 4
if in_shape[2] is None:
H = None
else:
H = self._compute_out_size(in_shape[2])
if in_shape[3] is None:
W = None
else:
W = self._compute_out_size(in_shape[3])
if self.multilevel:
return (in_shape[0], self.endpoint_channels, int(in_shape[2] // 2), None)
return (in_shape[0], self.layer4[-1].out_channels, H, W)
def forward(self, x, use_amp=False):
if use_amp:
with torch.cuda.amp.autocast():
return self._forward(x)
return self._forward(x)
def _forward(self, x):
"""forward function
Args:
x: input tensor of size=(batch, Cin, Hin, Win) for image or
size=(batch, C, freq, time) for audio
Returns:
Tensor with output logits of size=(batch, out_units) if out_units>0,
            otherwise, it returns a tensor of representations of size=(batch, Cout, Hout, Wout)
"""
if self.in_norm:
x = self.in_bn(x)
feats = []
x = self.in_block(x)
x = self.layer1(x)
x = self.layer2(x)
if self.multilevel:
feats.append(x)
x = self.layer3(x)
if self.multilevel:
feats.append(x)
x = self.layer4(x)
if self.multilevel:
feats.append(x)
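        # Multi-level aggregation: bring the layer2/3/4 feature maps to a common
        # shape with the endpoint blocks and average them.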
if self.multilevel:
out2 = self.endpoint2(feats[0])
out3 = self.endpoint3(feats[1])
out4 = self.endpoint4(feats[2])
x = torch.mean(torch.stack([out2, out3, out4]), 0)
if self.with_output:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.output(x)
if self.out_act is not None:
x = self.out_act(x)
return x
def forward_hid_feats(self, x, layers=None, return_output=False):
"""forward function which also returns intermediate hidden representations
Args:
            x: input tensor of size=(batch, Cin,
# Monitoring provides basic "is it up?" insight, along with performance data about how an installation is running.
# Note: Do NOT return an 'error' state when a 'warning' state would do.
# The system is coded to block on errors during starts / restarts. So, if the number of
# messages in a queue is astronomically high, but the system is still running, that is a warning.
# If an error is returned in that situation, an upgrade meant to fix the underlying issue will block and fail.
import copy
import datetime
import re
import socket
import string
import sys
import time
import traceback
import angel.util.terminal
import angel.util.network
import angel.stats.mem_stats
import angel.settings
import angel.constants
from devops.stats import *
from devops.unix_helpers import set_proc_title
def run_status_check(angel_obj, do_all_checks=False, do_state_checks=False, do_service_checks=False, check_only_these_services=None, format=None, interval=None, timeout=None):
''' Performs various status checks on the running system.
do_all_checks: flip this on to make sure all checks are run, so that in the future as we add additional check flags, they'll default on.
do_state_checks: check that the running services match what should be configured
do_service_checks: call status() on each running service, gathering health and performance data
check_only_these_services: if defined, and do_service_checks is true, only inspect the named services
* Note that checks that this function runs are expected to complete quickly and run as efficiently as possible;
* this function is run in a continuous loop by collectd and polled by nagios on every node in production.
* Please take care when adding any additional logic that it is as efficient as possible!
format:
"" / None -- default action is to print human-readable status info
"collectd" -- run in continuous mode for collectd with given interval (defaults to 10)
"nagios" -- output nagios-formatted output and return a valid nagios exit code
"errors-only" -- display only error info; return non-zero if errors or unknown state
"silent" -- don't output anything; just return an exit code
'''
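    # Example call (hypothetical): run_status_check(angel_obj, do_all_checks=True,
    # format='nagios') checks state plus every service and returns a nagios exit code.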
if do_all_checks:
do_state_checks = do_service_checks = True
if interval is None:
interval = 10 # Used only in collectd currently
if format == '':
format = None
if timeout is None:
if format is None:
timeout = 10 # Most likely a command-line user
else:
timeout = 14 # Nagios nrpe is set to 15 seconds
if format == 'collectd':
try:
run_collectd_monitor(angel_obj, check_only_these_services, interval) # Will only return once services are stopped
if angel_obj.are_services_running():
print >>sys.stderr, "Error: run_collectd_monitor() unexpectedly returned!"
sys.exit(1)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(0)
except Exception as e:
            print >>sys.stderr, "Error: run_collectd_monitor threw an exception (%s)." % e
sys.exit(1)
# For all other formats, we'll query status and generate output in the requested format.
# This function could use some clean-up / refactoring, but conceptually it's simple:
# 1) set up some common variables; 2) call status_check on all services; 3) generate the output.
# To-do: there's some odd rare network condition that causes a ~30 second delay in the following 3 lines
# even when services are stopped -- presumably hostname lookup stuff when DNS is unresolvable?
# Wasn't able to trace it further than this before networking resumed; so leaving this note here for now.
services_are_running = angel_obj.are_services_running()
running_services = sorted(angel_obj.get_running_service_names())
enabled_services = sorted(angel_obj.get_enabled_services())
running_unexpectedly = list(set(running_services) - set(enabled_services))
if not services_are_running:
running_unexpectedly = running_services
not_running_but_should_be = list(set(enabled_services) - set(running_services))
if 'devops' in not_running_but_should_be:
not_running_but_should_be.remove('devops')
left_column_width = 10
if len(running_services):
# Find the length of the longest service name:
left_column_width = max(left_column_width, 1 + max(map(len, running_services)))
# Default format (usually command line user) prints some info before checking each service status:
if format is None and do_state_checks:
_print_status_preamble(angel_obj, left_column_width)
if len(running_services) and do_service_checks:
print "-" * angel.util.terminal.terminal_width()
# Gather data for each service by calling their status() functions:
time_exceeded = False
stat_structs = {}
if do_service_checks:
start_time = time.time()
unused_ret_val, stat_structs = angel_obj.service_status(services_to_check=check_only_these_services, timeout=timeout)
end_time = time.time()
check_time = end_time - start_time
if check_time > timeout:
time_exceeded = True
if stat_structs is None:
print >>sys.stderr, "Error: service status struct invalid"
return angel.constants.STATE_UNKNOWN
# Run through the data for each status, checking it:
service_info = {}
status_seen_by_type = {}
status_data = {}
state_message = ''
if do_state_checks:
state_message = "%s %s" % (angel_obj.get_project_code_branch(), angel_obj.get_project_code_version())
if format == 'nagios':
if angel_obj.is_multinode_install() or True:
public_ip = angel_obj.get_public_ip_addr()
private_ip = angel_obj.get_private_ip_addr()
if private_ip != public_ip:
state_message += " on " + public_ip
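    # Helper: prefix each service's metric keys so every service can share the one
    # flat status_data dict without key collisions.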
def _merge_status_data(key_prefix, new_status_data):
for k in new_status_data:
new_key = "%s_%s" % (key_prefix, k)
if new_key in status_data:
print >>sys.stderr, "Warning: %s already in status_data?" % new_key
status_data[new_key] = new_status_data[k]
# Run through the results for each service, building up our results set:
for key in sorted(stat_structs):
if stat_structs[key] is None or not isinstance(stat_structs[key], dict):
# Then the given service failed to return anything from status() -- stub in an entry here:
stat_structs[key] = {}
stat_structs[key]['state'] = angel.constants.STATE_UNKNOWN
stat_structs[key]['message'] = 'Status check failed'
if time_exceeded:
stat_structs[key]['message'] = 'Status check failed or timed out'
try:
# Generate a lower-cased name of the service, without the word "service" in it:
this_service_name = '-'.join(re.findall('[A-Z][^A-Z]*', string.replace(key, 'Service', ''))).lower()
service_info[this_service_name] = {}
this_state = stat_structs[key]['state']
if this_state is None:
print >>sys.stderr, "Error: service %s failed to return a state code" % this_service_name
this_state = angel.constants.STATE_UNKNOWN
service_info[this_service_name]['state'] = this_state
status_seen_by_type[this_state] = True
this_message = 'Unknown'
if 'message' in stat_structs[key] and stat_structs[key]['message'] is not None:
this_message = stat_structs[key]['message']
if this_state != angel.constants.STATE_RUNNING_OK or do_state_checks is False:
if len(state_message):
state_message += ", "
if not (check_only_these_services is not None and 1 == len(check_only_these_services)):
# If we're only checking one service, don't preface the status message with the service name.
state_message += "%s: " % this_service_name
state_message += this_message.split("\n")[0]
try:
state_name = angel.constants.STATE_CODE_TO_TEXT[this_state]
except:
state_name = 'UNKNOWN(%s)' % this_state
format_str = "{:>%s}:{:>9} {}" % left_column_width
service_info[this_service_name]['message'] = format_str.format(this_service_name, state_name, this_message.split("\n")[0])
service_info[this_service_name]['message_raw'] = this_message.split("\n")[0]
if 'data' in stat_structs[key]:
_merge_status_data(this_service_name.lower(), stat_structs[key]['data'])
except:
print >>sys.stderr, "Error in status check %s: %s\n%s" % (key, sys.exc_info()[0], traceback.format_exc(sys.exc_info()[2]))
state_message += " error in %s status data" % (str(key))
status_seen_by_type[angel.constants.STATE_UNKNOWN] = True
# Reduce multiple status_codes down to one value for our exit_code. This isn't elegant, but it seems to be the cleanest way of managing this.
# Order of importance, most important to least important, in general:
# Decommissioned > Unknown > Error > Stopped > Starting|Stopping > Warn > Okay
# - If we're "ok" but the node is marked as in maintenance mode, we flip the level up one to warning.
# - If a service is in starting or stopping state, that masks any Warn level stuff.
# - If the single status code is stopped, but services are supposed to be running, then that's a real error.
extra_state_message = ''
if services_are_running:
if do_state_checks:
extra_state_message += " Running %s services" % len(running_services)
exit_code = angel.constants.STATE_RUNNING_OK
else:
exit_code = angel.constants.STATE_UNKNOWN
else:
exit_code = angel.constants.STATE_STOPPED
enabled_services_str = copy.copy(enabled_services)
try:
enabled_services_str.remove('devops')
except:
pass
enabled_services_str = ', '.join(enabled_services_str)
if angel_obj.is_decommissioned():
exit_code = angel.constants.STATE_DECOMMISSIONED
extra_state_message = ' DECOMMISSIONED'
elif angel.constants.STATE_UNKNOWN in status_seen_by_type:
exit_code = angel.constants.STATE_UNKNOWN
elif angel.constants.STATE_ERROR in status_seen_by_type:
exit_code = angel.constants.STATE_ERROR
elif angel.constants.STATE_STOPPED in status_seen_by_type:
exit_code = angel.constants.STATE_STOPPED
elif angel.constants.STATE_STARTING in status_seen_by_type:
exit_code = angel.constants.STATE_STARTING
elif angel.constants.STATE_STOPPING in status_seen_by_type:
exit_code = angel.constants.STATE_STOPPING
elif angel.constants.STATE_WARN in status_seen_by_type:
exit_code = angel.constants.STATE_WARN
elif angel.constants.STATE_RUNNING_OK in status_seen_by_type:
exit_code = angel.constants.STATE_RUNNING_OK
if services_are_running:
extra_state_message = ' ok: running %s' % enabled_services_str
else:
if do_service_checks:
extra_state_message = ' unknown state for services %s' % enabled_services_str
if do_state_checks:
if services_are_running:
if exit_code == angel.constants.STATE_STOPPED:
# If all the services are reporting STOPPED state, but we're supposed to be running, that's an error:
exit_code = angel.constants.STATE_ERROR
if angel_obj.is_in_maintenance_mode():
extra_state_message += ' (in maintenance mode)'
if exit_code == angel.constants.STATE_RUNNING_OK:
exit_code = angel.constants.STATE_WARN
if not services_are_running:
if len(running_services) and False:
extra_state_message += ' (stopped; running %s; normally runs %s)' % (', '.join(running_services), enabled_services_str)
else:
extra_state_message += ' (stopped; normally runs %s)' % enabled_services_str
if exit_code == angel.constants.STATE_RUNNING_OK or exit_code == angel.constants.STATE_WARN:
exit_code = angel.constants.STATE_STOPPED
if len(running_unexpectedly):
        extra_state_message += ' (running unexpected services: %s)'
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 <NAME> <<EMAIL>>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# To use this plugin, you need to first access https://api.telegram.org
# You need to create a bot and acquire its Token Identifier (bot_token)
#
# Basically you need to create a chat with a user called the 'BotFather'
# and type: /newbot
#
# Then follow through the wizard, it will provide you an api key
# that looks like this: 123456789:alphanumeric_characters
#
# Each chat a bot joins will have a chat_id associated with it.
# You will need this value as well to send the notification.
#
# Log into the webpage version of the site if you like by accessing:
# https://web.telegram.org
#
# You can check to see if your entry is working using:
# https://api.telegram.org/botAPI_KEY/getMe
#
# Pay attention to the word 'bot' that must be present in front of your
# api key that the BotFather gave you.
#
# For example, a url might look like this:
# https://api.telegram.org/bot123456789:alphanumeric_characters/getMe
#
# Development API Reference::
# - https://core.telegram.org/bots/api
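# A resulting Apprise URL then follows the templates defined below, e.g.
# (hypothetical values): tgram://123456789:alphanumeric_characters/-100987654321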
import requests
import re
import os
from json import loads
from json import dumps
from .NotifyBase import NotifyBase
from ..common import NotifyType
from ..common import NotifyImageSize
from ..common import NotifyFormat
from ..utils import parse_bool
from ..utils import parse_list
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
from ..attachment.AttachBase import AttachBase
TELEGRAM_IMAGE_XY = NotifyImageSize.XY_256
# Chat ID is required
# If the Chat ID is positive, then it's addressed to a single person
# If the Chat ID is negative, then it's targeting a group
IS_CHAT_ID_RE = re.compile(
r'^(@*(?P<idno>-?[0-9]{1,32})|(?P<name>[a-z_-][a-z0-9_-]+))$',
re.IGNORECASE,
)
class NotifyTelegram(NotifyBase):
"""
A wrapper for Telegram Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'Telegram'
# The services URL
service_url = 'https://telegram.org/'
# The default secure protocol
secure_protocol = 'tgram'
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_telegram'
# Default Notify Format
notify_format = NotifyFormat.HTML
# Telegram uses the http protocol with JSON requests
notify_url = 'https://api.telegram.org/bot'
# Allows the user to specify the NotifyImageSize object
image_size = NotifyImageSize.XY_256
# The maximum allowable characters allowed in the body per message
body_maxlen = 4096
# Title is to be part of body
title_maxlen = 0
# Telegram is limited to sending a maximum of 100 requests per second.
request_rate_per_sec = 0.001
# Define object templates
templates = (
'{schema}://{bot_token}',
'{schema}://{bot_token}/{targets}',
)
# Telegram Attachment Support
mime_lookup = (
# This list is intentionally ordered so that it can be scanned
# from top to bottom. The last entry is a catch-all
# Animations are documented to only support gif or H.264/MPEG-4
# Source: https://core.telegram.org/bots/api#sendanimation
{
'regex': re.compile(r'^(image/gif|video/H264)', re.I),
'function_name': 'sendAnimation',
'key': 'animation',
},
        # This entry is intentionally placed below sendAnimation, allowing
        # it to catch gif files. It then becomes a catch-all for the remaining
# image types.
# Source: https://core.telegram.org/bots/api#sendphoto
{
'regex': re.compile(r'^image/.*', re.I),
'function_name': 'sendPhoto',
'key': 'photo',
},
# Video is documented to only support .mp4
# Source: https://core.telegram.org/bots/api#sendvideo
{
'regex': re.compile(r'^video/mp4', re.I),
'function_name': 'sendVideo',
'key': 'video',
},
# Voice supports ogg
# Source: https://core.telegram.org/bots/api#sendvoice
{
'regex': re.compile(r'^(application|audio)/ogg', re.I),
'function_name': 'sendVoice',
'key': 'voice',
},
# Audio supports mp3 and m4a only
# Source: https://core.telegram.org/bots/api#sendaudio
{
'regex': re.compile(r'^audio/(mpeg|mp4a-latm)', re.I),
'function_name': 'sendAudio',
'key': 'audio',
},
# Catch All (all other types)
# Source: https://core.telegram.org/bots/api#senddocument
{
'regex': re.compile(r'.*', re.I),
'function_name': 'sendDocument',
'key': 'document',
},
)
# Telegram's HTML support doesn't like having HTML escaped
    # characters passed into it. To handle this situation, we need to
    # search the body for these sequences and convert them to the
    # output the user expects
__telegram_escape_html_entries = (
# Comments
(re.compile(
r'\s*<!.+?-->\s*',
(re.I | re.M | re.S)), '', {}),
# the following tags are not supported
(re.compile(
r'\s*<\s*(!?DOCTYPE|p|div|span|body|script|link|'
r'meta|html|font|head|label|form|input|textarea|select|iframe|'
r'source|script)([^a-z0-9>][^>]*)?>\s*',
(re.I | re.M | re.S)), '', {}),
# All closing tags to be removed are put here
(re.compile(
r'\s*<\s*/(span|body|script|meta|html|font|head|'
r'label|form|input|textarea|select|ol|ul|link|'
r'iframe|source|script)([^a-z0-9>][^>]*)?>\s*',
(re.I | re.M | re.S)), '', {}),
# Bold
(re.compile(
r'<\s*(strong)([^a-z0-9>][^>]*)?>',
(re.I | re.M | re.S)), '<b>', {}),
(re.compile(
r'<\s*/\s*(strong)([^a-z0-9>][^>]*)?>',
(re.I | re.M | re.S)), '</b>', {}),
(re.compile(
r'\s*<\s*(h[1-6]|title)([^a-z0-9>][^>]*)?>\s*',
(re.I | re.M | re.S)), '{}<b>', {'html': '\r\n'}),
(re.compile(
r'\s*<\s*/\s*(h[1-6]|title)([^a-z0-9>][^>]*)?>\s*',
(re.I | re.M | re.S)),
'</b>{}', {'html': '<br/>'}),
# Italic
(re.compile(
r'<\s*(caption|em)([^a-z0-9>][^>]*)?>',
(re.I | re.M | re.S)), '<i>', {}),
(re.compile(
r'<\s*/\s*(caption|em)([^a-z0-9>][^>]*)?>',
(re.I | re.M | re.S)), '</i>', {}),
# Bullet Lists
(re.compile(
r'<\s*li([^a-z0-9>][^>]*)?>\s*',
(re.I | re.M | re.S)), ' -', {}),
# convert pre tags to code (supported by Telegram)
(re.compile(
r'<\s*pre([^a-z0-9>][^>]*)?>',
(re.I | re.M | re.S)), '{}<code>', {'html': '\r\n'}),
(re.compile(
r'<\s*/\s*pre([^a-z0-9>][^>]*)?>',
(re.I | re.M | re.S)), '</code>{}', {'html': '\r\n'}),
# New Lines
(re.compile(
r'\s*<\s*/?\s*(ol|ul|br|hr)\s*/?>\s*',
(re.I | re.M | re.S)), '\r\n', {}),
(re.compile(
r'\s*<\s*/\s*(br|p|hr|li|div)([^a-z0-9>][^>]*)?>\s*',
(re.I | re.M | re.S)), '\r\n', {}),
        # HTML Spaces (&nbsp;) and tabs (&emsp;) aren't supported
        # See https://core.telegram.org/bots/api#html-style
        (re.compile(r'\&nbsp;?', re.I), ' ', {}),
        # Tabs become 3 spaces
        (re.compile(r'\&emsp;?', re.I), '   ', {}),
        # Some characters get re-escaped by the Telegram upstream
        # service so we need to convert these back,
        (re.compile(r'\&apos;?', re.I), '\'', {}),
        (re.compile(r'\&quot;?', re.I), '"', {}),
# New line cleanup
(re.compile(r'\r*\n[\r\n]+', re.I), '\r\n', {}),
)
# Define our template tokens
template_tokens = dict(NotifyBase.template_tokens, **{
'bot_token': {
'name': _('Bot Token'),
'type': 'string',
'private': True,
'required': True,
# Token required as part of the API request, allow the word 'bot'
            # in front of it
'regex': (r'^(bot)?(?P<key>[0-9]+:[a-z0-9_-]+)$', 'i'),
},
'target_user': {
'name': _('Target Chat ID'),
'type': 'string',
            'map_to': 'targets',
'regex': (r'^((-?[0-9]{1,32})|([a-z_-][a-z0-9_-]+))$', 'i'),
},
'targets': {
'name': _('Targets'),
'type': 'list:string',
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'image': {
'name': _('Include Image'),
'type': 'bool',
'default': False,
'map_to': 'include_image',
},
'detect': {
'name': _('Detect Bot Owner'),
'type': 'bool',
'default': True,
'map_to': 'detect_owner',
},
'silent': {
'name': _('Silent Notification'),
'type': 'bool',
'default': False,
},
'preview': {
'name': _('Web Page Preview'),
'type': 'bool',
'default': False,
},
'to': {
'alias_of': 'targets',
},
})
def __init__(self, bot_token, targets, detect_owner=True,
include_image=False, silent=None, preview=None, **kwargs):
"""
Initialize Telegram Object
"""
super(NotifyTelegram, self).__init__(**kwargs)
self.bot_token = validate_regex(
bot_token, *self.template_tokens['bot_token']['regex'],
fmt='{key}')
if not self.bot_token:
err = 'The Telegram Bot Token specified ({}) is invalid.'.format(
bot_token)
self.logger.warning(err)
raise TypeError(err)
# Parse our list
self.targets = parse_list(targets)
# Define whether or not we should make audible alarms
self.silent = self.template_args['silent']['default'] \
if silent is None else bool(silent)
# Define whether or not we should display a web page preview
self.preview = self.template_args['preview']['default'] \
if preview is None else bool(preview)
# if detect_owner is set to True, we will attempt to determine who
# the bot owner is based on the first person who messaged it. This
# is not a fool proof way of doing things as over time Telegram removes
# the message history for the bot. So what appears (later on) to be
# the first message to it, maybe another user who sent it a message
# much later. Users who set this flag should update their Apprise
# URL later to directly include the user that we should message.
self.detect_owner = detect_owner
| |
= None
floor: Optional[float] = None
refPeriodStart: Optional[Date] = None
refPeriodEnd: Optional[Date] = None
dayCounter: Optional[DayCounter] = None
isInArrears: Optional[bool] = None
exCouponDate: Optional[Date] = None
class CmsCoupon(BaseModel):
resource_name: Optional[Literal["CmsCoupon"]] = "CmsCoupon"
paymentDate: Date
nominal: float
startDate: Date
endDate: Date
fixingDays: int
index: SwapIndex
gearing: Optional[float] = None
spread: Optional[float] = None
refPeriodStart: Optional[Date] = None
refPeriodEnd: Optional[Date] = None
dayCounter: Optional[DayCounter] = None
isInArrears: Optional[bool] = None
exCouponDate: Optional[Date] = None
class CmsSpreadCoupon(BaseModel):
resource_name: Optional[Literal["CmsSpreadCoupon"]] = "CmsSpreadCoupon"
paymentDate: Date
nominal: float
startDate: Date
endDate: Date
fixingDays: float
index: SwapSpreadIndex
gearing: Optional[float] = None
spread: Optional[float] = None
refPeriodStart: Optional[Date] = None
refPeriodEnd: Optional[Date] = None
dayCounter: Optional[DayCounter] = None
isInArrears: Optional[bool] = None
exCouponDate: Optional[Date] = None
class AnalyticHaganPricer(BaseModel):
resource_name: Optional[Literal["AnalyticHaganPricer"]] = "AnalyticHaganPricer"
v: SwaptionVolatilityStructureHandle
model: GFunctionFactoryYieldCurveModel
meanReversion: QuoteHandle
class NumericHaganPricer(BaseModel):
resource_name: Optional[Literal["NumericHaganPricer"]] = "NumericHaganPricer"
v: SwaptionVolatilityStructureHandle
model: GFunctionFactoryYieldCurveModel
meanReversion: QuoteHandle
lowerLimit: Optional[float] = None
upperLimit: Optional[float] = None
precision: Optional[float] = None
class CappedFlooredCmsCoupon(BaseModel):
resource_name: Optional[
Literal["CappedFlooredCmsCoupon"]
] = "CappedFlooredCmsCoupon"
paymentDate: Date
nominal: float
startDate: Date
endDate: Date
fixingDays: float
index: SwapIndex
gearing: Optional[float] = None
spread: Optional[float] = None
cap: Optional[float] = None
floor: Optional[float] = None
refPeriodStart: Optional[Date] = None
refPeriodEnd: Optional[Date] = None
dayCounter: Optional[DayCounter] = None
isInArrears: Optional[bool] = None
exCouponDate: Optional[Date] = None
class CappedFlooredCmsSpreadCoupon(BaseModel):
resource_name: Optional[
Literal["CappedFlooredCmsSpreadCoupon"]
] = "CappedFlooredCmsSpreadCoupon"
paymentDate: Date
nominal: float
startDate: Date
endDate: Date
fixingDays: float
index: SwapSpreadIndex
gearing: Optional[float] = None
spread: Optional[float] = None
cap: Optional[float] = None
floor: Optional[float] = None
refPeriodStart: Optional[Date] = None
refPeriodEnd: Optional[Date] = None
dayCounter: Optional[DayCounter] = None
isInArrears: Optional[bool] = None
exCouponDate: Optional[Date] = None
class LinearTsrPricer(BaseModel):
resource_name: Optional[Literal["LinearTsrPricer"]] = "LinearTsrPricer"
swaptionVol: SwaptionVolatilityStructureHandle
meanReversion: QuoteHandle
couponDiscountCurve: Optional[YieldTermStructureHandle] = None
settings: Optional[LinearTsrPricerSettings] = None
class LognormalCmsSpreadPricer(BaseModel):
resource_name: Optional[
Literal["LognormalCmsSpreadPricer"]
] = "LognormalCmsSpreadPricer"
cmsPricer: CmsCouponPricer
correlation: QuoteHandle
couponDiscountCurve: Optional[YieldTermStructureHandle] = None
IntegrationPoints: Optional[int] = None
volatilityType: Optional[VolatilityType] = None
shift1: Optional[float] = None
shift2: Optional[float] = None
class SwaptionHelper0(BaseModel):
resource_name: Optional[Literal["SwaptionHelper"]] = "SwaptionHelper"
exerciseDate: Date
endDate: Date
volatility: QuoteHandle
index: IborIndex
fixedLegTenor: Period
fixedLegDayCounter: DayCounter
floatingLegDayCounter: DayCounter
termStructure: YieldTermStructureHandle
errorType: Optional[BlackCalibrationHelperCalibrationErrorType] = None
strike: Optional[float] = None
nominal: Optional[float] = None
type: Optional[VolatilityType] = None
shift: Optional[float] = None
class SwaptionHelper1(BaseModel):
resource_name: Optional[Literal["SwaptionHelper"]] = "SwaptionHelper"
exerciseDate: Date
length: Period
volatility: QuoteHandle
index: IborIndex
fixedLegTenor: Period
fixedLegDayCounter: DayCounter
floatingLegDayCounter: DayCounter
termStructure: YieldTermStructureHandle
errorType: Optional[BlackCalibrationHelperCalibrationErrorType] = None
strike: Optional[float] = None
nominal: Optional[float] = None
type: Optional[VolatilityType] = None
shift: Optional[float] = None
class SwaptionHelper2(BaseModel):
resource_name: Optional[Literal["SwaptionHelper"]] = "SwaptionHelper"
maturity: Period
length: Period
volatility: QuoteHandle
index: IborIndex
fixedLegTenor: Period
fixedLegDayCounter: DayCounter
floatingLegDayCounter: DayCounter
termStructure: YieldTermStructureHandle
errorType: Optional[BlackCalibrationHelperCalibrationErrorType] = None
strike: Optional[float] = None
nominal: Optional[float] = None
type: Optional[VolatilityType] = None
shift: Optional[float] = None
class CapHelper(BaseModel):
resource_name: Optional[Literal["CapHelper"]] = "CapHelper"
length: Period
volatility: QuoteHandle
index: IborIndex
fixedLegFrequency: float
fixedLegDayCounter: DayCounter
includeFirstSwaplet: bool
termStructure: YieldTermStructureHandle
errorType: Optional[BlackCalibrationHelperCalibrationErrorType] = None
type: Optional[VolatilityType] = None
shift: Optional[float] = None
class HestonModelHelper(BaseModel):
resource_name: Optional[Literal["HestonModelHelper"]] = "HestonModelHelper"
maturity: Period
calendar: Calendar
s0: float
strikePrice: float
volatility: QuoteHandle
riskFreeRate: YieldTermStructureHandle
dividendYield: YieldTermStructureHandle
errorType: Optional[BlackCalibrationHelperCalibrationErrorType] = None
class VanillaOptionBase(BaseModel):
resource_name: Optional[Literal["VanillaOption"]] = "VanillaOption"
payoff: StrikedTypePayoff
exercise: Exercise
class EuropeanOption(BaseModel):
resource_name: Optional[Literal["EuropeanOption"]] = "EuropeanOption"
payoff: StrikedTypePayoff
exercise: Exercise
class ForwardVanillaOptionBase(BaseModel):
resource_name: Optional[Literal["ForwardVanillaOption"]] = "ForwardVanillaOption"
moneyness: float
resetDate: Date
payoff: StrikedTypePayoff
exercise: Exercise
class QuantoVanillaOption(BaseModel):
resource_name: Optional[Literal["QuantoVanillaOption"]] = "QuantoVanillaOption"
payoff: StrikedTypePayoff
exercise: Exercise
class QuantoForwardVanillaOption(BaseModel):
resource_name: Optional[
Literal["QuantoForwardVanillaOption"]
] = "QuantoForwardVanillaOption"
moneyness: float
resetDate: Date
payoff: StrikedTypePayoff
exercise: Exercise
class AnalyticHestonEngineIntegration0(BaseModel):
resource_name: Optional[
Literal["AnalyticHestonEngineIntegration"]
] = "AnalyticHestonEngineIntegration"
intAlgo: AnalyticHestonEngineIntegrationAlgorithm
quadrature: GaussianQuadrature
class AnalyticHestonEngineIntegration1(BaseModel):
resource_name: Optional[
Literal["AnalyticHestonEngineIntegration"]
] = "AnalyticHestonEngineIntegration"
intAlgo: AnalyticHestonEngineIntegrationAlgorithm
integrator: Integrator
AnalyticHestonEngineIntegration = Union[
AnalyticHestonEngineIntegration0, AnalyticHestonEngineIntegration1
]
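# The numbered variants mirror the two constructor overloads (Gaussian quadrature vs.
# a generic Integrator); the Union lets a payload validate against either shape.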
class AnalyticPTDHestonEngine0(BaseModel):
resource_name: Optional[
Literal["AnalyticPTDHestonEngine"]
] = "AnalyticPTDHestonEngine"
model: PiecewiseTimeDependentHestonModel
cpxLog: AnalyticPTDHestonEngineComplexLogFormula
itg: AnalyticPTDHestonEngineIntegration
andersenPiterbargEpsilon: Optional[float] = None
class DividendVanillaOption(BaseModel):
resource_name: Optional[Literal["DividendVanillaOption"]] = "DividendVanillaOption"
payoff: StrikedTypePayoff
exercise: Exercise
dividendDates: List[Date]
dividends: List[float]
class BarrierOption(BaseModel):
resource_name: Optional[Literal["BarrierOption"]] = "BarrierOption"
barrierType: BarrierType
barrier: float
rebate: float
payoff: StrikedTypePayoff
exercise: Exercise
class FdmSchemeDesc(BaseModel):
resource_name: Optional[Literal["FdmSchemeDesc"]] = "FdmSchemeDesc"
type: FdmSchemeDescFdmSchemeType
theta: float
mu: float
class FdBlackScholesVanillaEngine0(BaseModel):
resource_name: Optional[
Literal["FdBlackScholesVanillaEngine"]
] = "FdBlackScholesVanillaEngine"
value: GeneralizedBlackScholesProcess
quantoHelper: FdmQuantoHelper
tGrid: Optional[int] = None
xGrid: Optional[int] = None
dampingSteps: Optional[int] = None
schemeDesc: Optional[FdmSchemeDesc] = None
localVol: Optional[bool] = None
illegalLocalVolOverwrite: Optional[float] = None
cashDividendModel: Optional[FdBlackScholesVanillaEngineCashDividendModel] = None
class FdBlackScholesVanillaEngine1(BaseModel):
resource_name: Optional[
Literal["FdBlackScholesVanillaEngine"]
] = "FdBlackScholesVanillaEngine"
process: GeneralizedBlackScholesProcess
tGrid: Optional[int] = None
xGrid: Optional[int] = None
dampingSteps: Optional[int] = None
schemeDesc: Optional[FdmSchemeDesc] = None
localVol: Optional[bool] = None
illegalLocalVolOverwrite: Optional[float] = None
cashDividendModel: Optional[FdBlackScholesVanillaEngineCashDividendModel] = None
class FdOrnsteinUhlenbeckVanillaEngine(BaseModel):
resource_name: Optional[
Literal["FdOrnsteinUhlenbeckVanillaEngine"]
] = "FdOrnsteinUhlenbeckVanillaEngine"
value: OrnsteinUhlenbeckProcess
rTS: YieldTermStructure
tGrid: Optional[int] = None
xGrid: Optional[int] = None
dampingSteps: Optional[int] = None
epsilon: Optional[float] = None
schemeDesc: Optional[FdmSchemeDesc] = None
class FdBatesVanillaEngine(BaseModel):
resource_name: Optional[Literal["FdBatesVanillaEngine"]] = "FdBatesVanillaEngine"
model: BatesModel
tGrid: Optional[int] = None
xGrid: Optional[int] = None
vGrid: Optional[int] = None
dampingSteps: Optional[int] = None
schemeDesc: Optional[FdmSchemeDesc] = None
class FdHestonVanillaEngine0(BaseModel):
resource_name: Optional[Literal["FdHestonVanillaEngine"]] = "FdHestonVanillaEngine"
model: HestonModel
quantoHelper: FdmQuantoHelper
tGrid: Optional[int] = None
xGrid: Optional[int] = None
vGrid: Optional[int] = None
dampingSteps: Optional[int] = None
schemeDesc: Optional[FdmSchemeDesc] = None
leverageFct: Optional[LocalVolTermStructure] = None
class FdHestonVanillaEngine1(BaseModel):
resource_name: Optional[Literal["FdHestonVanillaEngine"]] = "FdHestonVanillaEngine"
model: HestonModel
tGrid: Optional[int] = None
xGrid: Optional[int] = None
vGrid: Optional[int] = None
dampingSteps: Optional[int] = None
schemeDesc: Optional[FdmSchemeDesc] = None
leverageFct: Optional[LocalVolTermStructure] = None
class FdCEVVanillaEngine(BaseModel):
resource_name: Optional[Literal["FdCEVVanillaEngine"]] = "FdCEVVanillaEngine"
f0: float
alpha: float
beta: float
rTS: YieldTermStructureHandle
tGrid: Optional[int] = None
xGrid: Optional[int] = None
dampingSteps: Optional[int] = None
scalingFactor: Optional[float] = None
eps: Optional[float] = None
schemeDesc: Optional[FdmSchemeDesc] = None
class FdSabrVanillaEngine(BaseModel):
resource_name: Optional[Literal["FdSabrVanillaEngine"]] = "FdSabrVanillaEngine"
f0: float
alpha: float
beta: float
nu: float
rho: float
rTS: YieldTermStructureHandle
tGrid: Optional[int] = None
fGrid: Optional[int] = None
xGrid: Optional[int] = None
dampingSteps: Optional[int] = None
scalingFactor: Optional[float] = None
eps: Optional[float] = None
schemeDesc: Optional[FdmSchemeDesc] = None
class FdBlackScholesBarrierEngine(BaseModel):
resource_name: Optional[
Literal["FdBlackScholesBarrierEngine"]
] = "FdBlackScholesBarrierEngine"
process: GeneralizedBlackScholesProcess
tGrid: Optional[int] = None
xGrid: Optional[int] = None
dampingSteps: Optional[int] = None
schemeDesc: Optional[FdmSchemeDesc] = None
localVol: Optional[bool] = None
illegalLocalVolOverwrite: Optional[float] = None
class ContinuousAveragingAsianOption(BaseModel):
resource_name: Optional[
Literal["ContinuousAveragingAsianOption"]
] = "ContinuousAveragingAsianOption"
averageType: AverageType
payoff: StrikedTypePayoff
exercise: Exercise
class DiscreteAveragingAsianOption(BaseModel):
resource_name: Optional[
Literal["DiscreteAveragingAsianOption"]
] = "DiscreteAveragingAsianOption"
averageType: AverageType
runningAccumulator: float
pastFixings: int
fixingDates: List[Date]
payoff: StrikedTypePayoff
exercise: Exercise
class DoubleBarrierOptionBase(BaseModel):
resource_name: Optional[Literal["DoubleBarrierOption"]] = "DoubleBarrierOption"
barrierType: DoubleBarrierType
barrier_lo: float
barrier_hi: float
rebate: float
payoff: StrikedTypePayoff
exercise: Exercise
class QuantoDoubleBarrierOption(BaseModel):
resource_name: Optional[
Literal["QuantoDoubleBarrierOption"]
] = "QuantoDoubleBarrierOption"
barrierType: DoubleBarrierType
barrier_lo: float
barrier_hi: float
rebate: float
payoff: StrikedTypePayoff
exercise: Exercise
class DeltaVolQuote0(BaseModel):
resource_name: Optional[Literal["DeltaVolQuote"]] = "DeltaVolQuote"
delta: float
vol: QuoteHandle
maturity: float
deltaType: DeltaVolQuoteDeltaType
class DeltaVolQuote1(BaseModel):
resource_name: Optional[Literal["DeltaVolQuote"]] = "DeltaVolQuote"
vol: QuoteHandle
deltaType: DeltaVolQuoteDeltaType
maturity: float
atmType: DeltaVolQuoteAtmType
class DeltaVolQuoteHandle(BaseModel):
resource_name: Optional[Literal["DeltaVolQuoteHandle"]] = "DeltaVolQuoteHandle"
value: Optional[DeltaVolQuote] = None
class RelinkableDeltaVolQuoteHandle(BaseModel):
resource_name: Optional[
Literal["RelinkableDeltaVolQuoteHandle"]
] = "RelinkableDeltaVolQuoteHandle"
value: Optional[DeltaVolQuote] = None
class VannaVolgaBarrierEngine(BaseModel):
resource_name: Optional[
Literal["VannaVolgaBarrierEngine"]
] = "VannaVolgaBarrierEngine"
atmVol: DeltaVolQuoteHandle
vol25Put: DeltaVolQuoteHandle
vol25Call: DeltaVolQuoteHandle
spotFX: QuoteHandle
domesticTS: YieldTermStructureHandle
foreignTS: YieldTermStructureHandle
adaptVanDelta: Optional[bool] = None
bsPriceWithSmile: Optional[float] = None
class FdSimpleBSSwingEngine(BaseModel):
resource_name: Optional[Literal["FdSimpleBSSwingEngine"]] = "FdSimpleBSSwingEngine"
process: GeneralizedBlackScholesProcess
tGrid: Optional[int] = None
xGrid: Optional[int] = None
schemeDesc: Optional[FdmSchemeDesc] = None
class GJRGARCHModel(BaseModel):
resource_name: Optional[Literal["GJRGARCHModel"]] = "GJRGARCHModel"
process: GJRGARCHProcess
class AnalyticGJRGARCHEngine(BaseModel):
resource_name: Optional[
Literal["AnalyticGJRGARCHEngine"]
] = "AnalyticGJRGARCHEngine"
process: GJRGARCHModel
class PlainVanillaPayoff(BaseModel):
resource_name: Optional[Literal["PlainVanillaPayoff"]] = "PlainVanillaPayoff"
type: OptionType
strike: float
class PercentageStrikePayoff(BaseModel):
resource_name: Optional[
Literal["PercentageStrikePayoff"]
] = "PercentageStrikePayoff"
type: OptionType
moneyness: float
class CashOrNothingPayoff(BaseModel):
resource_name: Optional[Literal["CashOrNothingPayoff"]] = "CashOrNothingPayoff"
type: OptionType
strike: float
payoff: float
class AssetOrNothingPayoff(BaseModel):
resource_name: Optional[Literal["AssetOrNothingPayoff"]] = "AssetOrNothingPayoff"
type: OptionType
strike: float
class SuperSharePayoff(BaseModel):
resource_name: Optional[Literal["SuperSharePayoff"]] = "SuperSharePayoff"
type: OptionType
strike: float
increment: float
class GapPayoff(BaseModel):
resource_name: Optional[Literal["GapPayoff"]] = "GapPayoff"
type: OptionType
strike: float
strikePayoff: float
class VanillaForwardPayoff(BaseModel):
resource_name: Optional[Literal["VanillaForwardPayoff"]] = "VanillaForwardPayoff"
type: OptionType
strike: float
class BasketOption(BaseModel):
resource_name: Optional[Literal["BasketOption"]] = "BasketOption"
payoff: BasketPayoff
exercise: Exercise
class Fd2dBlackScholesVanillaEngine(BaseModel):
resource_name: Optional[
Literal["Fd2dBlackScholesVanillaEngine"]
] = "Fd2dBlackScholesVanillaEngine"
p1: GeneralizedBlackScholesProcess
p2: GeneralizedBlackScholesProcess
correlation: float
xGrid: Optional[int] = None
yGrid: Optional[int] = None
tGrid: Optional[int] = None
dampingSteps: Optional[int] = None
schemeDesc: Optional[FdmSchemeDesc] = None
localVol: Optional[bool] = None
illegalLocalVolOverwrite: Optional[float] = None
class EverestOption(BaseModel):
resource_name: Optional[Literal["EverestOption"]] = "EverestOption"
notional: float
guarantee: float
exercise: Exercise
class BlackDeltaCalculator(BaseModel):
resource_name: Optional[Literal["BlackDeltaCalculator"]] = "BlackDeltaCalculator"
ot: OptionType
dt: DeltaVolQuoteDeltaType
spot: float
dDiscount: DiscountFactor
fDiscount: DiscountFactor
stDev: float
class CallabilityPrice(BaseModel):
resource_name: Optional[Literal["CallabilityPrice"]] = "CallabilityPrice"
amount: float
type: CallabilityPriceType
class CallabilityBase(BaseModel):
resource_name: Optional[Literal["Callability"]] = "Callability"
price: CallabilityPrice
type: CallabilityType
date: Date
class
%gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (M1_LAYER)
fout.write(cmd_str)
x2 = x_center + PITCH/2 - CHANNEL_LENGTH/2 - 0.31
y2 = y_center + PITCH/2 - CHANNEL_LENGTH/2 - 0.31
cmd_str = "box %gum %gum %gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "erase %s \n" % (M1_LAYER)
fout.write(cmd_str)
#diagonal runners and contacts
for x_index in range(ARRAY_SIZE):
for y_index in range(ARRAY_SIZE):
#m1 first layer
x_center = PITCH * x_index + PITCH/2
y_center = PITCH * y_index + PITCH/2
x1 = x_center - PITCH/2 + CHANNEL_LENGTH/2 + CHANNEL_M1_SPACING + 0.23
y1 = y_center - PITCH/2 + CHANNEL_LENGTH/2 + CHANNEL_M1_SPACING + 0.23
x2 = x_center + PITCH/2 - CHANNEL_LENGTH/2 - CHANNEL_M1_SPACING - 0.23
y2 = y_center + PITCH/2 - CHANNEL_LENGTH/2 - CHANNEL_M1_SPACING - 0.23
cmd_str = "box %gum %gum %gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (M1_LAYER)
fout.write(cmd_str)
#m2 second layer
x1 = x_center - PITCH/2 + 1
y1 = y_center - PITCH/2 + 1
x2 = x_center + PITCH/2 - 1
y2 = y_center + PITCH/2 - 1
cmd_str = "box %gum %gum %gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (M2_LAYER)
fout.write(cmd_str)
#m1 contact
x1 = x_center - PITCH/2 + 1.1
y1 = y_center - PITCH/2 + 1.1
x2 = x_center + PITCH/2 - 1.1
y2 = y_center + PITCH/2 - 1.1
cmd_str = "box %gum %gum %gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (M1_CONTACT_LAYER)
fout.write(cmd_str)
#place runners
x1 = x_center - PITCH/2
y1 = y_center + PITCH/2
x2 = x_center + PITCH/2
y2 = y_center - PITCH/2
cmd_str = "wire segment %s %gum %gum %gum %gum %gum -noendcap \n" % (M3_LAYER, M3_WIDTH, x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "wire segment %s %gum %gum %gum %gum %gum -noendcap \n" % (M4_LAYER, M4_WIDTH, x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "wire segment %s %gum %gum %gum %gum %gum -noendcap \n" % (M5_LAYER, M5_WIDTH, x1, y1, x2, y2)
fout.write(cmd_str)
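            # Stack M3/M4/M5 runners corner to corner across the cell, presumably so
            # the diagonal connection is carried on several metal levels in parallel.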
#place contacts
#m2-m3 contact
# x1 = x_center - PITCH/2 + 1 + M2_VIA_SPACE
# y1 = y_center + PITCH/2 - 1 - M2_VIA_SPACE
# x2 = x_center - PITCH/2 + 1 + M2_VIA_SPACE + round(((M3_WIDTH/2 - M2_VIA_SPACE)**(1/2)),1)
# y2 = y_center + PITCH/2 - 1 - M2_VIA_SPACE
# x3 = x_center + PITCH/2 - 1 - M2_VIA_SPACE
# y3 = y_center - PITCH/2 + 1 + M2_VIA_SPACE + round(((M3_WIDTH/2 - M2_VIA_SPACE)**(1/2)),1)
# x4 = x_center + PITCH/2 - 1 - M2_VIA_SPACE
# y4 = y_center - PITCH/2 + 1 + M2_VIA_SPACE
# x5 = x_center + PITCH/2 - 1 - M2_VIA_SPACE - round(((M3_WIDTH/2 - M2_VIA_SPACE)**(1/2)),1)
# y5 = y_center - PITCH/2 + 1 + M2_VIA_SPACE
# x6 = x_center - PITCH/2 + 1 + M2_VIA_SPACE
# y6 = y_center + PITCH/2 - 1 - M2_VIA_SPACE - round(((M3_WIDTH/2 - M2_VIA_SPACE)**(1/2)),1)
# cmd_str = "polygon %s %gum %gum %gum %gum %gum %gum %gum %gum %gum %gum %gum %gum \n" % (M2_CONTACT_LAYER, x1, y1, x2, y2, x3, y3, x4, y4, x5, y5, x6, y6)
# fout.write(cmd_str)
# #m3-m4 contact
# x1 = x_center - PITCH/2 + 1 + M3_VIA_SPACE
# y1 = y_center + PITCH/2 - 1 - M3_VIA_SPACE
# x2 = x_center - PITCH/2 + 1 + M3_VIA_SPACE + round(((M4_WIDTH/2 - M3_VIA_SPACE)**(1/2)),1)
# y2 = y_center + PITCH/2 - 1 - M3_VIA_SPACE
# x3 = x_center + PITCH/2 - 1 - M3_VIA_SPACE
# y3 = y_center - PITCH/2 + 1 + M3_VIA_SPACE + round(((M4_WIDTH/2 - M3_VIA_SPACE)**(1/2)),1)
# x4 = x_center + PITCH/2 - 1 - M3_VIA_SPACE
# y4 = y_center - PITCH/2 + 1 + M3_VIA_SPACE
# x5 = x_center + PITCH/2 - 1 - M3_VIA_SPACE - round(((M4_WIDTH/2 - M3_VIA_SPACE)**(1/2)),1)
# y5 = y_center - PITCH/2 + 1 + M3_VIA_SPACE
# x6 = x_center - PITCH/2 + 1 + M3_VIA_SPACE
# y6 = y_center + PITCH/2 - 1 - M3_VIA_SPACE - round(((M4_WIDTH/2 - M3_VIA_SPACE)**(1/2)),1)
# cmd_str = "polygon %s %gum %gum %gum %gum %gum %gum %gum %gum %gum %gum %gum %gum \n" % (M3_CONTACT_LAYER, x1, y1, x2, y2, x3, y3, x4, y4, x5, y5, x6, y6)
# fout.write(cmd_str)
# #m4-m5 contact
# x1 = x_center - PITCH/2 + 1 + M4_VIA_SPACE
# y1 = y_center + PITCH/2 - 1 - M4_VIA_SPACE
# x2 = x_center - PITCH/2 + 1 + M4_VIA_SPACE + round(((M5_WIDTH/2 - M4_VIA_SPACE)**(1/2)),1)
# y2 = y_center + PITCH/2 - 1 - M4_VIA_SPACE
# x3 = x_center + PITCH/2 - 1 - M4_VIA_SPACE
# y3 = y_center - PITCH/2 + 1 + M4_VIA_SPACE + round(((M5_WIDTH/2 - M4_VIA_SPACE)**(1/2)),1)
# x4 = x_center + PITCH/2 - 1 - M4_VIA_SPACE
# y4 = y_center - PITCH/2 + 1 + M4_VIA_SPACE
# x5 = x_center + PITCH/2 - 1 - M4_VIA_SPACE - round(((M5_WIDTH/2 - M4_VIA_SPACE)**(1/2)),1)
# y5 = y_center - PITCH/2 + 1 + M4_VIA_SPACE
# x6 = x_center - PITCH/2 + 1 + M4_VIA_SPACE
# y6 = y_center + PITCH/2 - 1 - M4_VIA_SPACE - round(((M5_WIDTH/2 - M4_VIA_SPACE)**(1/2)),1)
#cmd_str = "polygon %s %gum %gum %gum %gum %gum %gum %gum %gum %gum %gum %gum %gum \n" % (M4_CONTACT_LAYER, x1, y1, x2, y2, x3, y3, x4, y4, x5, y5, x6, y6)
#fout.write(cmd_str)
            # Magic issue prevents correct contact placement. TODO: get this fixed.
x1 = x_center - M4_VIA_SIZE/2
y1 = y_center - M4_VIA_SIZE/2
x2 = x_center + M4_VIA_SIZE/2
y2 = y_center + M4_VIA_SIZE/2
cmd_str = "box %gum %gum %gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (M4_CONTACT_LAYER)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (M3_CONTACT_LAYER)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (M2_CONTACT_LAYER)
fout.write(cmd_str)
#gate interconnect
#vertical interconnect
for x_index in range(ARRAY_SIZE+1):
x_center = PITCH * x_index
x1 = x_center - CHANNEL_LENGTH/2
x2 = x_center + CHANNEL_LENGTH/2
y1 = -CHANNEL_LENGTH/2
y2 = PITCH * ARRAY_SIZE + CHANNEL_LENGTH/2
cmd_str = "box %gum %gum %gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (POLY_LAYER)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (M2_LAYER)
fout.write(cmd_str)
#horizontal interconnect
for y_index in range (ARRAY_SIZE+1):
y_center = PITCH * y_index
x1 = -CHANNEL_LENGTH/2
x2 = PITCH * ARRAY_SIZE + CHANNEL_LENGTH/2
y1 = y_center - CHANNEL_LENGTH/2
y2 = y_center + CHANNEL_LENGTH/2
cmd_str = "box %gum %gum %gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (POLY_LAYER)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (M2_LAYER)
fout.write(cmd_str)
# gate vias
for x_index in range(ARRAY_SIZE+1):
for y_index in range(ARRAY_SIZE+1):
# 0.5 x 0.5 locali, M1, M2
# 0.4 x 0.4 pcontact, vialocali, M1-M2 via
x_center = x_index * PITCH
y_center = y_index * PITCH
x1 = x_center - CHANNEL_LENGTH/2
y1 = y_center - CHANNEL_LENGTH/2
x2 = x_center + CHANNEL_LENGTH/2
y2 = y_center + CHANNEL_LENGTH/2
cmd_str = "box %gum %gum %gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (LOCALI_LAYER)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (M1_LAYER)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (M2_LAYER)
fout.write(cmd_str)
x1 = x_center - CHANNEL_LENGTH/2 + 0.08
y1 = y_center - CHANNEL_LENGTH/2 + 0.08
x2 = x_center + CHANNEL_LENGTH/2 - 0.08
y2 = y_center + CHANNEL_LENGTH/2 - 0.08
cmd_str = "box %gum %gum %gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (POLY_CONTACT_LAYER)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (LOCALI_CONTACT_LAYER)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (M1_CONTACT_LAYER)
fout.write(cmd_str)
#substrate contacts
for x_index in range(ARRAY_SIZE):
for y_index in range(ARRAY_SIZE):
if((x_index+y_index)%2==0):
x_center = PITCH * x_index + PITCH/2
y_center = PITCH * y_index + PITCH/2
#fill source area with diff
x1 = x_center - PITCH/2 + 0.54
y1 = y_center - PITCH/2 + 0.54
x2 = x_center + PITCH/2 - 0.54
y2 = y_center + PITCH/2 - 0.54
cmd_str = "box %gum %gum %gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (DIFF_LAYER)
fout.write(cmd_str)
#contact area
x1 = x_center - PITCH/2 + 1.1
y1 = y_center - PITCH/2 + 1.1
x2 = x_center + PITCH/2 - 1.1
y2 = y_center + PITCH/2 - 1.1
cmd_str = "box %gum %gum %gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (SUBSTRATE_DIFF)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (LOCALI_CONTACT_LAYER)
fout.write(cmd_str)
x1 = x_center - PITCH/2 + 1.22
y1 = y_center - PITCH/2 + 1.22
x2 = x_center + PITCH/2 - 1.22
y2 = y_center + PITCH/2 - 1.22
cmd_str = "box %gum %gum %gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (SUBSTRATE_DIFF_CONTACT)
fout.write(cmd_str)
x1 = x_center - PITCH/2 + CHANNEL_LENGTH/2 + CHANNEL_LOCALI_SPACING + 0.17
y1 = y_center - PITCH/2 + CHANNEL_LENGTH/2 + CHANNEL_LOCALI_SPACING + 0.17
x2 = x_center + PITCH/2 - CHANNEL_LENGTH/2 - CHANNEL_LOCALI_SPACING - 0.17
y2 = y_center + PITCH/2 - CHANNEL_LENGTH/2 - CHANNEL_LOCALI_SPACING - 0.17
cmd_str = "box %gum %gum %gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str = "paint %s \n" % (LOCALI_LAYER)
fout.write(cmd_str)
# well and inner guard ring (well connected)
x1 = -WELL_EXTENTION
y1 = -WELL_EXTENTION
x2 = PITCH * ARRAY_SIZE + WELL_EXTENTION
y2 = PITCH * ARRAY_SIZE + WELL_EXTENTION
cmd_str = "box %gum %gum %gum %gum\n" % (x1, y1, x2, y2)
fout.write(cmd_str)
cmd_str | |
#!/usr/bin/env python
import configparser
import copy
import importlib
import logging
import re
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List, Optional
# import deepdiff
from javus.gppw import GlobalPlatformProWrapper
from javus.settings import PROJECT_ROOT
from javus.utils import SDKVersion, cd
from javus.utils import AttackConfigParser
# TODO add some log initializer
log = logging.getLogger(__file__)
# TODO add handler for printing
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s:%(asctime)s:%(name)s: %(message)s")
handler.setFormatter(formatter)
log.addHandler(handler)
class AbstractAttackExecutor(ABC):
@abstractmethod
def execute(self):
pass
class CommonStage:
install = "install"
send = "send"
uninstall = "uninstall"
class BaseAttackExecutor(AbstractAttackExecutor):
def __init__(
self,
card: "Card",
gp: GlobalPlatformProWrapper,
workdir: Path,
sdk: SDKVersion = None,
):
self.card = card
self.gp = gp
self.workdir = Path(workdir).resolve()
# FIXME
self.attack_name = self.workdir.name
self.aids = configparser.ConfigParser()
self.uninstall_stages = []
self.config = AttackConfigParser(strict=False)
self.installed_applets = []
self.stages = None
self._load_config()
try:
self.sdks = self.config.get_sdk_versions("BUILD", "versions")
except KeyError:
self.sdks = None
def _load_config(self) -> None:
self.config.read(self.workdir / "config.ini")
def _load_aids(self) -> None:
self.aids.read(self.workdir / "aids.ini")
def get_stages(self) -> List[dict]:
# TODO should we double check the content of the STAGES before
# proceeding? e.g. the types of the entries
# first load stages from `<attackname>`.py
stages = self.import_stages()
if stages is not None:
return copy.deepcopy(stages)
module_file = self.workdir / (self.attack_name + ".py")
raise ValueError(
"Cannot load Scenario.STAGES from %s. Does it exist?" % module_file
)
def import_stages(self) -> Optional[List[dict]]:
# the module name can be inferred from the paths
# TODO getting the module path feels a bit hackish - wonder if that works from other
# folders as well - it does
module_name = self.workdir.name
relative_module_path = (
str(self.workdir.relative_to(PROJECT_ROOT)).replace("/", ".")
+ "."
+ module_name
)
try:
stages = getattr(importlib.import_module(relative_module_path), "Scenario",)
return stages.STAGES
except (ModuleNotFoundError, AttributeError):
pass
return None
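# Illustrative sketch of a Scenario class that import_stages() can pick up from
# <attack_name>/<attack_name>.py. The keys are inferred from the stage handlers
# below (_install, _send, _assess_send, optional_stage) and are an assumption,
# not a documented schema; uninstall stages are queued automatically by _install:
#
#   class Scenario:
#       STAGES = [
#           {"name": "install", "path": "build/{version}/attack.cap"},
#           {"name": "send", "payload": "80 01 00 00 00", "expected": "9000",
#            "optional": True},
#       ]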
def _prepare_install(self, *args, **kwargs):
pass
def _install(self, path: str, sdk_version: SDKVersion, *args, **kwargs):
# value is a path/string that can include {version} for differentiating between
# different versions
if sdk_version is None:
self.sdk_version = self._determine_version()
else:
self.sdk_version = sdk_version
path = path.format(version=self.sdk_version.raw)
log.info("Attempt to install applet: %s", path)
with cd(self.workdir):
result = self.gp.install(path)
if result["returncode"] == 0:
uninstall_stage = {"name": "uninstall", "path": path, "installed": True}
else:
# when the installation is not successful we still want to add an uninstall stage
# and mark it as skipped
uninstall_stage = {
"name": "uninstall",
"path": path,
"installed": False,
}
self.uninstall_stages.append(uninstall_stage)
return result
def _assess_install(self, result, *args, **kwargs):
success = True
# FIXME few naive checks, but we can also use --dump on install command
# and make sure e.g. the status words are 9000
if result["returncode"] != 0:
success = False
if "CAP loaded" not in result["stdout"]:
success = False
# make sure it is in the CardState after the installation
result["success"] = success
return result
def _prepare_uninstall(self, *args, **kwargs):
pass
def _uninstall(self, path: str, sdk_version: SDKVersion, *args, **kwargs):
# result = []
# NOTE: self.sdk_version is set in _install, which is awkward but relied on here
path = path.format(version=self.sdk_version.raw)
# if self.installed_applets is not None:
# # attempt to uninstall the installed applets in reversed order
# while self.installed_applets:
# path = self.installed_applets.pop()
with cd(self.workdir):
result = self.gp.uninstall(path)
return result
def _assess_uninstall(self, result, *args, **kwargs):
success = True
if result["returncode"] != 0:
success = False
if "deleted" not in result["stdout"]:
success = False
result["success"] = success
return result
def construct_aid(self) -> bytes:
# FIXME this method is a gimmick to be overridden by the custom Executors
rid = bytes.fromhex(self.aids["BUILD"]["pkg.rid"])
pix = bytes.fromhex(self.aids["BUILD"]["applet.pix"])
aid = rid + pix
return aid
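# Illustrative aids.ini content matching the keys read above (an assumption, not a
# documented format); the applet AID is simply the RID and PIX concatenated:
#
#   [BUILD]
#   pkg.rid = A000000062
#   applet.pix = 03010C01
#
# -> construct_aid() == bytes.fromhex("A000000062") + bytes.fromhex("03010C01")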
def _prepare_send(self, *args, **kwargs):
pass
def _send(self, *args, payload: str, **kwargs):
# TODO prepare payload
aid = self.construct_aid()
# TODO payload may be of varying kinds of hex/int values
payload = self._parse_payload(payload)
return self.gp.apdu(payload, aid)
def _assess_send(self, result, *args, expected: str = "9000", **kwargs):
command_apdu = self._parse_payload(kwargs["payload"]).hex().upper()
success = True
if result["returncode"] != 0:
success = False
# TODO verify expected
# by default we expect 9000 status word
try:
if result["communication"][command_apdu]["status"] != expected:
success = False
except KeyError:
success = False
result["success"] = success
# FIXME maybe adding all kwargs is too much
result.update(kwargs)
return result
def _parse_payload(self, raw: str) -> bytes:
clean = self._clean_payload(raw)
if not clean:
return b""
separated = self._separate_payload(clean)
if separated:
try:
return bytes([int(x, 16) for x in separated])
except ValueError:
pass
try:
return bytes([int(x) for x in separated])
except ValueError:
pass
else:
# first assume it is a hexadecimal string without spaces or a 0x prefix
try:
return bytes.fromhex(clean)
except ValueError:
pass
# FIXME should raise an internal error indicating that the attack cannot continue
# TODO log it
raise RuntimeError("Cannot create a valid payload")
@staticmethod
def _separate_payload(raw: str) -> list:
comma_separated = raw.split(",")
if [raw] != comma_separated:
return [x.strip() for x in comma_separated]
space_separated = raw.split()
if [raw] != space_separated:
return [x.strip() for x in space_separated]
return []
@staticmethod
def _clean_payload(raw: str) -> str:
# remove excess whitespace
stripped = raw.strip()
# reduce whitespace
reduced = re.sub(r"\s+", " ", stripped)
return reduced
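# Worked examples of the parsing above (illustrative inputs):
#
#   "80 CA 9F 7F 00"  -> space-separated hex   -> b"\x80\xca\x9f\x7f\x00"
#   "128, 202, 159"   -> comma-separated ints  -> b"\x80\xca\x9f"
#   "80CA9F7F00"      -> contiguous hex string -> b"\x80\xca\x9f\x7f\x00"
#   "not a payload"   -> RuntimeError("Cannot create a valid payload")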
def possible_versions(self) -> List["SDKVersion"]:
"""
Returns the intersection of SDKVersions the attack can be built for
and the ones supported by the Card
"""
attack_sdk_versions = SDKVersion.from_list(
self.config["BUILD"]["versions"], sep=","
)
return list(set(attack_sdk_versions).intersection(set(self.card.sdks)))
def _determine_version(self) -> "SDKVersion":
# determine the newest SDK version supported both by the card and the attack
attack_versions = SDKVersion.from_list(self.config["BUILD"]["versions"])
try:
newest = list(set(attack_versions).intersection(set(self.card.sdks)))[-1]
except IndexError:
newest = attack_versions[0]
log.warning(
"Could not determine SDK Version, defaulting to '%s'", str(newest)
)
return newest
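# Illustrative example of the selection above (version names are assumptions):
# if the attack builds for [jc221, jc222, jc305] and the card reports
# [jc222, jc304], the only common version jc222 is chosen; with no overlap an
# IndexError is caught, the attack's first listed version is used and a warning
# is logged.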
def execute(self, sdk_version=None, **kwargs) -> list:
self._load_aids()
stages = self.get_stages()
self.report = []
n_stages = self.get_stages_len(stages)
x = 1
# FIXME print successes of stages
# FIXME stop on SCARD_NO_TRANSANCT in STDOUT/STDERR
for i, stage_data in enumerate(stages):
stage = stage_data.pop("name")
result = self._run_stage(
stage, **stage_data, sdk_version=sdk_version, **kwargs
)
try:
success = "pass" if result["success"] else "fail"
except KeyError:
success = ""
print(" [%2d/%2d] %s: %s" % (x, n_stages, stage, success))
x += 1
result["name"] = stage
result["skipped"] = False
# if i:
# result["diff-state"] = deepdiff.DeepDiff(
# result["state"], self.report[-1]["state"]
# ).to_dict()
# else:
# result["diff-state"] = {}
self.report.append(result)
if not self.optional_stage(stage, stage_data) and not result["success"]:
break
# fill in the rest of the stages, that were not executed
for stage_data in stages[i + 1 :]:
stage = stage_data.pop("name")
print(" [%2d/%2d] %s: skip" % (x, n_stages, stage))
x += 1
# print(stage)
result = {
"name": stage,
"success": False,
"skipped": True,
# "state": None,
# "diff-state": None,
}
try:
# in case we skip, we just copy the previous state - assuming that skipping
# a stage cannot change the data on the card
result["state"] = self.report[-1]["state"]
except KeyError:
result["state"] = None
self.report.append(result)
while self.uninstall_stages:
# FIXME add 'pass' 'fail' to the print
stage_data = self.uninstall_stages.pop()
stage = stage_data.pop("name")
print(" [%2d/%2d] %s" % (x, n_stages, stage), end="")
x += 1
if stage_data["installed"]:
result = self._run_stage(
stage, **stage_data, sdk_version=sdk_version, **kwargs
)
result["skipped"] = False
if result["success"]:
print(" pass")
else:
print(" fail")
else:
result = copy.deepcopy(stage_data)
result["skipped"] = True
print(" skip")
result["name"] = stage
# if self.report[-1]["state"] is not None:
# result["diff-state"] = deepdiff.DeepDiff(
# result["state"], self.report[-1]["state"]
# ).to_dict()
# else:
# result["diff-state"] = {}
# try:
# # in case we skip, we just copy the previous state - assuming, that skipping
# # a stage cannot change the data on the card
# result["state"] = self.report[-1]["stage"]
# except KeyError:
# result["state"] = None
self.report.append(result)
# FIXME add also the short description of the attacks
return self.report
@staticmethod
def optional_stage(stage: str, stage_data: dict) -> bool:
try:
return stage_data["optional"]
except KeyError:
if stage == CommonStage.install:
# install is required by default
return False
elif stage == CommonStage.uninstall:
# uninstall stage is optional as it makes sense to continue uninstalling
# applets even if some cannot be uninstalled
return True
# any other stage is deemed required
return False
def _run_stage(self, raw_stage: str, *args, **kwargs):
stage = self._create_stage_name(raw_stage)
prepare_stage = "_prepare_" + stage
try:
prepare_method = getattr(self, prepare_stage)
except AttributeError:
log.info("Cannot find stage method '%s'", prepare_stage)
# prepare_method is optional, so fall back to a no-op that accepts *args, **kwargs
def prepare_method(*args, | |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class nstrafficdomain(base_resource) :
"""Configuration for Traffic Domain resource."""
def __init__(self) :
self._td = 0
self._aliasname = ""
self._vmac = ""
self._state = ""
self.___count = 0
@property
def td(self) :
"""Integer value that uniquely identifies a traffic domain.<br/>Minimum length = 1<br/>Maximum length = 4094."""
try :
return self._td
except Exception as e:
raise e
@td.setter
def td(self, td) :
"""Integer value that uniquely identifies a traffic domain.<br/>Minimum length = 1<br/>Maximum length = 4094
:param td:
"""
try :
self._td = td
except Exception as e:
raise e
@property
def aliasname(self) :
"""Name of traffic domain being added.<br/>Minimum length = 1<br/>Maximum length = 31."""
try :
return self._aliasname
except Exception as e:
raise e
@aliasname.setter
def aliasname(self, aliasname) :
"""Name of traffic domain being added.<br/>Minimum length = 1<br/>Maximum length = 31
:param aliasname:
"""
try :
self._aliasname = aliasname
except Exception as e:
raise e
@property
def vmac(self) :
"""Associate the traffic domain with a VMAC address instead of with VLANs. The NetScaler ADC then sends the VMAC address of the traffic domain in all responses to ARP queries for network entities in that domain. As a result, the ADC can segregate subsequent incoming traffic for this traffic domain on the basis of the destination MAC address, because the destination MAC address is the VMAC address of the traffic domain. After creating entities on a traffic domain, you can easily manage and monitor them by performing traffic domain level operations.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._vmac
except Exception as e:
raise e
@vmac.setter
def vmac(self, vmac) :
"""Associate the traffic domain with a VMAC address instead of with VLANs. The NetScaler ADC then sends the VMAC address of the traffic domain in all responses to ARP queries for network entities in that domain. As a result, the ADC can segregate subsequent incoming traffic for this traffic domain on the basis of the destination MAC address, because the destination MAC address is the VMAC address of the traffic domain. After creating entities on a traffic domain, you can easily manage and monitor them by performing traffic domain level operations.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
:param vmac:
"""
try :
self._vmac = vmac
except Exception as e:
raise e
@property
def state(self) :
"""The state of TrafficDmain.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._state
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(nstrafficdomain_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nstrafficdomain
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.td is not None :
return str(self.td)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
"""Use this API to add nstrafficdomain.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
addresource = nstrafficdomain()
addresource.td = resource.td
addresource.aliasname = resource.aliasname
addresource.vmac = resource.vmac
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ nstrafficdomain() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].td = resource[i].td
addresources[i].aliasname = resource[i].aliasname
addresources[i].vmac = resource[i].vmac
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
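# Illustrative usage sketch (not part of the generated binding). The client object
# is an assumption - any authenticated nitro_service session - while the fields
# come from the properties defined above:
#
#   resource = nstrafficdomain()
#   resource.td = 2
#   resource.aliasname = "dmz"
#   resource.vmac = "DISABLED"
#   nstrafficdomain.add(client, resource)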
@classmethod
def delete(cls, client, resource) :
"""Use this API to delete nstrafficdomain.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
deleteresource = nstrafficdomain()
if type(resource) != type(deleteresource):
deleteresource.td = resource
else :
deleteresource.td = resource.td
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ nstrafficdomain() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].td = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ nstrafficdomain() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].td = resource[i].td
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def clear(cls, client, resource) :
"""Use this API to clear nstrafficdomain.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
clearresource = nstrafficdomain()
clearresource.td = resource.td
return clearresource.perform_operation(client,"clear")
else :
if (resource and len(resource) > 0) :
clearresources = [ nstrafficdomain() for _ in range(len(resource))]
for i in range(len(resource)) :
clearresources[i].td = resource[i].td
result = cls.perform_operation_bulk_request(client, clearresources,"clear")
return result
except Exception as e :
raise e
@classmethod
def enable(cls, client, resource) :
"""Use this API to enable nstrafficdomain.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
enableresource = nstrafficdomain()
if type(resource) != type(enableresource):
enableresource.td = resource
else :
enableresource.td = resource.td
return enableresource.perform_operation(client,"enable")
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
enableresources = [ nstrafficdomain() for _ in range(len(resource))]
for i in range(len(resource)) :
enableresources[i].td = resource[i]
else :
if (resource and len(resource) > 0) :
enableresources = [ nstrafficdomain() for _ in range(len(resource))]
for i in range(len(resource)) :
enableresources[i].td = resource[i].td
result = cls.perform_operation_bulk_request(client, enableresources,"enable")
return result
except Exception as e :
raise e
@classmethod
def disable(cls, client, resource) :
"""Use this API to disable nstrafficdomain.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
disableresource = nstrafficdomain()
if type(resource) != type(disableresource):
disableresource.td = resource
else :
disableresource.td = resource.td
return disableresource.perform_operation(client,"disable")
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
disableresources = [ nstrafficdomain() for _ in range(len(resource))]
for i in range(len(resource)) :
disableresources[i].td = resource[i]
else :
if (resource and len(resource) > 0) :
disableresources = [ nstrafficdomain() for _ in range(len(resource))]
for i in range(len(resource)) :
disableresources[i].td = resource[i].td
result = cls.perform_operation_bulk_request(client, disableresources,"disable")
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
"""Use this API to fetch all the nstrafficdomain resources that are configured on netscaler.
:param client:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
if not name :
obj = nstrafficdomain()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = nstrafficdomain()
obj.td = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [nstrafficdomain() for _ in range(len(name))]
obj = [nstrafficdomain() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = nstrafficdomain()
obj[i].td = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
"""Use this API to fetch filtered set of nstrafficdomain resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param client:
:param filter_:
"""
try :
obj = nstrafficdomain()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
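# Illustrative call matching the filter format documented above; the attribute
# name used in the filter is an assumption based on this class's properties:
#
#   enabled_tds = nstrafficdomain.get_filtered(client, "state:ENABLED")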
@classmethod
def count(cls, client) :
"""Use this API to count the nstrafficdomain resources configured | |
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_LinkLen(self)
def set_Rx_Dia(self, Rx_Dia):
"""
set_Rx_Dia(Geometric_Loss_ff_sptr self, float Rx_Dia)
Set geometric loss receiver aperture diameter.
"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_set_Rx_Dia(self, Rx_Dia)
def Rx_Dia(self):
"""
Rx_Dia(Geometric_Loss_ff_sptr self) -> float
Return current geometric loss receiver aperture diameter.
"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_Rx_Dia(self)
def history(self):
"""history(Geometric_Loss_ff_sptr self) -> unsigned int"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_history(self)
def declare_sample_delay(self, *args):
"""
declare_sample_delay(Geometric_Loss_ff_sptr self, int which, int delay)
declare_sample_delay(Geometric_Loss_ff_sptr self, unsigned int delay)
"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_declare_sample_delay(self, *args)
def sample_delay(self, which):
"""sample_delay(Geometric_Loss_ff_sptr self, int which) -> unsigned int"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_sample_delay(self, which)
def output_multiple(self):
"""output_multiple(Geometric_Loss_ff_sptr self) -> int"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_output_multiple(self)
def relative_rate(self):
"""relative_rate(Geometric_Loss_ff_sptr self) -> double"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_relative_rate(self)
def start(self):
"""start(Geometric_Loss_ff_sptr self) -> bool"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_start(self)
def stop(self):
"""stop(Geometric_Loss_ff_sptr self) -> bool"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_stop(self)
def nitems_read(self, which_input):
"""nitems_read(Geometric_Loss_ff_sptr self, unsigned int which_input) -> uint64_t"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_nitems_read(self, which_input)
def nitems_written(self, which_output):
"""nitems_written(Geometric_Loss_ff_sptr self, unsigned int which_output) -> uint64_t"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_nitems_written(self, which_output)
def max_noutput_items(self):
"""max_noutput_items(Geometric_Loss_ff_sptr self) -> int"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_max_noutput_items(self)
def set_max_noutput_items(self, m):
"""set_max_noutput_items(Geometric_Loss_ff_sptr self, int m)"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_set_max_noutput_items(self, m)
def unset_max_noutput_items(self):
"""unset_max_noutput_items(Geometric_Loss_ff_sptr self)"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_unset_max_noutput_items(self)
def is_set_max_noutput_items(self):
"""is_set_max_noutput_items(Geometric_Loss_ff_sptr self) -> bool"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_is_set_max_noutput_items(self)
def set_min_noutput_items(self, m):
"""set_min_noutput_items(Geometric_Loss_ff_sptr self, int m)"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_set_min_noutput_items(self, m)
def min_noutput_items(self):
"""min_noutput_items(Geometric_Loss_ff_sptr self) -> int"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_min_noutput_items(self)
def max_output_buffer(self, i):
"""max_output_buffer(Geometric_Loss_ff_sptr self, int i) -> long"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_max_output_buffer(self, i)
def set_max_output_buffer(self, *args):
"""
set_max_output_buffer(Geometric_Loss_ff_sptr self, long max_output_buffer)
set_max_output_buffer(Geometric_Loss_ff_sptr self, int port, long max_output_buffer)
"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_set_max_output_buffer(self, *args)
def min_output_buffer(self, i):
"""min_output_buffer(Geometric_Loss_ff_sptr self, int i) -> long"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_min_output_buffer(self, i)
def set_min_output_buffer(self, *args):
"""
set_min_output_buffer(Geometric_Loss_ff_sptr self, long min_output_buffer)
set_min_output_buffer(Geometric_Loss_ff_sptr self, int port, long min_output_buffer)
"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_set_min_output_buffer(self, *args)
def pc_noutput_items(self):
"""pc_noutput_items(Geometric_Loss_ff_sptr self) -> float"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_noutput_items(self)
def pc_noutput_items_avg(self):
"""pc_noutput_items_avg(Geometric_Loss_ff_sptr self) -> float"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_noutput_items_avg(self)
def pc_noutput_items_var(self):
"""pc_noutput_items_var(Geometric_Loss_ff_sptr self) -> float"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_noutput_items_var(self)
def pc_nproduced(self):
"""pc_nproduced(Geometric_Loss_ff_sptr self) -> float"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_nproduced(self)
def pc_nproduced_avg(self):
"""pc_nproduced_avg(Geometric_Loss_ff_sptr self) -> float"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_nproduced_avg(self)
def pc_nproduced_var(self):
"""pc_nproduced_var(Geometric_Loss_ff_sptr self) -> float"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_nproduced_var(self)
def pc_input_buffers_full(self, *args):
"""
pc_input_buffers_full(Geometric_Loss_ff_sptr self, int which) -> float
pc_input_buffers_full(Geometric_Loss_ff_sptr self) -> pmt_vector_float
"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_input_buffers_full(self, *args)
def pc_input_buffers_full_avg(self, *args):
"""
pc_input_buffers_full_avg(Geometric_Loss_ff_sptr self, int which) -> float
pc_input_buffers_full_avg(Geometric_Loss_ff_sptr self) -> pmt_vector_float
"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_input_buffers_full_avg(self, *args)
def pc_input_buffers_full_var(self, *args):
"""
pc_input_buffers_full_var(Geometric_Loss_ff_sptr self, int which) -> float
pc_input_buffers_full_var(Geometric_Loss_ff_sptr self) -> pmt_vector_float
"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_input_buffers_full_var(self, *args)
def pc_output_buffers_full(self, *args):
"""
pc_output_buffers_full(Geometric_Loss_ff_sptr self, int which) -> float
pc_output_buffers_full(Geometric_Loss_ff_sptr self) -> pmt_vector_float
"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_output_buffers_full(self, *args)
def pc_output_buffers_full_avg(self, *args):
"""
pc_output_buffers_full_avg(Geometric_Loss_ff_sptr self, int which) -> float
pc_output_buffers_full_avg(Geometric_Loss_ff_sptr self) -> pmt_vector_float
"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_output_buffers_full_avg(self, *args)
def pc_output_buffers_full_var(self, *args):
"""
pc_output_buffers_full_var(Geometric_Loss_ff_sptr self, int which) -> float
pc_output_buffers_full_var(Geometric_Loss_ff_sptr self) -> pmt_vector_float
"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_output_buffers_full_var(self, *args)
def pc_work_time(self):
"""pc_work_time(Geometric_Loss_ff_sptr self) -> float"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_work_time(self)
def pc_work_time_avg(self):
"""pc_work_time_avg(Geometric_Loss_ff_sptr self) -> float"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_work_time_avg(self)
def pc_work_time_var(self):
"""pc_work_time_var(Geometric_Loss_ff_sptr self) -> float"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_work_time_var(self)
def pc_work_time_total(self):
"""pc_work_time_total(Geometric_Loss_ff_sptr self) -> float"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_work_time_total(self)
def pc_throughput_avg(self):
"""pc_throughput_avg(Geometric_Loss_ff_sptr self) -> float"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_pc_throughput_avg(self)
def set_processor_affinity(self, mask):
"""set_processor_affinity(Geometric_Loss_ff_sptr self, std::vector< int,std::allocator< int > > const & mask)"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_set_processor_affinity(self, mask)
def unset_processor_affinity(self):
"""unset_processor_affinity(Geometric_Loss_ff_sptr self)"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_unset_processor_affinity(self)
def processor_affinity(self):
"""processor_affinity(Geometric_Loss_ff_sptr self) -> std::vector< int,std::allocator< int > >"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_processor_affinity(self)
def active_thread_priority(self):
"""active_thread_priority(Geometric_Loss_ff_sptr self) -> int"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_active_thread_priority(self)
def thread_priority(self):
"""thread_priority(Geometric_Loss_ff_sptr self) -> int"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_thread_priority(self)
def set_thread_priority(self, priority):
"""set_thread_priority(Geometric_Loss_ff_sptr self, int priority) -> int"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_set_thread_priority(self, priority)
def name(self):
"""name(Geometric_Loss_ff_sptr self) -> std::string"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_name(self)
def symbol_name(self):
"""symbol_name(Geometric_Loss_ff_sptr self) -> std::string"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_symbol_name(self)
def input_signature(self):
"""input_signature(Geometric_Loss_ff_sptr self) -> io_signature_sptr"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_input_signature(self)
def output_signature(self):
"""output_signature(Geometric_Loss_ff_sptr self) -> io_signature_sptr"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_output_signature(self)
def unique_id(self):
"""unique_id(Geometric_Loss_ff_sptr self) -> long"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_unique_id(self)
def to_basic_block(self):
"""to_basic_block(Geometric_Loss_ff_sptr self) -> basic_block_sptr"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_to_basic_block(self)
def check_topology(self, ninputs, noutputs):
"""check_topology(Geometric_Loss_ff_sptr self, int ninputs, int noutputs) -> bool"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_check_topology(self, ninputs, noutputs)
def alias(self):
"""alias(Geometric_Loss_ff_sptr self) -> std::string"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_alias(self)
def set_block_alias(self, name):
"""set_block_alias(Geometric_Loss_ff_sptr self, std::string name)"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_set_block_alias(self, name)
def _post(self, which_port, msg):
"""_post(Geometric_Loss_ff_sptr self, swig_int_ptr which_port, swig_int_ptr msg)"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr__post(self, which_port, msg)
def message_ports_in(self):
"""message_ports_in(Geometric_Loss_ff_sptr self) -> swig_int_ptr"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_message_ports_in(self)
def message_ports_out(self):
"""message_ports_out(Geometric_Loss_ff_sptr self) -> swig_int_ptr"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_message_ports_out(self)
def message_subscribers(self, which_port):
"""message_subscribers(Geometric_Loss_ff_sptr self, swig_int_ptr which_port) -> swig_int_ptr"""
return _FSO_Comm_swig.Geometric_Loss_ff_sptr_message_subscribers(self, which_port)
Geometric_Loss_ff_sptr_swigregister = _FSO_Comm_swig.Geometric_Loss_ff_sptr_swigregister
Geometric_Loss_ff_sptr_swigregister(Geometric_Loss_ff_sptr)
Geometric_Loss_ff_sptr.__repr__ = lambda self: "<gr_block %s (%d)>" % (self.name(), self.unique_id())
Geometric_Loss_ff = Geometric_Loss_ff.make;
class Laser_ff(object):
"""
FSO Laser Module.
The block generates optical power output based on average power and extinction ratio.
Constructor Specific Documentation:
Make a laser module block.
Args:
P_avg : average optical power (W)
Wavelen : optical beam wavelength (m)
ExtRatio : extinction ratio
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
def make(P_avg, Wavelen, ExtRatio):
"""
make(float P_avg, float Wavelen, float ExtRatio) -> Laser_ff_sptr
FSO Laser Module.
The block generates optical power output based on average power and extinction ratio.
Constructor Specific Documentation:
Make a laser module block.
Args:
P_avg : average optical power (W)
Wavelen : optical beam wavelength (m)
ExtRatio : extinction ratio
"""
return _FSO_Comm_swig.Laser_ff_make(P_avg, Wavelen, ExtRatio)
make = staticmethod(make)
def set_P_avg(self, P_avg):
"""
set_P_avg(Laser_ff self, float P_avg)
Set laser module average optical power.
"""
return _FSO_Comm_swig.Laser_ff_set_P_avg(self, P_avg)
def P_avg(self):
"""
P_avg(Laser_ff self) -> float
Return current laser module average optical power.
"""
return _FSO_Comm_swig.Laser_ff_P_avg(self)
def set_Wavelen(self, Wavelen):
"""
set_Wavelen(Laser_ff self, float Wavelen)
Set laser module wavelength.
"""
return _FSO_Comm_swig.Laser_ff_set_Wavelen(self, Wavelen)
def Wavelen(self):
"""
Wavelen(Laser_ff self) -> float
Return current laser module wavelength.
"""
return _FSO_Comm_swig.Laser_ff_Wavelen(self)
def set_ExtRatio(self, ExtRatio):
"""
set_ExtRatio(Laser_ff self, float ExtRatio)
Set current laser module extinction ratio.
"""
return _FSO_Comm_swig.Laser_ff_set_ExtRatio(self, ExtRatio)
def ExtRatio(self):
"""
ExtRatio(Laser_ff self) -> float
Return current laser module extinction ratio.
"""
return _FSO_Comm_swig.Laser_ff_ExtRatio(self)
__swig_destroy__ = _FSO_Comm_swig.delete_Laser_ff
__del__ = lambda self: None
Laser_ff_swigregister = _FSO_Comm_swig.Laser_ff_swigregister
Laser_ff_swigregister(Laser_ff)
def Laser_ff_make(P_avg, Wavelen, ExtRatio):
"""
Laser_ff_make(float P_avg, float Wavelen, float ExtRatio) -> Laser_ff_sptr
FSO Laser Module.
The block generates optical power output based on average power and extinction ratio.
Constructor Specific Documentation:
Make a laser module block.
Args:
P_avg : average optical power (W)
Wavelen : optical beam wavelength (m)
ExtRatio : extinction ratio
"""
return _FSO_Comm_swig.Laser_ff_make(P_avg, Wavelen, ExtRatio)
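# Illustrative usage sketch (not part of the SWIG-generated bindings). The import
# name FSO_Comm and the parameter values are assumptions; the factory signature
# follows the docstring above:
#
#   import FSO_Comm
#   laser = FSO_Comm.Laser_ff(1e-3, 1550e-9, 10.0)  # 1 mW average, 1550 nm, ext. ratio 10
#   laser.set_P_avg(2e-3)                           # setters defined on the block proxy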
class Laser_ff_sptr(object):
"""Proxy of C++ boost::shared_ptr<(gr::FSO_Comm::Laser_ff)> class."""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(boost::shared_ptr<(gr::FSO_Comm::Laser_ff)> self) -> Laser_ff_sptr
__init__(boost::shared_ptr<(gr::FSO_Comm::Laser_ff)> self, Laser_ff p) -> Laser_ff_sptr
"""
this = _FSO_Comm_swig.new_Laser_ff_sptr(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __deref__(self):
"""__deref__(Laser_ff_sptr self) -> Laser_ff"""
return _FSO_Comm_swig.Laser_ff_sptr___deref__(self)
__swig_destroy__ = _FSO_Comm_swig.delete_Laser_ff_sptr
__del__ = lambda self: None
def make(self, P_avg, Wavelen, ExtRatio):
"""
make(Laser_ff_sptr self, float P_avg, float Wavelen, float ExtRatio) -> Laser_ff_sptr
FSO Laser Module.
The block generates optical power output based on average power and extinction ratio.
Constructor Specific Documentation:
Make a laser module block.
Args:
P_avg : average optical power (W)
Wavelen : optical beam wavelength (m)
ExtRatio : extinction ratio
"""
return _FSO_Comm_swig.Laser_ff_sptr_make(self, P_avg, Wavelen, ExtRatio)
def set_P_avg(self, P_avg):
"""
set_P_avg(Laser_ff_sptr self, float P_avg)
Set laser module average optical power.
"""
return _FSO_Comm_swig.Laser_ff_sptr_set_P_avg(self, P_avg)
def P_avg(self):
"""
P_avg(Laser_ff_sptr self) -> float
Return current laser module average optical power.
"""
return _FSO_Comm_swig.Laser_ff_sptr_P_avg(self)
def set_Wavelen(self, Wavelen):
"""
set_Wavelen(Laser_ff_sptr self, float Wavelen)
Set laser module wavelength.
"""
return _FSO_Comm_swig.Laser_ff_sptr_set_Wavelen(self, Wavelen)
def Wavelen(self):
"""
Wavelen(Laser_ff_sptr self) -> float
Return current laser module wavelength.
"""
return _FSO_Comm_swig.Laser_ff_sptr_Wavelen(self)
def set_ExtRatio(self, ExtRatio):
"""
set_ExtRatio(Laser_ff_sptr self, float ExtRatio)
Set current laser module extinction ratio.
"""
return _FSO_Comm_swig.Laser_ff_sptr_set_ExtRatio(self, ExtRatio)
def ExtRatio(self):
"""
ExtRatio(Laser_ff_sptr self) -> float
Return current laser module extinction ratio.
"""
return _FSO_Comm_swig.Laser_ff_sptr_ExtRatio(self)
def history(self):
"""history(Laser_ff_sptr self) -> unsigned int"""
return _FSO_Comm_swig.Laser_ff_sptr_history(self)
def declare_sample_delay(self, *args):
"""
declare_sample_delay(Laser_ff_sptr self, int which, int delay)
declare_sample_delay(Laser_ff_sptr self, unsigned int delay)
"""
return _FSO_Comm_swig.Laser_ff_sptr_declare_sample_delay(self, *args)
def sample_delay(self, which):
"""sample_delay(Laser_ff_sptr self, int which) -> unsigned int"""
return _FSO_Comm_swig.Laser_ff_sptr_sample_delay(self, which)
def output_multiple(self):
"""output_multiple(Laser_ff_sptr self) -> int"""
return _FSO_Comm_swig.Laser_ff_sptr_output_multiple(self)
def relative_rate(self):
"""relative_rate(Laser_ff_sptr self) -> double"""
return _FSO_Comm_swig.Laser_ff_sptr_relative_rate(self)
def start(self):
"""start(Laser_ff_sptr self) -> bool"""
return _FSO_Comm_swig.Laser_ff_sptr_start(self)
def stop(self):
"""stop(Laser_ff_sptr self) -> bool"""
return _FSO_Comm_swig.Laser_ff_sptr_stop(self)
def nitems_read(self, which_input):
"""nitems_read(Laser_ff_sptr self, unsigned int which_input) -> uint64_t"""
return _FSO_Comm_swig.Laser_ff_sptr_nitems_read(self, which_input)
def nitems_written(self, which_output):
"""nitems_written(Laser_ff_sptr self, unsigned int which_output) -> uint64_t"""
return _FSO_Comm_swig.Laser_ff_sptr_nitems_written(self, which_output)
def max_noutput_items(self):
"""max_noutput_items(Laser_ff_sptr self) -> int"""
return _FSO_Comm_swig.Laser_ff_sptr_max_noutput_items(self)
def set_max_noutput_items(self, m):
"""set_max_noutput_items(Laser_ff_sptr self, int | |
1
# Main part of the encryption algorithm, the number cruncher :)
def __des_crypt(self, block, crypt_type):
"""Crypt the block of data through DES bit-manipulation"""
block = self.__permutate(des.__ip, block)
Bn = [0] * 32
self.L = block[:32]
self.R = block[32:]
# Encryption starts from Kn[1] through to Kn[16]
if crypt_type == des.ENCRYPT:
iteration = 0
iteration_adjustment = 1
# Decryption starts from Kn[16] down to Kn[1]
else:
iteration = 15
iteration_adjustment = -1
i = 0
while i < 16:
# Make a copy of R[i-1], this will later become L[i]
tempR = self.R[:]
# Permutate R[i - 1] to start creating R[i]
self.R = self.__permutate(des.__expansion_table, self.R)
# Exclusive or R[i - 1] with K[i], create B[1] to B[8] whilst here
self.R = list(map(lambda x, y: x ^ y, self.R, self.Kn[iteration]))  # list() so the result can be sliced under Python 3
B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]]
# Optimization: Replaced below commented code with above
#j = 0
#B = []
#while j < len(self.R):
# self.R[j] = self.R[j] ^ self.Kn[iteration][j]
# j += 1
# if j % 6 == 0:
# B.append(self.R[j-6:j])
# Permutate B[1] to B[8] using the S-Boxes
j = 0
pos = 0
while j < 8:
# Work out the offsets
m = (B[j][0] << 1) + B[j][5]
n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4]
# Find the permutation value
v = des.__sbox[j][(m << 4) + n]
# Turn value into bits, add it to result: Bn
Bn[pos] = (v & 8) >> 3
Bn[pos + 1] = (v & 4) >> 2
Bn[pos + 2] = (v & 2) >> 1
Bn[pos + 3] = v & 1
pos += 4
j += 1
# Permutate the concatenation of B[1] to B[8] (Bn)
self.R = self.__permutate(des.__p, Bn)
# Xor with L[i - 1]
self.R = list(map(lambda x, y: x ^ y, self.R, self.L))
# Optimization: This now replaces the below commented code
#j = 0
#while j < len(self.R):
# self.R[j] = self.R[j] ^ self.L[j]
# j += 1
# L[i] becomes R[i - 1]
self.L = tempR
i += 1
iteration += iteration_adjustment
# Final permutation of R[16]L[16]
self.final = self.__permutate(des.__fp, self.R + self.L)
return self.final
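# Summary of the Feistel recurrence implemented above (illustrative restatement
# using the same L/R/Kn names):
#
#   L[i] = R[i-1]
#   R[i] = L[i-1] XOR f(R[i-1], Kn[i])
#
# where f expands R[i-1] to 48 bits, XORs it with the round key, pushes the eight
# 6-bit groups through the S-boxes and applies the P permutation. Decryption runs
# the identical loop with the round keys taken in reverse order.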
# Data to be encrypted/decrypted
def crypt(self, data, crypt_type):
"""Crypt the data in blocks, running it through des_crypt()"""
# Error check the data
if not data:
return ''
if len(data) % self.block_size != 0:
if crypt_type == des.DECRYPT: # Decryption must work on 8 byte blocks
raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n.")
if not self.getPadding():
raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n. Try setting the optional padding character")
else:
data += (self.block_size - (len(data) % self.block_size)) * self.getPadding()
# print "Len of data: %f" % (len(data) / self.block_size)
if self.getMode() == CBC:
if self.getIV():
iv = self.__String_to_BitList(self.getIV())
else:
raise ValueError("For CBC mode, you must supply the Initial Value (IV) for ciphering")
# Split the data into blocks, crypting each one separately
i = 0
dict = {}
result = []
#cached = 0
#lines = 0
while i < len(data):
# Test code for caching encryption results
#lines += 1
#if dict.has_key(data[i:i+8]):
#print "Cached result for: %s" % data[i:i+8]
# cached += 1
# result.append(dict[data[i:i+8]])
# i += 8
# continue
block = self.__String_to_BitList(data[i:i+8])
# Xor with IV if using CBC mode
if self.getMode() == CBC:
if crypt_type == des.ENCRYPT:
block = list(map(lambda x, y: x ^ y, block, iv))
#j = 0
#while j < len(block):
# block[j] = block[j] ^ iv[j]
# j += 1
processed_block = self.__des_crypt(block, crypt_type)
if crypt_type == des.DECRYPT:
processed_block = list(map(lambda x, y: x ^ y, processed_block, iv))
#j = 0
#while j < len(processed_block):
# processed_block[j] = processed_block[j] ^ iv[j]
# j += 1
iv = block
else:
iv = processed_block
else:
processed_block = self.__des_crypt(block, crypt_type)
# Add the resulting crypted block to our list
#d = self.__BitList_to_String(processed_block)
#result.append(d)
result.append(self.__BitList_to_String(processed_block))
#dict[data[i:i+8]] = d
i += 8
# print "Lines: %d, cached: %d" % (lines, cached)
# Return the full crypted string
if _pythonMajorVersion < 3:
return ''.join(result)
else:
return bytes.fromhex('').join(result)
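# CBC chaining as implemented above (illustrative restatement), with C[0] = IV:
#
#   encryption:  C[i] = DES_K(P[i] XOR C[i-1])
#   decryption:  P[i] = DES_K^-1(C[i]) XOR C[i-1]
#
# In ECB mode each 8-byte block is simply processed independently.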
def encrypt(self, data, pad=None, padmode=None):
"""encrypt(data, [pad], [padmode]) -> bytes
data : Bytes to be encrypted
pad : Optional argument for encryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be encrypted
with the already specified key. Data does not have to be a
multiple of 8 bytes if the padding character is supplied, or
the padmode is set to PAD_PKCS5, as bytes will then be added to
ensure the padded data is a multiple of 8 bytes.
"""
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
data = self._padData(data, pad, padmode)
return self.crypt(data, des.ENCRYPT)
def decrypt(self, data, pad=None, padmode=None):
"""decrypt(data, [pad], [padmode]) -> bytes
data : Bytes to be decrypted
pad : Optional argument for decryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be decrypted
with the already specified key. In PAD_NORMAL mode, if the
optional padding character is supplied, then the un-encrypted
data will have the padding characters removed from the end of
the bytes. This pad removal only occurs on the last 8 bytes of
the data (last data block). In PAD_PKCS5 mode, the special
padding end markers will be removed from the data after decrypting.
"""
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
data = self.crypt(data, des.DECRYPT)
return self._unpadData(data, pad, padmode)
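# Illustrative usage sketch (doctest-style; key and IV values are arbitrary
# examples, and the module name pyDes is assumed):
#
#   >>> from pyDes import des, CBC, PAD_PKCS5
#   >>> k = des(b"DESCRYPT", CBC, b"\0" * 8, pad=None, padmode=PAD_PKCS5)
#   >>> data = k.encrypt(b"Please encrypt my data")
#   >>> k.decrypt(data)
#   b'Please encrypt my data'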
#############################################################################
# Triple DES #
#############################################################################
class triple_des(_baseDes):
"""Triple DES encryption/decrytpion class
This algorithm uses the DES-EDE3 (when a 24 byte key is supplied) or
the DES-EDE2 (when a 16 byte key is supplied) encryption methods.
Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes.
pyDes.triple_des(key, [mode], [IV])
key -> Bytes containing the encryption key, must be either 16 or
24 bytes long
mode -> Optional argument for encryption type, can be either pyDes.ECB
(Electronic Code Book), pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Must be 8 bytes in length.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use
during all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or
PAD_PKCS5) to use during all encrypt/decrypt operations done
with this instance.
"""
def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
_baseDes.__init__(self, mode, IV, pad, padmode)
self.setKey(key)
def setKey(self, key):
"""Will set the crypting key for this object. Either 16 or 24 bytes long."""
self.key_size = 24 # Use DES-EDE3 mode
if len(key) != self.key_size:
if len(key) == 16: # Use DES-EDE2 mode
self.key_size = 16
else:
raise ValueError("Invalid triple DES key size. Key must be either 16 or 24 bytes long")
if self.getMode() == CBC:
if not self.getIV():
# Use the first 8 bytes of the key
self._iv = key[:self.block_size]
if len(self.getIV()) != self.block_size:
raise ValueError("Invalid IV, must be 8 bytes in length")
self.__key1 = des(key[:8], self._mode, self._iv,
self._padding, self._padmode)
self.__key2 = des(key[8:16], self._mode, self._iv,
self._padding, self._padmode)
if self.key_size == 16:
self.__key3 = self.__key1
else:
self.__key3 = des(key[16:], self._mode, self._iv,
self._padding, self._padmode)
_baseDes.setKey(self, key)
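# Illustrative key handling (key bytes are arbitrary examples): a 24-byte key
# selects DES-EDE3 with three independent single-DES keys, while a 16-byte key
# selects DES-EDE2 and reuses the first 8 key bytes for the third stage:
#
#   >>> from pyDes import triple_des, PAD_PKCS5
#   >>> t = triple_des(b"0123456789abcdef", padmode=PAD_PKCS5)  # 16 bytes -> EDE2
#   >>> t.decrypt(t.encrypt(b"secret"))
#   b'secret'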
# Override setter methods to work on all 3 keys.
def setMode(self, mode):
"""Sets the type of crypting mode, pyDes.ECB or pyDes.CBC"""
_baseDes.setMode(self, mode)
for key in (self.__key1, self.__key2, self.__key3):
key.setMode(mode)
def setPadding(self, pad):
"""setPadding() -> bytes of length 1. Padding character."""
_baseDes.setPadding(self, pad)
for key in (self.__key1, self.__key2, self.__key3):
key.setPadding(pad)
def setPadMode(self, mode):
"""Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
_baseDes.setPadMode(self, mode)
for key in (self.__key1, | |
= CFUNCTYPE(None, GLenum, GLenum, POINTER(GLint)) # GL/glext.h:5223
PFNGLPIXELTRANSFORMPARAMETERFVEXTPROC = CFUNCTYPE(None, GLenum, GLenum, POINTER(GLfloat)) # GL/glext.h:5224
# EXT_pixel_transform_color_table (GL/glext.h:5227)
GL_EXT_pixel_transform_color_table = 1 # GL/glext.h:5228
# EXT_shared_texture_palette (GL/glext.h:5231)
GL_EXT_shared_texture_palette = 1 # GL/glext.h:5232
# EXT_separate_specular_color (GL/glext.h:5235)
GL_EXT_separate_specular_color = 1 # GL/glext.h:5236
# EXT_secondary_color (GL/glext.h:5239)
GL_EXT_secondary_color = 1 # GL/glext.h:5240
# GL/glext.h:5242
glSecondaryColor3bEXT = _link_function('glSecondaryColor3bEXT', None, [GLbyte, GLbyte, GLbyte], 'EXT_secondary_color')
# GL/glext.h:5243
glSecondaryColor3bvEXT = _link_function('glSecondaryColor3bvEXT', None, [POINTER(GLbyte)], 'EXT_secondary_color')
# GL/glext.h:5244
glSecondaryColor3dEXT = _link_function('glSecondaryColor3dEXT', None, [GLdouble, GLdouble, GLdouble], 'EXT_secondary_color')
# GL/glext.h:5245
glSecondaryColor3dvEXT = _link_function('glSecondaryColor3dvEXT', None, [POINTER(GLdouble)], 'EXT_secondary_color')
# GL/glext.h:5246
glSecondaryColor3fEXT = _link_function('glSecondaryColor3fEXT', None, [GLfloat, GLfloat, GLfloat], 'EXT_secondary_color')
# GL/glext.h:5247
glSecondaryColor3fvEXT = _link_function('glSecondaryColor3fvEXT', None, [POINTER(GLfloat)], 'EXT_secondary_color')
# GL/glext.h:5248
glSecondaryColor3iEXT = _link_function('glSecondaryColor3iEXT', None, [GLint, GLint, GLint], 'EXT_secondary_color')
# GL/glext.h:5249
glSecondaryColor3ivEXT = _link_function('glSecondaryColor3ivEXT', None, [POINTER(GLint)], 'EXT_secondary_color')
# GL/glext.h:5250
glSecondaryColor3sEXT = _link_function('glSecondaryColor3sEXT', None, [GLshort, GLshort, GLshort], 'EXT_secondary_color')
# GL/glext.h:5251
glSecondaryColor3svEXT = _link_function('glSecondaryColor3svEXT', None, [POINTER(GLshort)], 'EXT_secondary_color')
# GL/glext.h:5252
glSecondaryColor3ubEXT = _link_function('glSecondaryColor3ubEXT', None, [GLubyte, GLubyte, GLubyte], 'EXT_secondary_color')
# GL/glext.h:5253
glSecondaryColor3ubvEXT = _link_function('glSecondaryColor3ubvEXT', None, [POINTER(GLubyte)], 'EXT_secondary_color')
# GL/glext.h:5254
glSecondaryColor3uiEXT = _link_function('glSecondaryColor3uiEXT', None, [GLuint, GLuint, GLuint], 'EXT_secondary_color')
# GL/glext.h:5255
glSecondaryColor3uivEXT = _link_function('glSecondaryColor3uivEXT', None, [POINTER(GLuint)], 'EXT_secondary_color')
# GL/glext.h:5256
glSecondaryColor3usEXT = _link_function('glSecondaryColor3usEXT', None, [GLushort, GLushort, GLushort], 'EXT_secondary_color')
# GL/glext.h:5257
glSecondaryColor3usvEXT = _link_function('glSecondaryColor3usvEXT', None, [POINTER(GLushort)], 'EXT_secondary_color')
# GL/glext.h:5258
glSecondaryColorPointerEXT = _link_function('glSecondaryColorPointerEXT', None, [GLint, GLenum, GLsizei, POINTER(GLvoid)], 'EXT_secondary_color')
PFNGLSECONDARYCOLOR3BEXTPROC = CFUNCTYPE(None, GLbyte, GLbyte, GLbyte) # GL/glext.h:5260
PFNGLSECONDARYCOLOR3BVEXTPROC = CFUNCTYPE(None, POINTER(GLbyte)) # GL/glext.h:5261
PFNGLSECONDARYCOLOR3DEXTPROC = CFUNCTYPE(None, GLdouble, GLdouble, GLdouble) # GL/glext.h:5262
PFNGLSECONDARYCOLOR3DVEXTPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:5263
PFNGLSECONDARYCOLOR3FEXTPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat) # GL/glext.h:5264
PFNGLSECONDARYCOLOR3FVEXTPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:5265
PFNGLSECONDARYCOLOR3IEXTPROC = CFUNCTYPE(None, GLint, GLint, GLint) # GL/glext.h:5266
PFNGLSECONDARYCOLOR3IVEXTPROC = CFUNCTYPE(None, POINTER(GLint)) # GL/glext.h:5267
PFNGLSECONDARYCOLOR3SEXTPROC = CFUNCTYPE(None, GLshort, GLshort, GLshort) # GL/glext.h:5268
PFNGLSECONDARYCOLOR3SVEXTPROC = CFUNCTYPE(None, POINTER(GLshort)) # GL/glext.h:5269
PFNGLSECONDARYCOLOR3UBEXTPROC = CFUNCTYPE(None, GLubyte, GLubyte, GLubyte) # GL/glext.h:5270
PFNGLSECONDARYCOLOR3UBVEXTPROC = CFUNCTYPE(None, POINTER(GLubyte)) # GL/glext.h:5271
PFNGLSECONDARYCOLOR3UIEXTPROC = CFUNCTYPE(None, GLuint, GLuint, GLuint) # GL/glext.h:5272
PFNGLSECONDARYCOLOR3UIVEXTPROC = CFUNCTYPE(None, POINTER(GLuint)) # GL/glext.h:5273
PFNGLSECONDARYCOLOR3USEXTPROC = CFUNCTYPE(None, GLushort, GLushort, GLushort) # GL/glext.h:5274
PFNGLSECONDARYCOLOR3USVEXTPROC = CFUNCTYPE(None, POINTER(GLushort)) # GL/glext.h:5275
PFNGLSECONDARYCOLORPOINTEREXTPROC = CFUNCTYPE(None, GLint, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:5276
# EXT_texture_perturb_normal (GL/glext.h:5279)
GL_EXT_texture_perturb_normal = 1 # GL/glext.h:5280
# GL/glext.h:5282
glTextureNormalEXT = _link_function('glTextureNormalEXT', None, [GLenum], 'EXT_texture_perturb_normal')
PFNGLTEXTURENORMALEXTPROC = CFUNCTYPE(None, GLenum) # GL/glext.h:5284
# EXT_multi_draw_arrays (GL/glext.h:5287)
GL_EXT_multi_draw_arrays = 1 # GL/glext.h:5288
# GL/glext.h:5290
glMultiDrawArraysEXT = _link_function('glMultiDrawArraysEXT', None, [GLenum, POINTER(GLint), POINTER(GLsizei), GLsizei], 'EXT_multi_draw_arrays')
# GL/glext.h:5291
glMultiDrawElementsEXT = _link_function('glMultiDrawElementsEXT', None, [GLenum, POINTER(GLsizei), GLenum, POINTER(POINTER(GLvoid)), GLsizei], 'EXT_multi_draw_arrays')
PFNGLMULTIDRAWARRAYSEXTPROC = CFUNCTYPE(None, GLenum, POINTER(GLint), POINTER(GLsizei), GLsizei) # GL/glext.h:5293
PFNGLMULTIDRAWELEMENTSEXTPROC = CFUNCTYPE(None, GLenum, POINTER(GLsizei), GLenum, POINTER(POINTER(GLvoid)), GLsizei) # GL/glext.h:5294
# EXT_fog_coord (GL/glext.h:5297)
GL_EXT_fog_coord = 1 # GL/glext.h:5298
# GL/glext.h:5300
glFogCoordfEXT = _link_function('glFogCoordfEXT', None, [GLfloat], 'EXT_fog_coord')
# GL/glext.h:5301
glFogCoordfvEXT = _link_function('glFogCoordfvEXT', None, [POINTER(GLfloat)], 'EXT_fog_coord')
# GL/glext.h:5302
glFogCoorddEXT = _link_function('glFogCoorddEXT', None, [GLdouble], 'EXT_fog_coord')
# GL/glext.h:5303
glFogCoorddvEXT = _link_function('glFogCoorddvEXT', None, [POINTER(GLdouble)], 'EXT_fog_coord')
# GL/glext.h:5304
glFogCoordPointerEXT = _link_function('glFogCoordPointerEXT', None, [GLenum, GLsizei, POINTER(GLvoid)], 'EXT_fog_coord')
PFNGLFOGCOORDFEXTPROC = CFUNCTYPE(None, GLfloat) # GL/glext.h:5306
PFNGLFOGCOORDFVEXTPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:5307
PFNGLFOGCOORDDEXTPROC = CFUNCTYPE(None, GLdouble) # GL/glext.h:5308
PFNGLFOGCOORDDVEXTPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:5309
PFNGLFOGCOORDPOINTEREXTPROC = CFUNCTYPE(None, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:5310
# REND_screen_coordinates (GL/glext.h:5313)
GL_REND_screen_coordinates = 1 # GL/glext.h:5314
# EXT_coordinate_frame (GL/glext.h:5317)
GL_EXT_coordinate_frame = 1 # GL/glext.h:5318
# GL/glext.h:5320
glTangent3bEXT = _link_function('glTangent3bEXT', None, [GLbyte, GLbyte, GLbyte], 'EXT_coordinate_frame')
# GL/glext.h:5321
glTangent3bvEXT = _link_function('glTangent3bvEXT', None, [POINTER(GLbyte)], 'EXT_coordinate_frame')
# GL/glext.h:5322
glTangent3dEXT = _link_function('glTangent3dEXT', None, [GLdouble, GLdouble, GLdouble], 'EXT_coordinate_frame')
# GL/glext.h:5323
glTangent3dvEXT = _link_function('glTangent3dvEXT', None, [POINTER(GLdouble)], 'EXT_coordinate_frame')
# GL/glext.h:5324
glTangent3fEXT = _link_function('glTangent3fEXT', None, [GLfloat, GLfloat, GLfloat], 'EXT_coordinate_frame')
# GL/glext.h:5325
glTangent3fvEXT = _link_function('glTangent3fvEXT', None, [POINTER(GLfloat)], 'EXT_coordinate_frame')
# GL/glext.h:5326
glTangent3iEXT = _link_function('glTangent3iEXT', None, [GLint, GLint, GLint], 'EXT_coordinate_frame')
# GL/glext.h:5327
glTangent3ivEXT = _link_function('glTangent3ivEXT', None, [POINTER(GLint)], 'EXT_coordinate_frame')
# GL/glext.h:5328
glTangent3sEXT = _link_function('glTangent3sEXT', None, [GLshort, GLshort, GLshort], 'EXT_coordinate_frame')
# GL/glext.h:5329
glTangent3svEXT = _link_function('glTangent3svEXT', None, [POINTER(GLshort)], 'EXT_coordinate_frame')
# GL/glext.h:5330
glBinormal3bEXT = _link_function('glBinormal3bEXT', None, [GLbyte, GLbyte, GLbyte], 'EXT_coordinate_frame')
# GL/glext.h:5331
glBinormal3bvEXT = _link_function('glBinormal3bvEXT', None, [POINTER(GLbyte)], 'EXT_coordinate_frame')
# GL/glext.h:5332
glBinormal3dEXT = _link_function('glBinormal3dEXT', None, [GLdouble, GLdouble, GLdouble], 'EXT_coordinate_frame')
# GL/glext.h:5333
glBinormal3dvEXT = _link_function('glBinormal3dvEXT', None, [POINTER(GLdouble)], 'EXT_coordinate_frame')
# GL/glext.h:5334
glBinormal3fEXT = _link_function('glBinormal3fEXT', None, [GLfloat, GLfloat, GLfloat], 'EXT_coordinate_frame')
# GL/glext.h:5335
glBinormal3fvEXT = _link_function('glBinormal3fvEXT', None, [POINTER(GLfloat)], 'EXT_coordinate_frame')
# GL/glext.h:5336
glBinormal3iEXT = _link_function('glBinormal3iEXT', None, [GLint, GLint, GLint], 'EXT_coordinate_frame')
# GL/glext.h:5337
glBinormal3ivEXT = _link_function('glBinormal3ivEXT', None, [POINTER(GLint)], 'EXT_coordinate_frame')
# GL/glext.h:5338
glBinormal3sEXT = _link_function('glBinormal3sEXT', None, [GLshort, GLshort, GLshort], 'EXT_coordinate_frame')
# GL/glext.h:5339
glBinormal3svEXT = _link_function('glBinormal3svEXT', None, [POINTER(GLshort)], 'EXT_coordinate_frame')
# GL/glext.h:5340
glTangentPointerEXT = _link_function('glTangentPointerEXT', None, [GLenum, GLsizei, POINTER(GLvoid)], 'EXT_coordinate_frame')
# GL/glext.h:5341
glBinormalPointerEXT = _link_function('glBinormalPointerEXT', None, [GLenum, GLsizei, POINTER(GLvoid)], 'EXT_coordinate_frame')
PFNGLTANGENT3BEXTPROC = CFUNCTYPE(None, GLbyte, GLbyte, GLbyte) # GL/glext.h:5343
PFNGLTANGENT3BVEXTPROC = CFUNCTYPE(None, POINTER(GLbyte)) # GL/glext.h:5344
PFNGLTANGENT3DEXTPROC = CFUNCTYPE(None, GLdouble, GLdouble, GLdouble) # GL/glext.h:5345
PFNGLTANGENT3DVEXTPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:5346
PFNGLTANGENT3FEXTPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat) # GL/glext.h:5347
PFNGLTANGENT3FVEXTPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:5348
PFNGLTANGENT3IEXTPROC = CFUNCTYPE(None, GLint, GLint, GLint) # GL/glext.h:5349
PFNGLTANGENT3IVEXTPROC = CFUNCTYPE(None, POINTER(GLint)) # GL/glext.h:5350
PFNGLTANGENT3SEXTPROC = CFUNCTYPE(None, GLshort, GLshort, GLshort) # GL/glext.h:5351
PFNGLTANGENT3SVEXTPROC = CFUNCTYPE(None, POINTER(GLshort)) # GL/glext.h:5352
PFNGLBINORMAL3BEXTPROC = CFUNCTYPE(None, GLbyte, GLbyte, GLbyte) # GL/glext.h:5353
PFNGLBINORMAL3BVEXTPROC = CFUNCTYPE(None, POINTER(GLbyte)) # GL/glext.h:5354
PFNGLBINORMAL3DEXTPROC = CFUNCTYPE(None, GLdouble, GLdouble, GLdouble) # GL/glext.h:5355
PFNGLBINORMAL3DVEXTPROC = CFUNCTYPE(None, POINTER(GLdouble)) # GL/glext.h:5356
PFNGLBINORMAL3FEXTPROC = CFUNCTYPE(None, GLfloat, GLfloat, GLfloat) # GL/glext.h:5357
PFNGLBINORMAL3FVEXTPROC = CFUNCTYPE(None, POINTER(GLfloat)) # GL/glext.h:5358
PFNGLBINORMAL3IEXTPROC = CFUNCTYPE(None, GLint, GLint, GLint) # GL/glext.h:5359
PFNGLBINORMAL3IVEXTPROC = CFUNCTYPE(None, POINTER(GLint)) # GL/glext.h:5360
PFNGLBINORMAL3SEXTPROC = CFUNCTYPE(None, GLshort, GLshort, GLshort) # GL/glext.h:5361
PFNGLBINORMAL3SVEXTPROC = CFUNCTYPE(None, POINTER(GLshort)) # GL/glext.h:5362
PFNGLTANGENTPOINTEREXTPROC = CFUNCTYPE(None, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:5363
PFNGLBINORMALPOINTEREXTPROC = CFUNCTYPE(None, GLenum, GLsizei, POINTER(GLvoid)) # GL/glext.h:5364
# EXT_texture_env_combine (GL/glext.h:5367)
GL_EXT_texture_env_combine = 1 # GL/glext.h:5368
# APPLE_specular_vector (GL/glext.h:5371)
GL_APPLE_specular_vector = 1 # GL/glext.h:5372
# APPLE_transform_hint (GL/glext.h:5375)
GL_APPLE_transform_hint = 1 # GL/glext.h:5376
# SGIX_fog_scale (GL/glext.h:5379)
GL_SGIX_fog_scale = 1 # GL/glext.h:5380
# SUNX_constant_data (GL/glext.h:5383)
GL_SUNX_constant_data = 1 # GL/glext.h:5384
# GL/glext.h:5386
glFinishTextureSUNX = _link_function('glFinishTextureSUNX', None, [], 'SUNX_constant_data')
PFNGLFINISHTEXTURESUNXPROC = CFUNCTYPE(None) # GL/glext.h:5388
# SUN_global_alpha (GL/glext.h:5391)
GL_SUN_global_alpha = 1 # GL/glext.h:5392
# GL/glext.h:5394
glGlobalAlphaFactorbSUN = _link_function('glGlobalAlphaFactorbSUN', None, [GLbyte], 'SUN_global_alpha')
# GL/glext.h:5395
glGlobalAlphaFactorsSUN = _link_function('glGlobalAlphaFactorsSUN', None, [GLshort], 'SUN_global_alpha')
# GL/glext.h:5396
glGlobalAlphaFactoriSUN = _link_function('glGlobalAlphaFactoriSUN', None, [GLint], 'SUN_global_alpha')
# GL/glext.h:5397
glGlobalAlphaFactorfSUN = _link_function('glGlobalAlphaFactorfSUN', None, [GLfloat], 'SUN_global_alpha')
# GL/glext.h:5398
glGlobalAlphaFactordSUN = _link_function('glGlobalAlphaFactordSUN', None, [GLdouble], 'SUN_global_alpha')
# GL/glext.h:5399
glGlobalAlphaFactorubSUN = _link_function('glGlobalAlphaFactorubSUN', None, [GLubyte], 'SUN_global_alpha')
# GL/glext.h:5400
glGlobalAlphaFactorusSUN = _link_function('glGlobalAlphaFactorusSUN', None, [GLushort], 'SUN_global_alpha')
# GL/glext.h:5401
glGlobalAlphaFactoruiSUN = _link_function('glGlobalAlphaFactoruiSUN', None, [GLuint], 'SUN_global_alpha')
PFNGLGLOBALALPHAFACTORBSUNPROC = CFUNCTYPE(None, GLbyte) # GL/glext.h:5403
PFNGLGLOBALALPHAFACTORSSUNPROC = CFUNCTYPE(None, GLshort) # GL/glext.h:5404
PFNGLGLOBALALPHAFACTORISUNPROC = CFUNCTYPE(None, GLint) # GL/glext.h:5405
PFNGLGLOBALALPHAFACTORFSUNPROC = CFUNCTYPE(None, GLfloat) # GL/glext.h:5406
PFNGLGLOBALALPHAFACTORDSUNPROC = CFUNCTYPE(None, GLdouble) # GL/glext.h:5407
PFNGLGLOBALALPHAFACTORUBSUNPROC = CFUNCTYPE(None, GLubyte) # GL/glext.h:5408
PFNGLGLOBALALPHAFACTORUSSUNPROC = CFUNCTYPE(None, GLushort) # GL/glext.h:5409
PFNGLGLOBALALPHAFACTORUISUNPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:5410
# SUN_triangle_list (GL/glext.h:5413)
GL_SUN_triangle_list = 1 # GL/glext.h:5414
# GL/glext.h:5416
glReplacementCodeuiSUN = _link_function('glReplacementCodeuiSUN', None, [GLuint], 'SUN_triangle_list')
# GL/glext.h:5417
glReplacementCodeusSUN = _link_function('glReplacementCodeusSUN', None, [GLushort], 'SUN_triangle_list')
# GL/glext.h:5418
glReplacementCodeubSUN = _link_function('glReplacementCodeubSUN', None, [GLubyte], 'SUN_triangle_list')
# GL/glext.h:5419
glReplacementCodeuivSUN = _link_function('glReplacementCodeuivSUN', None, [POINTER(GLuint)], 'SUN_triangle_list')
# GL/glext.h:5420
glReplacementCodeusvSUN = _link_function('glReplacementCodeusvSUN', None, [POINTER(GLushort)], 'SUN_triangle_list')
# GL/glext.h:5421
glReplacementCodeubvSUN = _link_function('glReplacementCodeubvSUN', None, [POINTER(GLubyte)], 'SUN_triangle_list')
# GL/glext.h:5422
glReplacementCodePointerSUN = _link_function('glReplacementCodePointerSUN', None, [GLenum, GLsizei, POINTER(POINTER(GLvoid))], 'SUN_triangle_list')
PFNGLREPLACEMENTCODEUISUNPROC = CFUNCTYPE(None, GLuint) # GL/glext.h:5424
PFNGLREPLACEMENTCODEUSSUNPROC = CFUNCTYPE(None, GLushort) # GL/glext.h:5425
PFNGLREPLACEMENTCODEUBSUNPROC = CFUNCTYPE(None, GLubyte) # GL/glext.h:5426
PFNGLREPLACEMENTCODEUIVSUNPROC = CFUNCTYPE(None, POINTER(GLuint)) # GL/glext.h:5427
PFNGLREPLACEMENTCODEUSVSUNPROC = CFUNCTYPE(None, POINTER(GLushort)) # GL/glext.h:5428
PFNGLREPLACEMENTCODEUBVSUNPROC = CFUNCTYPE(None, POINTER(GLubyte)) # GL/glext.h:5429
PFNGLREPLACEMENTCODEPOINTERSUNPROC = CFUNCTYPE(None, GLenum, GLsizei, POINTER(POINTER(GLvoid))) # GL/glext.h:5430
# SUN_vertex (GL/glext.h:5433)
GL_SUN_vertex = 1 # GL/glext.h:5434
# GL/glext.h:5436
glColor4ubVertex2fSUN = _link_function('glColor4ubVertex2fSUN', None, [GLubyte, GLubyte, GLubyte, GLubyte, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5437
glColor4ubVertex2fvSUN = _link_function('glColor4ubVertex2fvSUN', None, [POINTER(GLubyte), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5438
glColor4ubVertex3fSUN = _link_function('glColor4ubVertex3fSUN', None, [GLubyte, GLubyte, GLubyte, GLubyte, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5439
glColor4ubVertex3fvSUN = _link_function('glColor4ubVertex3fvSUN', None, [POINTER(GLubyte), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5440
glColor3fVertex3fSUN = _link_function('glColor3fVertex3fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5441
glColor3fVertex3fvSUN = _link_function('glColor3fVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5442
glNormal3fVertex3fSUN = _link_function('glNormal3fVertex3fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5443
glNormal3fVertex3fvSUN = _link_function('glNormal3fVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5444
glColor4fNormal3fVertex3fSUN = _link_function('glColor4fNormal3fVertex3fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5445
glColor4fNormal3fVertex3fvSUN = _link_function('glColor4fNormal3fVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5446
glTexCoord2fVertex3fSUN = _link_function('glTexCoord2fVertex3fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5447
glTexCoord2fVertex3fvSUN = _link_function('glTexCoord2fVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5448
glTexCoord4fVertex4fSUN = _link_function('glTexCoord4fVertex4fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5449
glTexCoord4fVertex4fvSUN = _link_function('glTexCoord4fVertex4fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5450
glTexCoord2fColor4ubVertex3fSUN = _link_function('glTexCoord2fColor4ubVertex3fSUN', None, [GLfloat, GLfloat, GLubyte, GLubyte, GLubyte, GLubyte, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5451
glTexCoord2fColor4ubVertex3fvSUN = _link_function('glTexCoord2fColor4ubVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLubyte), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5452
glTexCoord2fColor3fVertex3fSUN = _link_function('glTexCoord2fColor3fVertex3fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5453
glTexCoord2fColor3fVertex3fvSUN = _link_function('glTexCoord2fColor3fVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5454
glTexCoord2fNormal3fVertex3fSUN = _link_function('glTexCoord2fNormal3fVertex3fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5455
glTexCoord2fNormal3fVertex3fvSUN = _link_function('glTexCoord2fNormal3fVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5456
glTexCoord2fColor4fNormal3fVertex3fSUN = _link_function('glTexCoord2fColor4fNormal3fVertex3fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5457
glTexCoord2fColor4fNormal3fVertex3fvSUN = _link_function('glTexCoord2fColor4fNormal3fVertex3fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5458
glTexCoord4fColor4fNormal3fVertex4fSUN = _link_function('glTexCoord4fColor4fNormal3fVertex4fSUN', None, [GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5459
glTexCoord4fColor4fNormal3fVertex4fvSUN = _link_function('glTexCoord4fColor4fNormal3fVertex4fvSUN', None, [POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5460
glReplacementCodeuiVertex3fSUN = _link_function('glReplacementCodeuiVertex3fSUN', None, [GLuint, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5461
glReplacementCodeuiVertex3fvSUN = _link_function('glReplacementCodeuiVertex3fvSUN', None, [POINTER(GLuint), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5462
glReplacementCodeuiColor4ubVertex3fSUN = _link_function('glReplacementCodeuiColor4ubVertex3fSUN', None, [GLuint, GLubyte, GLubyte, GLubyte, GLubyte, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5463
glReplacementCodeuiColor4ubVertex3fvSUN = _link_function('glReplacementCodeuiColor4ubVertex3fvSUN', None, [POINTER(GLuint), POINTER(GLubyte), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5464
glReplacementCodeuiColor3fVertex3fSUN = _link_function('glReplacementCodeuiColor3fVertex3fSUN', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5465
glReplacementCodeuiColor3fVertex3fvSUN = _link_function('glReplacementCodeuiColor3fVertex3fvSUN', None, [POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5466
glReplacementCodeuiNormal3fVertex3fSUN = _link_function('glReplacementCodeuiNormal3fVertex3fSUN', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5467
glReplacementCodeuiNormal3fVertex3fvSUN = _link_function('glReplacementCodeuiNormal3fVertex3fvSUN', None, [POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5468
glReplacementCodeuiColor4fNormal3fVertex3fSUN = _link_function('glReplacementCodeuiColor4fNormal3fVertex3fSUN', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
# GL/glext.h:5469
glReplacementCodeuiColor4fNormal3fVertex3fvSUN = _link_function('glReplacementCodeuiColor4fNormal3fVertex3fvSUN', None, [POINTER(GLuint), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLfloat)], 'SUN_vertex')
# GL/glext.h:5470
glReplacementCodeuiTexCoord2fVertex3fSUN = _link_function('glReplacementCodeuiTexCoord2fVertex3fSUN', None, [GLuint, GLfloat, GLfloat, GLfloat, GLfloat, GLfloat], 'SUN_vertex')
the string cannot be converted to a numeric value, the Stata
system missing value is returned.
"""
return _stp._st_getrealofstring(s)
@staticmethod
def getTempFile():
"""
Get a valid Stata temporary filename.
Returns
-------
str
The filename, including its path.
"""
return _stp._st_gettempfile()
@staticmethod
def getTempName():
"""
Get a valid Stata temporary name.
Returns
-------
str
The tempname.
"""
return _stp._st_gettempname()
@staticmethod
def getWorkingDir():
"""
Get the current Stata working directory.
Returns
-------
str
The path of the current working directory.
"""
return _stp._st_getworkingdir()
@staticmethod
def isFmt(fmt):
"""
Test if a format is a valid Stata format.
Parameters
----------
fmt : str
The format to test.
Returns
-------
bool
True if the format is a valid Stata format.
"""
b = _stp._st_getfmttype(fmt)
return b != ""
@staticmethod
def isNumFmt(fmt):
"""
Test if a format is a valid Stata numeric format.
Parameters
----------
fmt : str
The format to test.
Returns
-------
bool
True if the format is a valid Stata numeric format.
"""
b = _stp._st_getfmttype(fmt)
return b == "numeric"
@staticmethod
def isStrFmt(fmt):
"""
Test if a format is a valid Stata string format.
Parameters
----------
fmt : str
The format to test.
Returns
-------
bool
True if the format is a valid Stata string format.
"""
b = _stp._st_getfmttype(fmt)
return b == "string"
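# Illustrative usage (ours; hedged): assuming these staticmethods belong to Stata's
# SFIToolkit class (its class statement falls outside this excerpt), the format helpers
# can validate display formats before applying them, e.g.
#     SFIToolkit.isFmt("%9.2f")      # expected True (numeric format)
#     SFIToolkit.isNumFmt("%9.2f")   # expected True
#     SFIToolkit.isStrFmt("%20s")    # expected True (string format)
#     SFIToolkit.isFmt("not-a-fmt")  # expected False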
@staticmethod
def isValidName(name):
"""
Check if a string is a valid Stata name.
Parameters
----------
name : str
Name to test.
Returns
-------
bool
True if the string represents a valid Stata name.
"""
return _stp._st_isvalidname(name)
@staticmethod
def isValidVariableName(name):
"""
Check if a string is a valid Stata variable name.
Parameters
----------
name : str
Name to test.
Returns
-------
bool
True if the string represents a valid Stata variable name.
"""
return _stp._st_isvalidvariablename(name)
@staticmethod
def macroExpand(s):
"""
Return `s` with any quoted or dollar sign--prefixed macros expanded.
Parameters
----------
s : str
The string to expand.
Returns
-------
str
A string with macros expanded.
"""
return _stp._st_macroexpand(s)
@staticmethod
def makeVarName(s, retainCase=False):
"""
Attempt to form a valid variable name from a string.
Parameters
----------
s : str
Source string.
retainCase : bool, optional
Whether to preserve the case of `s`. If set to True, the case is
preserved; otherwise the variable name is converted to lowercase.
Default is False.
Returns
-------
str
The new variable name. Returns an empty string if a valid
name was not created.
"""
if retainCase is True:
rcase = 1
elif retainCase is False:
rcase = 0
else:
raise TypeError("retainCase must be a boolean value")
return _stp._st_makevarname(s, rcase)
@staticmethod
def pollnow():
"""
Request that Stata poll its GUI immediately. Use this method inside
a time-consuming task so that the Stata interface is responsive to
user inputs. Generally, :meth:`pollstd()` should be used instead.
"""
return _stp._st_pollnow()
@staticmethod
def pollstd():
"""
Request that Stata poll its GUI at the standard interval. Use this
method inside a time-consuming task so that the Stata interface is
responsive to user inputs.
"""
return _stp._st_pollstd()
@staticmethod
def rclear():
"""
Clear Stata's **r()** stored results.
"""
return _stp._st_rclear()
@staticmethod
def sclear():
"""
Clear Stata's **s()** stored results.
"""
return _stp._st_sclear()
@staticmethod
def stata(s, echo=False):
"""
Execute a Stata command.
Parameters
----------
s : str
The command to execute.
echo : bool, optional
Echo the command. Default is False.
"""
if echo is True:
becho = 1
elif echo is False:
becho = 0
else:
raise TypeError("echo must be a boolean value")
return _stp._st_executecommand(s, becho)
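# Illustrative usage (ours; hedged): running a Stata command from Python through the
# wrapper above, again assuming the enclosing class is sfi's SFIToolkit. The dataset
# used in the example is an assumption.
#     SFIToolkit.stata("sysuse auto, clear")          # run silently
#     SFIToolkit.stata("summarize price", echo=True)  # echo the command in Stata's log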
@staticmethod
def strToName(s, prefix=False):
"""
Convert a string to a Stata name. Each character in `s` that is
not allowed in a Stata name is converted to an underscore
character, **_**. If the first character in `s` is a numeric
character and `prefix` is specified and True, then the result is
prefixed with an underscore. The result is truncated to 32
characters.
Parameters
----------
s : str
The string to convert.
prefix : bool, optional
Prefix with an underscore. Default is False.
Returns
-------
str
A valid Stata name.
"""
if prefix is True:
bprefix = 1
elif prefix is False:
bprefix = 0
else:
raise TypeError("prefix must be a boolean value")
return _stp._st_strtoname(s, bprefix)
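# Illustrative usage (ours; hedged): turning arbitrary labels into legal Stata names and
# expanding macros, assuming the enclosing SFIToolkit class. Per the docstrings above,
# disallowed characters become underscores and results are truncated to 32 characters.
#     SFIToolkit.strToName("GDP growth (%)")           # spaces and symbols -> underscores
#     SFIToolkit.strToName("2020 sales", prefix=True)  # leading digit gets a "_" prefix
#     SFIToolkit.macroExpand("value of x is `x'")      # expands the local macro x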
class StrLConnector:
"""
This class facilitates access to Stata's **strL** datatype. The allowed
values for the variable index `var` and the observation index `obs`
are
.. _ref-strlrange:
.. centered:: **-nvar** `<=` `var` `<` **nvar**
and
.. centered:: **-nobs** `<=` `obs` `<` **nobs**
Here **nvar** is the number of variables defined in the dataset
currently loaded in Stata or in the specified frame, which is
returned by :meth:`~Data.getVarCount()`. **nobs** is the number of observations
defined in the dataset currently loaded in Stata or in the
specified frame, which is returned by :meth:`~Data.getObsTotal()`.
Negative values for `var` and `obs` are allowed and are interpreted in the
usual way for Python indexing. `var` can be specified either as the
variable name or index. Note that passing the variable index will be more
efficient because looking up the index for the specified variable name is
avoided.
There are two ways to create a :class:`StrLConnector` instance:
* StrLConnector(`var`, `obs`)
Creates a :class:`StrLConnector` and connects it to a specific **strL** in
the Stata dataset; see :class:`Data`.
**var** : int or str
Variable to access.
**obs** : int
Observation to access.
A **ValueError** can be raised if
* `var` is out of :ref:`range <ref-strlrange>` or not found.
* `obs` is out of :ref:`range <ref-strlrange>`.
* StrLConnector(`frame`, `var`, `obs`)
Creates a :class:`StrLConnector` and connects it to a specific **strL** in
the specified :class:`~Frame`.
**frame** : :class:`~Frame`
The :class:`Frame` to reference.
**var** : int or str
Variable to access.
**obs** : int
Observation to access.
A **ValueError** can be raised if
* `frame` does not already exist in Stata.
* `var` is out of :ref:`range <ref-strlrange>` or not found.
* `obs` is out of :ref:`range <ref-strlrange>`.
"""
def __init__(self, *argv):
nargs = len(argv)
if nargs != 2 and nargs != 3:
raise TypeError("__init__() takes from 2 to 3 positional arguments")
f = argv[0]
if isinstance(f, Frame):
if nargs != 3:
raise TypeError("__init__() takes 3 required arguments when a frame is specified")
var = argv[1]
obs = argv[2]
nobs = f.getObsTotal()
nvar = f.getVarCount()
ovar = _get_df_var_index_single(f.name, f.id, var)
if ovar < -nvar or ovar >= nvar:
raise ValueError("%s: var out of range" % (var))
if obs < -nobs or obs >= nobs:
raise ValueError("%d: obs out of range" % (obs))
if not f.isVarTypeStrL(ovar):
raise TypeError("type mismatch; not a strL")
self._var = ovar
self._obs = obs
self._pos = 0
self.frame = f
else:
if nargs != 2:
raise TypeError("__init__() takes 2 required arguments when no frame is specified")
# Two-argument form: argv[0] is the variable (name or index) of the dataset in
# memory and argv[1] is the observation index.
var = argv[0]
obs = argv[1]
nobs = Data.getObsTotal()
nvar = Data.getVarCount()
ovar = _get_var_index_single(var)
if ovar < -nvar or ovar >= nvar:
raise ValueError("%s: var out of range" % (var))
if obs < -nobs or obs >= nobs:
raise ValueError("%d: obs out of range" % (obs))
if not Data.isVarTypeStrL(ovar):
raise TypeError("type mismatch; not a strL")
self._var = ovar
self._obs = obs
self._pos = 0
self.frame = None
def close(self):
"""
Close the connection and release any resources.
"""
return self.reset()
def getPosition(self):
"""
Get the current access position.
Returns
-------
int
The position.
"""
return self._pos
def getSize(self):
"""
Get the total number of bytes available in the **strL**.
Returns
-------
int
The total number of bytes available.
"""
if self.frame is None:
return _stp._st_getbytessize(self._var, self._obs)
else:
return _stp._st_df_getbytessize(self.frame.name, self.frame.id, self._var, self._obs)
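# Illustrative usage (ours; hedged): connecting to a strL cell of the dataset in memory
# and inspecting it with the accessors defined in this class. The variable name "notes"
# and observation 0 are assumptions for the example.
#     sc = StrLConnector("notes", 0)   # or StrLConnector(frame, "notes", 0)
#     total = sc.getSize()             # number of bytes stored in the strL
#     binary = sc.isBinary()           # whether Stata marked it as binary
#     sc.close()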
def isBinary(self):
"""
Determine if the attached **strL** has been marked as binary.
Returns
-------
bool
True if the **strL** has been marked as binary.
"""
if self.frame is None:
return _stp._st_isstrlbinary(self._var, self._obs)
else:
return _stp._st_df_isstrlbinary(self.frame.name, self.frame.id, self._var, | |
elem in lc_result_final],
}
)
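# Note (ours): each branch below builds X and a list of Y curves for range_curve(),
# where every curve is given as [lower, mean, upper] confidence bands computed from the
# cross-validation mean +/- std_coeff * standard deviation.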
if method == "efficiency":
X = result["n"]
Y = [
[
[
result[metric][i] - std_coeff * result[metric + "_std"][i]
for i in range(len(sizes))
],
result[metric],
[
result[metric][i] + std_coeff * result[metric + "_std"][i]
for i in range(len(sizes))
],
],
[
[
result[metric + "_train"][i]
- std_coeff * result[metric + "_train_std"][i]
for i in range(len(sizes))
],
result[metric + "_train"],
[
result[metric + "_train"][i]
+ std_coeff * result[metric + "_train_std"][i]
for i in range(len(sizes))
],
],
]
x_label = "n"
y_label = metric
labels = [
"test",
"train",
]
elif method == "performance":
X = result["time"]
Y = [
[
[
result[metric][i] - std_coeff * result[metric + "_std"][i]
for i in range(len(sizes))
],
result[metric],
[
result[metric][i] + std_coeff * result[metric + "_std"][i]
for i in range(len(sizes))
],
],
]
x_label = "time"
y_label = metric
labels = []
else:
X = result["n"]
Y = [
[
[
result["time"][i] - std_coeff * result["time_std"][i]
for i in range(len(sizes))
],
result["time"],
[
result["time"][i] + std_coeff * result["time_std"][i]
for i in range(len(sizes))
],
],
]
x_label = "n"
y_label = "time"
labels = []
range_curve(
X, Y, x_label, y_label, ax, labels, **style_kwds,
)
return result
# ---#
def lift_chart(
y_true: str,
y_score: str,
input_relation: Union[str, vDataFrame],
cursor=None,
pos_label: Union[int, float, str] = 1,
nbins: int = 30,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the Lift Chart.
Parameters
----------
y_true: str
Response column.
y_score: str
Prediction Probability.
input_relation: str/vDataFrame
Relation used for scoring. It can be a view, a table, or even a
customized relation. For example, you could write
"(SELECT ... FROM ...) x", as long as an alias is given at the end
of the relation.
cursor: DBcursor, optional
Vertica database cursor.
pos_label: int/float/str, optional
To compute the Lift Chart, one of the response column classes must be the
positive one. The parameter 'pos_label' represents this class.
nbins: int, optional
An integer value that determines the number of decision boundaries. Decision
boundaries are set at equally-spaced intervals between 0 and 1, inclusive.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("y_true", y_true, [str],),
("y_score", y_score, [str],),
("input_relation", input_relation, [str, vDataFrame],),
("nbins", nbins, [int, float],),
]
)
cursor, conn, input_relation = check_cursor(cursor, input_relation)
version(cursor=cursor, condition=[8, 0, 0])
query = "SELECT LIFT_TABLE(obs, prob USING PARAMETERS num_bins = {}) OVER() FROM (SELECT (CASE WHEN {} = '{}' THEN 1 ELSE 0 END) AS obs, {}::float AS prob FROM {}) AS prediction_output"
query = query.format(nbins, y_true, pos_label, y_score, input_relation)
executeSQL(cursor, query, "Computing the Lift Table.")
query_result = cursor.fetchall()
if conn:
conn.close()
decision_boundary, positive_prediction_ratio, lift = (
[item[0] for item in query_result],
[item[1] for item in query_result],
[item[2] for item in query_result],
)
decision_boundary.reverse()
if not (ax):
fig, ax = plt.subplots()
if isnotebook():
fig.set_size_inches(8, 6)
ax.set_xlabel("Cumulative Data Fraction")
# elem != elem is a NaN check: replace NaN lift values with the largest finite lift
max_value = max([0 if elem != elem else elem for elem in lift])
lift = [max_value if elem != elem else elem for elem in lift]
param1 = {"color": gen_colors()[0]}
ax.plot(
decision_boundary, lift, **updated_dict(param1, style_kwds, 0),
)
param2 = {"color": gen_colors()[1]}
ax.plot(
decision_boundary,
positive_prediction_ratio,
**updated_dict(param2, style_kwds, 1),
)
color1, color2 = color_dict(style_kwds, 0), color_dict(style_kwds, 1)
if color1 == color2:
color2 = gen_colors()[1]
ax.fill_between(
decision_boundary, positive_prediction_ratio, lift, facecolor=color1, alpha=0.2
)
ax.fill_between(
decision_boundary,
[0 for elem in decision_boundary],
positive_prediction_ratio,
facecolor=color2,
alpha=0.2,
)
ax.set_title("Lift Table")
ax.set_axisbelow(True)
ax.grid()
color1 = mpatches.Patch(color=color1, label="Cumulative Lift")
color2 = mpatches.Patch(color=color2, label="Cumulative Capture Rate")
ax.legend(handles=[color1, color2], loc="center left", bbox_to_anchor=[1, 0.5])
ax.set_xlim(0, 1)
ax.set_ylim(0)
return tablesample(
values={
"decision_boundary": decision_boundary,
"positive_prediction_ratio": positive_prediction_ratio,
"lift": lift,
},
)
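# Illustrative sketch (ours, not part of verticapy): calling lift_chart for a binary
# classifier whose predictions are stored in a relation. The relation and column names
# ("public.scores", "y", "prob_1") are assumptions for the example.
def _example_lift_chart(cursor):
    return lift_chart(
        "y",               # response column holding the true labels
        "prob_1",          # predicted probability of the positive class
        "public.scores",   # scored relation: table, view, or "(SELECT ...) x"
        cursor=cursor,
        pos_label=1,
        nbins=30,
    )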
# ---#
def parameter_grid(param_grid: dict,):
"""
---------------------------------------------------------------------------
Generates the list of the different combinations of input parameters.
Parameters
----------
param_grid: dict
Dictionary of parameters.
Returns
-------
list of dict
List of the different combinations.
"""
check_types([("param_grid", param_grid, [dict]),])
return [dict(zip(param_grid.keys(), values)) for values in product(*param_grid.values())]
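# Illustrative example (ours): parameter_grid takes the Cartesian product of the values,
# preserving dict insertion order, e.g.
#     parameter_grid({"max_depth": [3, 5], "nbins": [10, 20]})
#     # -> [{'max_depth': 3, 'nbins': 10}, {'max_depth': 3, 'nbins': 20},
#     #     {'max_depth': 5, 'nbins': 10}, {'max_depth': 5, 'nbins': 20}]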
# ---#
def plot_acf_pacf(
vdf: vDataFrame,
column: str,
ts: str,
by: list = [],
p: Union[int, list] = 15,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the ACF and PACF Charts.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
column: str
Response column.
ts: str
vcolumn used as timeline. It will be used to order the data.
It can be a numerical vcolumn or a date-like vcolumn (date, datetime,
timestamp...).
by: list, optional
vcolumns used in the partition.
p: int/list, optional
Integer equal to the maximum number of lags to consider during the
computation, or list of the specific lags to include.
p must be a positive integer or a list of positive integers.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
if isinstance(by, str):
by = [by]
check_types(
[
("column", column, [str],),
("ts", ts, [str],),
("by", by, [list],),
("p", p, [int, float],),
("vdf", vdf, [vDataFrame,],),
]
)
tmp_style = {}
for elem in style_kwds:
if elem not in ("color", "colors"):
tmp_style[elem] = style_kwds[elem]
if "color" in style_kwds:
color = style_kwds["color"]
else:
color = gen_colors()[0]
columns_check([column, ts] + by, vdf)
by = vdf_columns_names(by, vdf)
column, ts = vdf_columns_names([column, ts], vdf)
acf = vdf.acf(ts=ts, column=column, by=by, p=p, show=False)
pacf = vdf.pacf(ts=ts, column=column, by=by, p=p, show=False)
result = tablesample(
{
"index": [i for i in range(0, len(acf.values["value"]))],
"acf": acf.values["value"],
"pacf": pacf.values["value"],
"confidence": pacf.values["confidence"],
},
)
fig = plt.figure(figsize=(10, 6))
plt.rcParams["axes.facecolor"] = "#FCFCFC"
ax1 = fig.add_subplot(211)
x, y, confidence = (
result.values["index"],
result.values["acf"],
result.values["confidence"],
)
plt.xlim(-1, x[-1] + 1)
ax1.bar(
x, y, width=0.007 * len(x), color="#444444", zorder=1, linewidth=0,
)
param = {
"s": 90,
"marker": "o",
"facecolors": color,
"edgecolors": "black",
"zorder": 2,
}
ax1.scatter(
x, y, **updated_dict(param, tmp_style,),
)
ax1.plot(
[-1] + x + [x[-1] + 1],
[0 for elem in range(len(x) + 2)],
color=color,
zorder=0,
)
ax1.fill_between(x, confidence, color="#FE5016", alpha=0.1)
ax1.fill_between(x, [-elem for elem in confidence], color="#FE5016", alpha=0.1)
ax1.set_title("Autocorrelation")
y = result.values["pacf"]
ax2 = fig.add_subplot(212)
ax2.bar(x, y, width=0.007 * len(x), color="#444444", zorder=1, linewidth=0)
ax2.scatter(
x, y, **updated_dict(param, tmp_style,),
)
ax2.plot(
[-1] + x + [x[-1] + 1],
[0 for elem in range(len(x) + 2)],
color=color,
zorder=0,
)
ax2.fill_between(x, confidence, color="#FE5016", alpha=0.1)
ax2.fill_between(x, [-elem for elem in confidence], color="#FE5016", alpha=0.1)
ax2.set_title("Partial Autocorrelation")
plt.show()
return result
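# Illustrative sketch (ours, not part of verticapy): drawing the ACF/PACF of a time
# series stored in a vDataFrame. The column names "sales" and "order_date" are
# assumptions for the example.
def _example_plot_acf_pacf(vdf):
    return plot_acf_pacf(vdf, column="sales", ts="order_date", p=12)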
# ---#
def prc_curve(
y_true: str,
y_score: str,
input_relation: Union[str, vDataFrame],
cursor=None,
pos_label: Union[int, float, str] = 1,
nbins: int = 30,
auc_prc: bool = False,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the PRC Curve.
Parameters
----------
y_true: str
Response column.
y_score: str
Prediction Probability.
input_relation: str/vDataFrame
Relation used for scoring. It can be a view, a table, or even a
customized relation. For example, you could write
"(SELECT ... FROM ...) x", as long as an alias is given at the end
of the relation.
cursor: DBcursor, optional
Vertica database cursor.
pos_label: int/float/str, optional
To compute the PRC Curve, one of the response column classes must be the
positive one. The parameter 'pos_label' represents this class.
nbins: int, optional
An integer value that determines the number of decision boundaries. Decision
boundaries are set at equally-spaced intervals between 0 and 1, inclusive.
auc_prc: bool, optional
If set to True, the function will return the PRC AUC without drawing the
curve.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("y_true", y_true, [str],),
("y_score", y_score, [str],),
("input_relation", input_relation, [str, vDataFrame],),
("nbins", nbins, [int, float],),
("auc_prc", auc_prc, [bool],),
]
)
if nbins < 0:
nbins = 999999
cursor, conn, input_relation = check_cursor(cursor, input_relation)
version(cursor=cursor, condition=[9, 1, 0])
query = "SELECT PRC(obs, prob USING PARAMETERS num_bins = {}) OVER() FROM (SELECT (CASE WHEN {} = '{}' THEN 1 ELSE 0 END) AS obs, {}::float AS prob FROM {}) AS prediction_output"
query = query.format(nbins, y_true, pos_label, y_score, input_relation)
executeSQL(cursor, query, "Computing the PRC table.")
query_result = cursor.fetchall()
if conn:
conn.close()
threshold, recall, precision = (
[0] + [item[0] for item in query_result] + [1],
[1] + [item[1] for item in query_result] + [0],
| |
= self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = BertConfig
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, (nn.LayerNorm, BertLayerNorm)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
BERT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
"""
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well
as a decoder, in which case a layer of cross-attention is added between
the self-attention layers, following the architecture described in `Attention is all you need`_ by <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>.
To behave as an decoder the model needs to be initialized with the
:obj:`is_decoder` argument of the configuration set to :obj:`True`; an
:obj:`encoder_hidden_states` is expected as an input to the forward pass.
.. _`Attention is all you need`:
https://arxiv.org/abs/1706.03762
"""
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during pre-training.
This output is usually *not* a good summary
of the semantic content of the input; you're often better off averaging or pooling
the sequence of hidden-states for the whole input sequence.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertModel, BertTokenizer
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention,
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
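# Illustrative sketch (ours, not part of the original file): using BertModel as a decoder
# with cross-attention, as described in the class docstring. The model must be built from
# a config with is_decoder=True; tensor shapes below are assumptions for the example.
def _example_decoder_forward(decoder, input_ids, encoder_hidden_states):
    # encoder_hidden_states: (batch_size, src_len, hidden_size) from a separate encoder pass
    encoder_attention_mask = torch.ones(
        encoder_hidden_states.shape[:2], device=encoder_hidden_states.device
    )
    return decoder(
        input_ids=input_ids,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
    )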
@add_start_docstrings(
"""Bert Model with two heads on top as done during the pre-training: a `masked language modeling` head and
a `next sentence prediction (classification)` head. """,
BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
next_sentence_label=None,
**kwargs
):
r"""
labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False
continuation before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForPreTraining
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForPreTraining.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is | |
<gh_stars>0
from django.test import TestCase, override_settings
from django.db.models.signals import pre_delete
from unittest.mock import MagicMock
from django.apps import apps
from django.utils.crypto import get_random_string
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
import tempfile, shutil # temp dir to test filefields (test_auto_upload_dir_method)
from django.utils.text import slugify
import random
import decimal
from photologue import models as photo_models
from glyke_back.models import *
from glyke_back.views import create_gallery
from glyke_back import signals
def get_random_temp_file(extension):
""""Creates a temporary byte file for testing purposes of given extension.
Returns a tuple of (random file, name)"""
rnd_file_name = f'{get_random_string(length=10)}.{str(extension)}'
return (SimpleUploadedFile(rnd_file_name, b"these are the file contents"), rnd_file_name)
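# Illustrative usage (ours): the helper returns the in-memory upload plus the random name
# it generated, e.g.
#     rnd_file, rnd_name = get_random_temp_file('jpg')
#     # rnd_file is a SimpleUploadedFile; rnd_name looks like 'a1B2c3D4e5.jpg'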
MEDIA_ROOT = tempfile.mkdtemp() # temp dir to test filefields (test_auto_upload_dir_method)
@override_settings(MEDIA_ROOT=MEDIA_ROOT) # temp dir to test filefields (test_auto_upload_dir_method)
class ModelsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.user_not_staff = User.objects.create(username='user_not_staff', password='password')
cls.parent_cat = Category.objects.create(name='Parent cat')
cls.sub_parent_cat = Category.objects.create(name='Sub-parent cat', parent = cls.parent_cat)
cls.child_cat = Category.objects.create(name='Child cat', parent = cls.sub_parent_cat)
cls.product_sub_parent = Product.objects.create(name='Product of sub-parent cat', category = cls.sub_parent_cat)
cls.product_child = Product.objects.create(name='Product of child cat', category = cls.child_cat)
cls.order = Order.objects.create(number=1, customer=None)
cls.order_line = OrderLine.objects.create(parent_order=cls.order, product=cls.product_child)
@classmethod
def tearDownClass(cls): # delete temp dir on teardown
shutil.rmtree(MEDIA_ROOT, ignore_errors=True)
super().tearDownClass()
def test_pre_delete_signals(self):
"""Assert pre_delete signals are sent with proper arguments"""
instance_list = [self.parent_cat, self.sub_parent_cat, self.child_cat, self.product_sub_parent, self.product_child]
for instance in reversed(instance_list):
handler = MagicMock() # Create handler
pre_delete.connect(handler, sender=instance.__class__)
instance.delete()
instance.save()
# Assert the signal was called only once with the args
handler.assert_called_once_with(signal=signals.pre_delete,
sender=instance.__class__,
instance = instance,
using='default')
def test_switch_parent_category_on_delete(self):
"""Assert categories of child categories and products switch to its 'grandparents'"""
expected_new_parent = self.sub_parent_cat.parent
self.sub_parent_cat.delete()
self.assertEqual(Category.objects.get(id=self.child_cat.id).parent, expected_new_parent)
self.assertEqual(Product.objects.get(id=self.product_sub_parent.id).category, expected_new_parent)
def test_set_parent_category_none_on_delete(self):
"""Assert categories of child categories and products set to None if no parents available"""
self.parent_cat.delete()
self.sub_parent_cat.delete()
# get refreshed instances for assertion
self.assertIsNone(Category.objects.get(id=self.child_cat.id).parent)
self.assertIsNone(Product.objects.get(id=self.product_sub_parent.id).category)
def test_category_child_level_update(self):
"""Assert categories' child_level update as expected on CRUD"""
test_cat = Category.objects.create(name='Parent cat 2')
test_cat_2 = Category.objects.create(name='Sub-parent cat 2', parent = self.parent_cat)
self.assertEqual(Category.objects.get(id=self.parent_cat.id).child_level, 0)
self.assertEqual(Category.objects.get(id=self.sub_parent_cat.id).child_level, 1)
self.assertEqual(Category.objects.get(id=self.child_cat.id).child_level, 2)
self.assertEqual(test_cat.child_level, 0)
self.assertEqual(test_cat_2.child_level, 1)
test_cat.parent = self.child_cat
test_cat.save()
test_cat_2.parent = None
test_cat_2.save()
self.assertEqual(Category.objects.get(id=self.parent_cat.id).child_level, 0)
self.assertEqual(Category.objects.get(id=self.sub_parent_cat.id).child_level, 1)
self.assertEqual(Category.objects.get(id=self.child_cat.id).child_level, 2)
self.assertEqual(test_cat.child_level, 3)
self.assertEqual(test_cat_2.child_level, 0)
self.parent_cat.delete()
self.assertEqual(Category.objects.get(id=self.sub_parent_cat.id).child_level, 0)
self.assertEqual(Category.objects.get(id=self.child_cat.id).child_level, 1)
self.assertEqual(Category.objects.get(id=test_cat.id).child_level, 2)
self.assertEqual(Category.objects.get(id=test_cat_2.id).child_level, 0)
def test_category_ordering_indices_update(self):
"""Assert categories' ordering_indices update properly"""
Category.objects.all().delete()
Product.objects.all().delete()
category_a = Category.objects.create(name='a')
self.assertEqual(Category.objects.get(name='a').ordering_index, 1)
Category.objects.create(name='b')
self.assertEqual(Category.objects.get(name='a').ordering_index, 1)
self.assertEqual(Category.objects.get(name='b').ordering_index, 2)
Category.objects.create(name='c')
self.assertEqual(Category.objects.get(name='a').ordering_index, 1)
self.assertEqual(Category.objects.get(name='b').ordering_index, 2)
self.assertEqual(Category.objects.get(name='c').ordering_index, 3)
category_a_a = Category.objects.create(name='aa', parent=category_a)
self.assertEqual(Category.objects.get(name='a').ordering_index, 1)
self.assertEqual(Category.objects.get(name='aa').ordering_index, 2)
self.assertEqual(Category.objects.get(name='b').ordering_index, 3)
self.assertEqual(Category.objects.get(name='c').ordering_index, 4)
Category.objects.create(name='aaa', parent=category_a_a)
self.assertEqual(Category.objects.get(name='a').ordering_index, 1)
self.assertEqual(Category.objects.get(name='aa').ordering_index, 2)
self.assertEqual(Category.objects.get(name='aaa').ordering_index, 3)
self.assertEqual(Category.objects.get(name='b').ordering_index, 4)
self.assertEqual(Category.objects.get(name='c').ordering_index, 5)
Category.objects.create(name='ba')
self.assertEqual(Category.objects.get(name='a').ordering_index, 1)
self.assertEqual(Category.objects.get(name='aa').ordering_index, 2)
self.assertEqual(Category.objects.get(name='aaa').ordering_index, 3)
self.assertEqual(Category.objects.get(name='b').ordering_index, 4)
self.assertEqual(Category.objects.get(name='ba').ordering_index, 5)
self.assertEqual(Category.objects.get(name='c').ordering_index, 6)
Category.objects.get(name='a').delete()
self.assertEqual(Category.objects.get(name='aa').ordering_index, 1)
self.assertEqual(Category.objects.get(name='aaa').ordering_index, 2)
self.assertEqual(Category.objects.get(name='b').ordering_index, 3)
self.assertEqual(Category.objects.get(name='ba').ordering_index, 4)
self.assertEqual(Category.objects.get(name='c').ordering_index, 5)
Category.objects.get(name='aaa').delete()
self.assertEqual(Category.objects.get(name='aa').ordering_index, 1)
self.assertEqual(Category.objects.get(name='b').ordering_index, 2)
self.assertEqual(Category.objects.get(name='ba').ordering_index, 3)
self.assertEqual(Category.objects.get(name='c').ordering_index, 4)
def test_get_deleted_product_instance_on_delete(self):
"""Assert a deleted instance is created on_delete"""
self.assertFalse(Product.objects.filter(name='_deleted_').exists())
self.product_child.delete()
deleted_product_auto = Product.objects.get(name='_deleted_')
self.assertEqual(OrderLine.objects.get(id=self.order_line.id).product, deleted_product_auto)
def test_is_active_switch(self):
"""Assert is_active attribute switches correctly"""
model_list = apps.get_models()
for model in model_list:
if hasattr(model, 'is_active'):
model.objects.all().update(is_active=False)
self.assertFalse(model.objects.filter(is_active=True))
model.objects.all().update(is_active=True)
self.assertFalse(model.objects.filter(is_active=False))
def test__str__methods(self):
"""Assert __str__ methods work properly"""
rnd_str = get_random_string(length=10)
category = Category.objects.create(name=rnd_str)
self.assertEqual(str(category), rnd_str)
product = Product.objects.create(name=rnd_str)
self.assertEqual(str(product), rnd_str)
order_no_user = Order.objects.create(number=rnd_str, customer=None)
self.assertIn('no_name', str(order_no_user))
order = Order.objects.create(number=rnd_str, customer=self.user_not_staff)
self.assertIn(self.user_not_staff.username, str(order))
orderline_no_user = OrderLine.objects.create(parent_order=order_no_user, product=product)
self.assertIn('no_name | Line: 1', str(orderline_no_user))
orderline = OrderLine.objects.create(parent_order=order, product=product)
self.assertIn(f'{self.user_not_staff.username} | Line: 1', str(orderline))
def test_auto_upload_dir_method(self):
"""Assert models.get_upload_dir function works properly"""
# case: category w/o picture
self.assertEqual(str(self.parent_cat.picture), 'category/no_image.png')
# case: category w/ temporary random picture
rnd_temp_file, rnd_temp_file_name = get_random_temp_file('jpg')
self.parent_cat.picture = rnd_temp_file
self.parent_cat.save()
cat_name_slug = slugify(self.parent_cat.name.lower())
self.assertEqual(str(self.parent_cat.picture), f'category/{cat_name_slug}/{rnd_temp_file_name}')
def test_product_create_assign_photo(self):
"""Assert product save method assigns the main_photo attr on creation"""
rnd_product_name = get_random_string(length=20)
gallery = create_gallery(title=rnd_product_name)
img_file, img_file_name = get_random_temp_file('jpg')
photo = photo_models.Photo.objects.create(image=img_file, title=img_file_name, slug=slugify(img_file_name))
gallery.photos.add(photo)
product = Product.objects.create(name=rnd_product_name, photos=gallery)
self.assertIsNotNone(product.photos)
self.assertEqual(gallery.slug, slugify(product.name + "_gallery"))
self.assertEqual(product.main_photo, photo)
def test_photos_rename_on_product_rename(self):
"""Checks if product's gallery & photos are renamed properly when the product is renamed"""
rnd_product_name = get_random_string(length=20)
gallery = create_gallery(title=rnd_product_name)
# create 4 random photos and add them to product's gallery
for _ in range(4):
rnd_photo_name = f'{get_random_string()}_{rnd_product_name}' # this part is usually done in the view
img_file = SimpleUploadedFile(rnd_photo_name, b"these are the file contents")
photo = photo_models.Photo.objects.create(image=img_file, title=rnd_photo_name, slug=slugify(rnd_photo_name))
gallery.photos.add(photo)
product = Product.objects.create(name=rnd_product_name, photos=gallery)
self.assertTrue(product.photos.photos.all().exists())
self.assertQuerysetEqual(product.photos.photos.all().order_by('id'), photo_models.Photo.objects.all().order_by('id'))
self.assertEqual(gallery.slug, slugify(product.name + "_gallery"))
for photo in product.photos.photos.all():
self.assertTrue(photo.title.endswith(f'_{rnd_product_name}'))
self.assertTrue(photo.slug.endswith(f'_{slugify(rnd_product_name)}'))
# rename the product, all of its photos and gallery has to be renamed on save() as well
rnd_product_name = get_random_string(length=20)
product.name = rnd_product_name
product.save()
self.assertQuerysetEqual(product.photos.photos.all().order_by('id'), photo_models.Photo.objects.all().order_by('id'))
self.assertEqual(gallery.slug, slugify(product.name + "_gallery"))
for photo in product.photos.photos.all():
self.assertTrue(photo.title.endswith(f'_{rnd_product_name}'))
self.assertTrue(photo.slug.endswith(f'_{slugify(rnd_product_name)}'))
def test_product_save_profit_update(self):
"""Assert product save method updates profit attr"""
def update_check_prices():
product.cost_price = rnd_cost_price
product.discount_percent = rnd_discount
product.selling_price = rnd_selling_price
product.save()
test_profit = Decimal(rnd_selling_price*Decimal(1-rnd_discount/100)).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP) - rnd_cost_price
self.assertEqual(product.profit, test_profit)
self.assertEqual(product.cost_price, rnd_cost_price)
self.assertEqual(product.discount_percent, rnd_discount)
self.assertEqual(product.selling_price, rnd_selling_price)
self.assertEqual(product.profit, product.end_user_price-rnd_cost_price)
# case: all 0
product = Product.objects.create(name='test_product')
self.assertEqual(product.profit, 0)
# case: profit > 0, w/ discount
rnd_cost_price = decimal.Decimal(random.randrange(1, 9999))/100
rnd_selling_price = decimal.Decimal(random.randrange((rnd_cost_price*100), 9999))/100
rnd_discount = random.randint(0, int((1-(rnd_cost_price/rnd_selling_price))*100))
update_check_prices()
self.assertGreaterEqual(product.profit, 0)
# case: profit > 0, no discount
rnd_discount = 0
update_check_prices()
self.assertGreaterEqual(product.profit, 0)
# case: profit < 0, no discount
rnd_selling_price = decimal.Decimal(random.randrange(1, 9999))/100
rnd_cost_price = decimal.Decimal(random.randrange((rnd_selling_price*100), 9999))/100
update_check_prices()
self.assertLessEqual(product.profit, 0)
# case: profit < 0, w/ discount
rnd_discount = random.randint(1, 80)
update_check_prices()
def test_product_save_end_user_price_update(self):
"""Assert product save method updates end_user_price attr"""
def update_check_prices():
product.discount_percent = rnd_discount
product.selling_price = rnd_selling_price
product.save()
test_end_user_price = Decimal(rnd_selling_price*Decimal(1-rnd_discount/100)).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
self.assertEqual(product.selling_price, rnd_selling_price)
self.assertEqual(product.discount_percent, rnd_discount)
self.assertEqual(product.end_user_price, test_end_user_price)
self.assertEqual(product.profit, test_end_user_price-product.cost_price)
# case: all == 0
product = Product.objects.create(name=get_random_string())
self.assertEqual(product.end_user_price, 0)
# case: all > 0
rnd_selling_price = decimal.Decimal(random.randrange(100, 9999))/100
rnd_discount = random.randint(1, 99)
update_check_prices()
# case: selling_price has changed
rnd_selling_price = decimal.Decimal(random.randrange(100, 9999))/100
update_check_prices()
# case: discount_percent has changed
rnd_discount = random.randint(1, 99)
update_check_prices()
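# A hedged sketch of the price math the two tests above pin down (one way a
# Product.save() override could compute it; the actual model may differ):
def _sketch_recalculate_prices(cost_price, selling_price, discount_percent):
    end_user_price = Decimal(selling_price * Decimal(1 - discount_percent / 100)
                             ).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
    profit = end_user_price - cost_price
    return end_user_price, profit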
def test_orderline_autoinc(self):
"""Assert line auto-numering in checks work properly"""
order_no_user = Order.objects.create()
for _ in range(5): # i'm guessing 5 lines is more than enough
product = Product.objects.create(name=get_random_string(length=12))
order_line = OrderLine.objects.create(parent_order=order_no_user, product=product)
self.assertEqual(order_line.line_number, order_no_user.order_lines.count())
# deletes all lines one by one, except for the last one
# the line_number of each following line has to be decremented by 1 (keeping the initial line order)
for _ in range(order_no_user.order_lines.count()-1):
order_no_user.order_lines.first().delete()
expected_line_number = 1
for order_line in order_no_user.order_lines.all():
self.assertEqual(order_line.line_number, expected_line_number)
expected_line_number += 1
def test_orderline_duplicating_avoiding(self):
"""Checks if an existing order_line instance's quantity is incremented properly, if a new order_line instance of the same product is tried to be created. Also check if a duplicating instance of order_line is not created."""
quantity_1 = random.randint(1, 100)
quantity_2 = random.randint(1, 100)
OrderLine.objects.all().delete()
order = Order.objects.create()
self.assertEqual(OrderLine.objects.all().count(), 0)
self.assertEqual(order.order_lines.count(), 0)
# creating 1 line of product A: 1 created
OrderLine.objects.create(parent_order=order, product=self.product_child, quantity=quantity_1)
self.assertEqual(OrderLine.objects.all().count(), 1)
self.assertEqual(order.order_lines.count(), 1)
self.assertEqual(order.order_lines.first().quantity, quantity_1)
# creating a 2nd line of product A: 0 created, line 1 updated
OrderLine.objects.create(parent_order=order, product=self.product_child, quantity=quantity_2)
self.assertEqual(OrderLine.objects.all().count(), 1)
self.assertEqual(order.order_lines.count(), 1)
self.assertEqual(order.order_lines.first().quantity, quantity_1+quantity_2)
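# A hedged sketch of one common way to get the de-duplication asserted above
# (illustrative; the project's OrderLine logic may differ): fold a new line into
# an existing line for the same (order, product) pair instead of inserting.
def _sketch_merge_duplicate_line(order, product, quantity):
    existing = order.order_lines.filter(product=product).first()
    if existing:
        existing.quantity += quantity
        existing.save()
        return existing
    return None  # caller would fall through to a normal insert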
def test_order_calculating(self):
"""Checks if Order's total prices are calculated properly"""
expected_order_cost_price = 0
expected_order_selling_price = 0
expected_order_end_user_price = 0
order = Order.objects.create()
for i in range(3):
self.assertEqual(order.cost_price, expected_order_cost_price)
self.assertEqual(order.selling_price, expected_order_selling_price)
self.assertEqual(order.end_user_price, expected_order_end_user_price)
# add 3 order_lines
# has to be different product each time, because same product lines get summed up
product = Product.objects.create(name=get_random_string(),
cost_price = decimal.Decimal(random.randrange(100, 9999))/100,
selling_price = decimal.Decimal(random.randrange((self.product_child.cost_price*100), 9999))/100,
discount_percent = random.randint(0, 4) * 10,
)
rnd_quantity = random.randint(1, 4)
OrderLine.objects.create(parent_order=order,
product=product,
quantity=rnd_quantity)
# update expected values (refresh to pick up prices computed in Product.save())
product.refresh_from_db()
expected_order_cost_price += product.cost_price * rnd_quantity
expected_order_selling_price += product.selling_price * rnd_quantity
expected_order_end_user_price += product.end_user_price * rnd_quantity
def test_order_update_on_orderline_save(self):
"""Checks if Order's save() method is called on any of its orderlines' save() and if its prices are recalculated properly"""
initial_selling_price = decimal.Decimal(random.randrange(100, 9999))/100
multiplier = random.randint(1, 5)
order = Order.objects.create()
self.product_child.selling_price = initial_selling_price
self.product_child.save()
order_line= OrderLine.objects.create(parent_order=order, product=self.product_child)
self.assertEqual(order.selling_price, initial_selling_price)
# case: quantity update
order_line.quantity = multiplier
order_line.save()
self.assertEqual(order.selling_price, initial_selling_price*multiplier)
# case: price update
self.product_child.selling_price = initial_selling_price*multiplier
order_line.quantity = 1
order_line.save()
self.assertEqual(order.selling_price, initial_selling_price*multiplier)
def test_order_update_on_orderline_delete(self):
"""Checks if Order's save() method is called on any of its orderlines' delete() and if its prices are recalculated properly"""
initial_selling_price = decimal.Decimal(random.randrange(100, 9999))/100
order = Order.objects.create()
self.product_child.selling_price = initial_selling_price
self.product_child.save()
order_line = OrderLine.objects.create(parent_order=order, product=self.product_child)
self.assertEqual(order.selling_price, initial_selling_price)
# case: no orderlines
order_line.delete()
self.assertEqual(order.selling_price, 0)
def test_order_items_total_update(self):
"""Checks if Order's items_total is calculated properly"""
expected_items_total = 0
OrderLine.objects.all().delete()
order = Order.objects.create()
self.assertEqual(order.items_total, expected_items_total)
for i in range(5):
expected_order_lines_count = i + 1
rnd_product = Product.objects.create(name=get_random_string(), category = self.child_cat)
rnd_quantity = random.randint(1, 100)
expected_items_total +=
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Callable, Dict, Iterator, List, Optional, Set, Tuple, Union
from darwin.path_utils import construct_full_path
Point = Dict[str, float]
BoundingBox = Dict[str, float]
Polygon = List[Point]
ComplexPolygon = List[Polygon]
Node = Dict[str, Any]
EllipseData = Dict[str, Union[float, Point]]
CuboidData = Dict[str, Any]
KeyFrame = Dict[str, Any]
Segment = List[int]
DarwinVersionNumber = Tuple[int, int, int]
PathLike = Union[str, Path]
ErrorHandler = Callable[[int, str], None]
@dataclass
class Team:
"""
Definition of a V7 team.
Attributes
----------
default: bool
If this is the default Team or not.
slug: str
This team's slug.
datasets_dir: str
The path to the directory of all datasets this teams contains.
api_key: str
The API key used to authenticate for this Team.
selected: bool, default: False
If this is the currently active Team. Defaults to ``False``.
"""
default: bool
slug: str
datasets_dir: str
api_key: str
selected: bool = False
@dataclass(frozen=True)
class Feature:
"""
Structured payload of a Feature record on V7 Darwin.
Attributes
----------
name: str
The name of this ``Feature``.
enabled: bool
Whether or not this ``Feature`` is enabled. Disabled ``Feature``s do nothing, as if they
didn't exist.
"""
name: str
enabled: bool
@dataclass(frozen=True, eq=True)
class AnnotationClass:
"""
Represents an AnnotationClass from an Annotation.
Attributes
----------
name: str
The name of this ``AnnotationClass``.
annotation_type: str
The type of this ``AnnotationClass``.
annotation_internal_type: Optional[str], default: None
The V7 internal type of this ``AnnotationClass``. This is mostly used to convert from types
that are known in the outside world by a given name, but then are known inside V7's lingo
by another.
"""
name: str
annotation_type: str
annotation_internal_type: Optional[str] = None
@dataclass(frozen=True, eq=True)
class SubAnnotation:
"""
Represents a subannotation that belongs to an AnnotationClass.
Attributes
----------
annotation_type: str
The type of this ``SubAnnotation``.
data: Any
Any external data, in any format, relevant to this ``SubAnnotation``. Used for compatibility
purposes with external formats.
"""
annotation_type: str
data: Any
@dataclass(frozen=True, eq=True)
class Annotation:
"""
Represents an Annotation from an Image/Video.
Attributes
----------
annotation_class: AnnotationClass
The ``AnnotationClass`` from this ``Annotation``.
data: Any
Any external data, in any format, relevant to this ``Annotation``. Used for compatibility
purposes with external formats.
subs: List[SubAnnotation]
List of ``SubAnnotations`` belonging to this ``Annotation``.
"""
annotation_class: AnnotationClass
data: Any
subs: List[SubAnnotation] = field(default_factory=list)
def get_sub(self, annotation_type: str) -> Optional[SubAnnotation]:
"""
Returns the first SubAnnotation that matches the given type.
Parameters
----------
annotation_type: str
The type of the subannotation.
Returns
-------
Optional[SubAnnotation]
A SubAnnotation found, or `None` if none was found.
"""
for sub in self.subs:
if sub.annotation_type == annotation_type:
return sub
return None
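# Hedged usage sketch (not part of the original module): get_sub() returns the
# first SubAnnotation whose annotation_type matches; the names below are
# illustrative only.
def _example_get_sub() -> Optional[SubAnnotation]:
    ann = Annotation(
        annotation_class=AnnotationClass(name="car", annotation_type="bounding_box"),
        data={"x": 1.0, "y": 2.0, "w": 10.0, "h": 5.0},
        subs=[SubAnnotation(annotation_type="text", data="a caption")],
    )
    return ann.get_sub("text")  # the "text" SubAnnotation; None for unknown types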
@dataclass(frozen=True, eq=True)
class VideoAnnotation:
"""
Represents an Annotation that belongs to a Video.
Attributes
----------
annotation_class: AnnotationClass
The ``AnnotationClass`` from this ``VideoAnnotation``.
frames: Dict[int, Any]
A dictionary of frames for this ``VideoAnnotation``.
keyframes: Dict[int, bool]
The keyframes for this ``VideoAnnotation``. Keyframes are a selection of frames from the
``frames`` attribute.
segments: List[Segment]
A list of ``Segment``s.
interpolated: bool
Whether this ``VideoAnnotation`` is interpolated or not.
"""
annotation_class: AnnotationClass
frames: Dict[int, Any]
keyframes: Dict[int, bool]
segments: List[Segment]
interpolated: bool
def get_data(
self, only_keyframes: bool = True, post_processing: Optional[Callable[[Annotation, Any], Any]] = None
) -> Dict[str, Any]:
"""
Return the post-processed frames and the additional information from this
``VideoAnnotation`` in a dictionary with the format:
.. code-block:: python
{
"frames": {
# Post-Processed Frames here
},
"segments": [
# Segments here
]
"interpolated": True
}
Parameters
----------
only_keyframes: bool, default: True
Whether or not to return only the keyframes. Defaults to ``True``.
post_processing: Optional[Callable[[Annotation, Any], Any]], default: None
If given, it processes each frame through the given ``Callable`` before adding it to the
returned dictionary. Defaults to ``None``.
Returns
-------
Dict[str, Any]
A dictionary containing the processed frames, the segments of this ``VideoAnnotation``
and whether or not it is interpolated.
"""
if not post_processing:
post_processing = lambda annotation, data: data
return {
"frames": {
frame: {
**post_processing(
self.frames[frame],
{self.frames[frame].annotation_class.annotation_type: self.frames[frame].data},
),
**{"keyframe": self.keyframes[frame]},
}
for frame in self.frames
if not only_keyframes or self.keyframes[frame]
},
"segments": self.segments,
"interpolated": self.interpolated,
}
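# Hedged usage sketch (illustrative only): a two-frame VideoAnnotation where only
# frame 0 is a keyframe, so get_data() with the default only_keyframes=True keeps
# a single post-processed frame alongside the segments/interpolated fields.
def _example_get_data() -> Dict[str, Any]:
    cls = AnnotationClass(name="vehicle", annotation_type="tag")
    video = VideoAnnotation(
        annotation_class=cls,
        frames={0: Annotation(cls, {}), 1: Annotation(cls, {})},
        keyframes={0: True, 1: False},
        segments=[[0, 1]],
        interpolated=False,
    )
    return video.get_data()  # {"frames": {0: {...}}, "segments": [[0, 1]], "interpolated": False}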
@dataclass
class AnnotationFile:
"""
Represents a file containing annotations. Mostly useful when trying to import or export
annotations to/from darwin V7.
Attributes
----------
path: Path
Path to the file.
filename: str
Name of the file containing the annotations.
annotation_classes: Set[AnnotationClass]
``Set`` of all ``AnnotationClass``es this file contains. Used as a way to know in advance
which ``AnnotationClass``es this file has without having to go through the list of
annotations.
annotations: Union[List[VideoAnnotation], List[Annotation]]
List of ``VideoAnnotation``s or ``Annotation``s.
is_video: bool, default: False
Whether the annotations in the ``annotations`` attribute are ``VideoAnnotation`` or not.
Defaults to ``False``.
image_width: Optional[int], default: None
Width of the image in this annotation. Defaults to ``None``.
image_height: Optional[int], default: None
Height of the image in this annotation. Defaults to ``None``.
image_url: Optional[str], default: None
URL of the image in this annotation. Defaults to ``None``.
workview_url: Optional[str], default: None
URL of the workview for this annotation. Defaults to ``None``.
seq: Optional[int], default: None
Sequence for this annotation. Defaults to ``None``.
frame_urls: Optional[List[str]], default: None
URLs for the frames this ``AnnotationFile`` has. Defaults to ``None``.
remote_path: Optional[str], default: None
Remote path for this Annotation file in V7's darwin. Defaults to ``None``.
"""
path: Path
filename: str
annotation_classes: Set[AnnotationClass]
annotations: Union[List[VideoAnnotation], List[Annotation]]
is_video: bool = False
image_width: Optional[int] = None
image_height: Optional[int] = None
image_url: Optional[str] = None
workview_url: Optional[str] = None
seq: Optional[int] = None
frame_urls: Optional[List[str]] = None
remote_path: Optional[str] = None
@property
def full_path(self) -> str:
"""
Returns the absolute path of this file.
Returns
-------
str
The absolute path of the file.
"""
return construct_full_path(self.remote_path, self.filename)
def make_bounding_box(
class_name: str, x: float, y: float, w: float, h: float, subs: Optional[List[SubAnnotation]] = None
) -> Annotation:
"""
Creates and returns a bounding box annotation. ``x``, ``y``, ``w`` and ``h`` are rounded to 3
decimal places when creating the annotation.
Parameters
----------
class_name: str
The name of the class for this ``Annotation``.
x: float
The top left ``x`` value where the bounding box will start.
y: float
The top left ``y`` value where the bounding box will start.
w: float
The width of the bounding box.
h: float
The height of the bounding box.
subs: Optional[List[SubAnnotation]], default: None
List of ``SubAnnotation``s for this ``Annotation``. Defaults to ``None``.
Returns
-------
Annotation
A bounding box ``Annotation``.
"""
return Annotation(
AnnotationClass(class_name, "bounding_box"),
{"x": round(x, 3), "y": round(y, 3), "w": round(w, 3), "h": round(h, 3)},
subs or [],
)
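# Hedged usage sketch: coordinates are rounded to three decimal places, as the
# docstring above states (values here are illustrative).
def _example_make_bounding_box() -> Annotation:
    box = make_bounding_box("car", x=10.1234, y=20.5678, w=30.0, h=40.0)
    # box.data == {"x": 10.123, "y": 20.568, "w": 30.0, "h": 40.0}
    return box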
def make_tag(class_name: str, subs: Optional[List[SubAnnotation]] = None) -> Annotation:
return Annotation(AnnotationClass(class_name, "tag"), {}, subs or [])
def make_polygon(
class_name: str,
point_path: List[Point],
bounding_box: Optional[Dict] = None,
subs: Optional[List[SubAnnotation]] = None,
) -> Annotation:
return Annotation(
AnnotationClass(class_name, "polygon"),
_maybe_add_bounding_box_data({"path": point_path}, bounding_box),
subs or [],
)
def make_complex_polygon(
class_name: str,
point_paths: List[List[Point]],
bounding_box: Optional[Dict] = None,
subs: Optional[List[SubAnnotation]] = None,
) -> Annotation:
return Annotation(
AnnotationClass(class_name, "complex_polygon", "polygon"),
_maybe_add_bounding_box_data({"paths": point_paths}, bounding_box),
subs or [],
)
def make_keypoint(class_name: str, x: float, y: float, subs: Optional[List[SubAnnotation]] = None) -> Annotation:
return Annotation(AnnotationClass(class_name, "keypoint"), {"x": x, "y": y}, subs or [])
def make_line(class_name: str, path: List[Point], subs: Optional[List[SubAnnotation]] = None) -> Annotation:
return Annotation(AnnotationClass(class_name, "line"), {"path": path}, subs or [])
def make_skeleton(class_name: str, nodes: List[Node], subs: Optional[List[SubAnnotation]] = None) -> Annotation:
return Annotation(AnnotationClass(class_name, "skeleton"), {"nodes": nodes}, subs or [])
def make_ellipse(class_name: str, parameters: EllipseData, subs: Optional[List[SubAnnotation]] = None) -> Annotation:
"""
Creates and returns an Ellipse annotation.
Data needed to build an Ellipse annotation via ``make_ellipse``.
Parameters
----------
class_name: str
The name of the class for this ``Annotation``.
parameters: EllipseData
The data needed to build an Ellipse. This data must be a dictionary with a format similar
to:
.. code-block:: javascript
{
"angle": 0.57,
"center": {
"x": 2745.69,
"y": 2307.46
},
"radius": {
"x": 467.02,
"y": 410.82
}
}
Where:
- ``angle: float`` is the orientation angle of the ellipse.
- ``center: Point`` is the center point of the ellipse.
- ``radius: Point`` is the
import functools
import struct
def compressed_unimplemented_instruction(word, **kwargs):
return {
'cmd': 'Undefined',
'word': word,
'size': 2,
}
def uncompressed_unimplemented_instruction(word, **kwargs):
return {
'cmd': 'Undefined',
'word': word,
'size': 4,
}
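# Hedged sketch (not part of the original decoder): in the RVC encoding the two
# least-significant bits of a halfword select the compressed quadrant (0b00, 0b01
# or 0b10), while 0b11 marks a full 32-bit instruction; a dispatcher would then
# switch on funct3 (bits 15:13) to reach handlers like the ones below.
def _example_quadrant(word):
    return word & 0b11
def _example_funct3(word):
    return (word >> 13) & 0b111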
def c_j(word, **kwargs):
# C.J performs an unconditional control transfer. The offset is
# sign-extended and added to the pc to form the jump target address.
# C.J can therefore target a ±2 KiB range. C.J expands to jal x0,
# offset[11:1].
return {
'cmd': 'JAL',
'imm': kwargs.get('imm'),
'rd': 0,
'word': word,
'size': 2,
}
def c_jr(word):
return {
'cmd': 'JALR',
'imm': 0,
'rs1': compressed_rs1_or_rd(word),
'rd': 0,
'word': word,
'size': 2,
}
def c_beqz(word, **kwargs):
# BEQZ performs conditional control transfers. The offset is
# sign-extended and added to the pc to form the branch target address.
# It can therefore target a ±256 B range. C.BEQZ takes the branch if
# the value in register rs1' is zero. It expands to
# beq rs1', x0, offset[8:1].
return {
'cmd': 'BEQ',
'imm': kwargs.get('imm'),
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rs2': 0,
'taken': None,
'word': word,
'size': 2,
}
def c_bnez(word, **kwargs):
# C.BNEZ performs conditional control transfers. The offset is
# sign-extended and added to the pc to form the branch target address.
# It can therefore target a ±256 B range. C.BNEZ takes the branch if
# the value in register rs1' is nonzero. It expands to
# bne rs1', x0, offset[8:1].
return {
'cmd': 'BNE',
'imm': kwargs.get('imm'),
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rs2': 0,
'taken': None,
'word': word,
'size': 2,
}
def c_mv(word):
# C.MV copies the value in register rs2 into register rd. C.MV expands into add rd, x0, rs2;
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.106)
return {
'cmd': 'ADD',
'rs1': 0,
'rs2': compressed_rs2(word),
'rd': compressed_rs1_or_rd(word),
'word': word,
'size': 2,
}
def c_lui(word, **kwargs):
# C.LUI loads the non-zero 6-bit immediate field into bits 17–12 of the
# destination register, clears the bottom 12 bits, and sign-extends bit
# 17 into all higher bits of the destination. C.LUI expands into
# lui rd, nzimm[17:12]. C.LUI is only valid when rd̸={x0, x2}, and when
# the immediate is not equal to zero.
#
# C.LUI nzimm[17] dest̸={0, 2} nzimm[16:12] C1
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.104)
return {
'cmd': 'LUI',
'rd': compressed_rs1_or_rd(word),
'imm': kwargs.get('imm'),
'word': word,
'size': 2,
}
def c_ldsp(word):
# C.LDSP is an RV64C/RV128C-only instruction that loads a 64-bit value from memory
# into register rd. It computes its effective address by adding the zero-extended
# offset, scaled by 8, to the stack pointer, x2. It expands to ld rd, offset[8:3](x2);
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.99)
#
# 011 uimm[5] rd̸=0 uimm[4:3|8:6] 10 C.LDSP (RV64/128; RES, rd=0);
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.111)
_b080706 = (word >> 2) & 0b111
_b0403 = (word >> 5) & 0b11
_b05 = (word >> 12) & 0b1
_imm = (_b080706 << 6) | (_b05 << 5) | (_b0403 << 3)
return {
'cmd': 'LD',
'rs1': 2,
'imm': _imm,
'rd': compressed_rs1_or_rd(word),
'nbytes': 8,
'word': word,
'size': 2,
}
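# Hedged worked example (assuming the rd helper reads bits 11:7, the CI-format rd
# field): 0x6442 encodes c.ldsp x8, 16(sp) -- uimm[4:3] = 0b10 and every other
# offset bit is zero, so the shifts above rebuild imm = 2 << 3 = 16.
def _example_c_ldsp_decode():
    return c_ldsp(0x6442)  # expected: {'cmd': 'LD', 'rs1': 2, 'imm': 16, 'rd': 8, ...}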
def c_lw(word, **kwargs):
# C.LW loads a 32-bit value from memory into register rd ′. It computes
# an effective address by adding the zero-extended offset, scaled by 4,
# to the base address in register rs1 ′. It expands to
# lw rd', offset[6:2](rs1').
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.101)
return {
'cmd': 'LW',
'rs1': compressed_quadrant_00_rs1_prime(word),
'imm': kwargs.get('imm'),
'rd': compressed_quadrant_00_rs2_prime_or_rd_prime(word),
'nbytes': 4,
'word': word,
'size': 2,
}
def c_ld(word, **kwargs):
# C.LD is an RV64C/RV128C-only instruction that loads a 64-bit value from memory
# into register rd'. It computes an effective address by adding the zero-extended
# offset, scaled by 8, to the base address in register rs1'. It expands to ld rd',
# offset[7:3](rs1').
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.101)
return {
'cmd': 'LD',
'rs1': compressed_quadrant_00_rs1_prime(word),
'imm': kwargs.get('imm'),
'rd': compressed_quadrant_00_rs2_prime_or_rd_prime(word),
'nbytes': 8,
'word': word,
'size': 2,
}
def c_sd(word, **kwargs):
# C.SD is an RV64C/RV128C-only instruction that stores a 64-bit value in
# register rs2' to memory. It computes an effective address by adding the
# zero-extended offset, scaled by 8, to the base address in register rs1'.
# It expands to sd rs2', offset[7:3](rs1')
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.102)
return {
'cmd': 'SD',
'rs1': compressed_quadrant_00_rs1_prime(word),
'rs2': compressed_quadrant_00_rs2_prime_or_rd_prime(word),
'imm': kwargs.get('imm'),
'nbytes': 8,
'word': word,
'size': 2,
}
def c_sw(word, **kwargs):
# C.SW stores a 32-bit value in register rs2' to memory. It computes an
# effective address by adding the zero-extended offset, scaled by 4, to
# the base address in register rs1'. It expands to sw rs2', offset[6:2](rs1')
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.102)
return {
'cmd': 'SW',
'rs1': compressed_quadrant_00_rs1_prime(word),
'rs2': compressed_quadrant_00_rs2_prime_or_rd_prime(word),
'imm': kwargs.get('imm'),
'nbytes': 4,
'word': word,
'size': 2,
}
def c_addi4spn(word, **kwargs):
# C.ADDI4SPN is a CIW-format instruction that adds a zero-extended non-zero
# immediate, scaledby 4, to the stack pointer, x2, and writes the result to rd'.
# This instruction is used to generate pointers to stack-allocated variables,
# and expands to addi rd', x2, nzuimm[9:2].
return {
'cmd': 'ADDI',
'imm': kwargs.get('imm'),
'rs1': 2,
'rd': compressed_quadrant_00_rs2_prime_or_rd_prime(word),
'word': word,
'size': 2,
}
def c_addi16sp(word, **kwargs):
# C.ADDI16SP is used to adjust the stack pointer in procedure prologues and
# epilogues. It expands into addi x2, x2, nzimm[9:4]. C.ADDI16SP is only
# valid when nzimm̸=0; the code point with nzimm=0 is reserved.
return {
'cmd': 'ADDI',
'imm': kwargs.get('imm'),
'rs1': 2,
'rd': 2,
'word': word,
'size': 2,
}
def c_sdsp(word, **kwargs):
# C.SDSP is an RV64C/RV128C-only instruction that stores a 64-bit value in
# register rs2 to memory. It computes an effective address by adding the
# zero-extended offset, scaled by 8, to the stack pointer, x2. It expands to
# sd rs2, offset[8:3](x2).
return {
'cmd': 'SD',
'imm': kwargs.get('imm'),
'rs1': 2,
'rs2': compressed_rs2(word),
'nbytes': 8,
'word': word,
'size': 2,
}
def c_addi(word, **kwargs):
# C.ADDI adds the non-zero sign-extended 6-bit immediate to the value in
# register rd then writes the result to rd. C.ADDI expands into
# addi rd, rd, nzimm[5:0]. C.ADDI is only valid when rd̸=x0. The code point
# with both rd=x0 and nzimm=0 encodes the C.NOP instruction; the remaining
# code points with either rd=x0 or nzimm=0 encode HINTs.
return {
'cmd': 'ADDI',
'imm': kwargs.get('imm'),
'rs1': compressed_rs1_or_rd(word),
'rd': compressed_rs1_or_rd(word),
'word': word,
'size': 2,
}
def c_addiw(word, **kwargs):
# C.ADDIW is an RV64C/RV128C-only instruction that performs the same
# computation but produces a 32-bit result, then sign-extends result to 64
# bits. C.ADDIW expands into addiw rd, rd, imm[5:0]. The immediate can be
# zero for C.ADDIW, where this corresponds to sext.w rd. C.ADDIW is only
# valid when rd̸=x0; the code points with rd=x0 are reserved.
return {
'cmd': 'ADDIW',
'imm': kwargs.get('imm'),
'rs1': compressed_rs1_or_rd(word),
'rd': compressed_rs1_or_rd(word),
'word': word,
'size': 2,
}
def c_nop(word):
return {
'cmd': 'NOP',
'word': word,
'size': 2,
}
def c_add(word):
# C.ADD adds the values in registers rd and rs2 and writes the result to
# register rd. C.ADD expands into add rd, rd, rs2. C.ADD is only valid when
# rs2̸=x0; the code points with rs2=x0 correspond to the C.JALR and C.EBREAK
# instructions. The code points with rs2̸=x0 and rd=x0 are HINTs.
return {
'cmd': 'ADD',
'rs1': compressed_rs1_or_rd(word),
'rs2': compressed_rs2(word),
'rd': compressed_rs1_or_rd(word),
'word': word,
'size': 2,
}
def c_sub(word):
# C.SUB subtracts the value in register rs2 ′ from the value in register rd',
# then writes the result to register rd ′. C.SUB expands into
# sub rd', rd', rs2'.
return {
'cmd': 'SUB',
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rs2': compressed_quadrant_01_rs2_prime(word),
'rd': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'word': word,
'size': 2,
}
def c_xor(word):
# C.XOR computes the bitwise XOR of the values in registers rd'
# and rs2', then writes the result to register rd'. C.XOR expands
# into xor rd', rd', rs2'.
return {
'cmd': 'XOR',
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rs2': compressed_quadrant_01_rs2_prime(word),
'rd': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'word': word,
'size': 2,
}
def c_or(word):
# C.OR computes the bitwise OR of the values in registers rd'
# and rs2', then writes the result to register rd'. C.OR expands
# into or rd', rd', rs2'.
return {
'cmd': 'OR',
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rs2': compressed_quadrant_01_rs2_prime(word),
'rd': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
| |
### All utility functions to obtain the perturbation mask
import numpy as np
import itertools
import random
import math
from utils import *
import os
import time
import scipy.io as scio
import datetime
import re
import matplotlib.pyplot as plt
import pylab
import csv
from skimage import transform, filters
from textwrap import wrap
import cv2
import torch  # torch.nn / torch.optim / torch.from_numpy are used below
from torch.autograd import Variable
import sys
from PIL import Image
def Get_blurred_img(input_img, img_label, model, resize_shape=(224, 224), Gaussian_param = [51, 50], Median_param = 11, blur_type= 'Gaussian', use_cuda = 1):
########################
# Generate blurred images as the baseline
# Parameters:
# -------------
# input_img: the original input image
# img_label: the classification target that you want to visualize (img_label=-1 means the top 1 classification label)
# model: the model that you want to visualize
# resize_shape: the input size for the given model
# Gaussian_param: parameters for Gaussian blur
# Median_param: parameters for median blur
# blur_type: Gaussian blur or median blur or mixed blur
# use_cuda: use gpu (1) or not (0)
####################################################
original_img = cv2.imread(input_img, 1)
original_img = cv2.resize(original_img, resize_shape)
img = np.float32(original_img) / 255
if blur_type =='Gaussian': # Gaussian blur
Kernelsize = Gaussian_param[0]
SigmaX = Gaussian_param[1]
blurred_img = cv2.GaussianBlur(img, (Kernelsize, Kernelsize), SigmaX)
elif blur_type == 'Black':
blurred_img = img * 0
elif blur_type == 'Median': # Median blur
Kernelsize_M = Median_param
blurred_img = np.float32(cv2.medianBlur(original_img, Kernelsize_M)) / 255
elif blur_type == 'Mixed': # Mixed blur
Kernelsize = Gaussian_param[0]
SigmaX = Gaussian_param[1]
blurred_img1 = cv2.GaussianBlur(img, (Kernelsize, Kernelsize), SigmaX)
Kernelsize_M = Median_param
blurred_img2 = np.float32(cv2.medianBlur(original_img, Kernelsize_M)) / 255
blurred_img = (blurred_img1 + blurred_img2) / 2
return img, blurred_img
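# Hedged usage sketch: 'example.jpg' is a placeholder path, and because the body
# above never touches img_label or model, dummy values are fine here.
def _example_get_blurred_img(path='example.jpg'):
    img, blurred = Get_blurred_img(path, img_label=-1, model=None,
                                   resize_shape=(224, 224), blur_type='Gaussian')
    return img, blurred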
def Integrated_Mask(ups, img, blurred_img, model, category, max_iterations = 15, integ_iter = 20,
tv_beta=2, l1_coeff = 0.01*300, tv_coeff = 0.2*300, size_init = 112, use_cuda =1):
########################
# Obtaining perturbation mask using integrated gradient descent to find the smallest and smoothest area that maximally decrease the
# output of a deep model
# Parameters:
# -------------
# ups: upsampling factor
# img: the original input image
# blurred_img: the baseline for the input image
# model: the model that you want to visualize
# category: the classification target that you want to visualize (category=-1 means the top 1 classification label)
# max_iterations: the max iterations for the integrated gradient descent
# integ_iter: how many points you want to use when computing the integrated gradients
# tv_beta: which norm you want to use for the total variation term
# l1_coeff: parameter for the L1 norm
# tv_coeff: parameter for the total variation term
# size_init: the resolution of the mask that you want to generate
# use_cuda: use gpu (1) or not (0)
####################################################
# preprocess the input image and the baseline (low probability) image
img = preprocess_image(img, use_cuda, require_grad=False)
blurred_img = preprocess_image(blurred_img, use_cuda, require_grad=False)
resize_size = img.data.shape
resize_wh = (img.data.shape[2], img.data.shape[3])
# initialize the mask
mask_init = np.ones((int(resize_wh[0]/ups), int(resize_wh[1]/ups)), dtype=np.float32)
mask = numpy_to_torch(mask_init, use_cuda, requires_grad=True)
# upsampler
if use_cuda:
upsample = torch.nn.UpsamplingBilinear2d(size=resize_wh).cuda()
else:
upsample = torch.nn.UpsamplingBilinear2d(size=resize_wh)
# You can choose any optimizer
# The optimizer doesn't matter, because we don't need optimizer.step(), we just use it to compute the gradient
optimizer = torch.optim.Adam([mask], lr=0.1)
# containers for curve metrics
curve1 = np.array([])
curve2 = np.array([])
curvetop = np.array([])
curve_total = np.array([])
# Integrated gradient descent
# hyperparams
alpha = 0.0001
beta = 0.2
for i in range(max_iterations):
upsampled_mask = upsample(mask)
upsampled_mask = upsampled_mask.expand(1, 3, upsampled_mask.size(2), upsampled_mask.size(3))
# the l1 term and the total variation term
loss1 = l1_coeff * torch.mean(torch.abs(1 - mask)) + tv_coeff * tv_norm(mask, tv_beta)
loss_all = loss1.clone()
# compute the perturbed image
perturbated_input_base = img.mul(upsampled_mask) + blurred_img.mul(1 - upsampled_mask)
loss2_ori = torch.nn.Softmax(dim=1)(model(perturbated_input_base))[0, category] # masking loss (no integrated)
loss_ori = loss1 + loss2_ori
if i==0:
if use_cuda:
curve1 = np.append(curve1, loss1.data.cpu().numpy())
curve2 = np.append(curve2, loss2_ori.data.cpu().numpy())
curvetop = np.append(curvetop, loss2_ori.data.cpu().numpy())
curve_total = np.append(curve_total, loss_ori.data.cpu().numpy())
else:
curve1 = np.append(curve1, loss1.data.numpy())
curve2 = np.append(curve2, loss2_ori.data.numpy())
curvetop = np.append(curvetop, loss2_ori.data.numpy())
curve_total = np.append(curve_total, loss_ori.data.numpy())
if use_cuda:
loss_oridata = loss_ori.data.cpu().numpy()
else:
loss_oridata = loss_ori.data.numpy()
# calculate integrated gradient for next descent step
for inte_i in range(integ_iter):
# Use the mask to perturbated the input image.
integ_mask = 0.0 + ((inte_i + 1.0) / integ_iter) * upsampled_mask
perturbated_input_integ = img.mul(integ_mask) + blurred_img.mul(1 - integ_mask)
# add noise
noise = np.zeros((resize_wh[0], resize_wh[1], 3), dtype=np.float32)
noise = noise + cv2.randn(noise, 0, 0.2)
noise = numpy_to_torch(noise, use_cuda, requires_grad=False)
perturbated_input = perturbated_input_integ + noise
outputs = torch.nn.Softmax(dim=1)(model(perturbated_input))
loss2 = outputs[0, category]
loss_all = loss_all + loss2 / integ_iter  # average the masking loss over the integ_iter integration points
# compute the integrated gradients for the given target,
# and compute the gradient for the l1 term and the total variation term
optimizer.zero_grad()
loss_all.backward()
whole_grad = mask.grad.data.clone() # integrated gradient
# LINE SEARCH with revised Armijo condition
step = 200.0 # upper limit of step size
MaskClone = mask.data.clone()
MaskClone -= step * whole_grad
MaskClone = Variable(MaskClone, requires_grad=False)
MaskClone.data.clamp_(0, 1) # clamp the value of mask in [0,1]
mask_LS = upsample(MaskClone) # Here the direction is the whole_grad
Img_LS = img.mul(mask_LS) + blurred_img.mul(1 - mask_LS)
outputsLS = torch.nn.Softmax(dim=1)(model(Img_LS))
loss_LS = l1_coeff * torch.mean(torch.abs(1 - MaskClone)) + tv_coeff * tv_norm(MaskClone, tv_beta) + outputsLS[0, category]
if use_cuda:
loss_LSdata = loss_LS.data.cpu().numpy()
else:
loss_LSdata = loss_LS.data.numpy()
new_condition = whole_grad ** 2 # Here the direction is the whole_grad
new_condition = new_condition.sum()
new_condition = alpha * step * new_condition
# finding best step size using backtracking line search
while loss_LSdata > loss_oridata - new_condition.cpu().numpy():
step *= beta
MaskClone = mask.data.clone()
MaskClone -= step * whole_grad
MaskClone = Variable(MaskClone, requires_grad=False)
MaskClone.data.clamp_(0, 1)
mask_LS = upsample(MaskClone)
Img_LS = img.mul(mask_LS) + blurred_img.mul(1 - mask_LS)
outputsLS = torch.nn.Softmax(dim=1)(model(Img_LS))
loss_LS = l1_coeff * torch.mean(torch.abs(1 - MaskClone)) + tv_coeff * tv_norm(MaskClone, tv_beta) + outputsLS[0, category]
if use_cuda:
loss_LSdata = loss_LS.data.cpu().numpy()
else:
loss_LSdata = loss_LS.data.numpy()
new_condition = whole_grad ** 2 # Here the direction is the whole_grad
new_condition = new_condition.sum()
new_condition = alpha * step * new_condition
if step<0.00001:
break
mask.data -= step * whole_grad # integrated gradient descent step - we have the updated mask at this point
if use_cuda:
curve1 = np.append(curve1, loss1.data.cpu().numpy())
curve2 = np.append(curve2, loss2_ori.data.cpu().numpy()) # only masking loss
curve_total = np.append(curve_total, loss_ori.data.cpu().numpy())
else:
curve1 = np.append(curve1, loss1.data.numpy())
curve2 = np.append(curve2, loss2_ori.data.numpy())
curve_total = np.append(curve_total, loss_ori.data.numpy())
mask.data.clamp_(0, 1)
if use_cuda:
maskdata = mask.data.cpu().numpy()
else:
maskdata = mask.data.numpy()
maskdata = np.squeeze(maskdata)
maskdata, imgratio = topmaxPixel(maskdata, 40)
maskdata = np.expand_dims(maskdata, axis=0)
maskdata = np.expand_dims(maskdata, axis=0)
if use_cuda:
Masktop = torch.from_numpy(maskdata).cuda()
else:
Masktop = torch.from_numpy(maskdata)
# Use the mask to perturb the input image.
Masktop = Variable(Masktop, requires_grad=False)
MasktopLS = upsample(Masktop)
Img_topLS = img.mul(MasktopLS) + blurred_img.mul(1 - MasktopLS)
outputstopLS = torch.nn.Softmax(dim=1)(model(Img_topLS))
loss_top1 = l1_coeff * torch.mean(torch.abs(1 - Masktop)) + tv_coeff * tv_norm(Masktop, tv_beta)
loss_top2 = outputstopLS[0, category]
if use_cuda:
curvetop = np.append(curvetop, loss_top2.data.cpu().numpy())
else:
curvetop = np.append(curvetop, loss_top2.data.numpy())
if max_iterations > 3:
if i == int(max_iterations / 2):
if np.abs(curve2[0] - curve2[i]) <= 0.001:
l1_coeff = l1_coeff / 10
elif i == int(max_iterations / 1.25):
if np.abs(curve2[0] - curve2[i]) <= 0.01:
l1_coeff = l1_coeff / 5
#######################################################################################
upsampled_mask = upsample(mask)
if use_cuda:
mask = mask.data.cpu().numpy().copy()
else:
mask = mask.data.numpy().copy()
return mask, upsampled_mask
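# Hedged standalone sketch of the backtracking rule used above (the revised Armijo
# / sufficient-decrease condition): shrink the step by beta until
# f(x - step*g) <= f(x) - alpha*step*||g||^2. Purely illustrative; it mirrors the
# structure of the line-search loop inside Integrated_Mask.
def _example_backtracking_step(f, x, grad, step=200.0, alpha=0.0001, beta=0.2, min_step=1e-5):
    fx = f(x)
    while f(x - step * grad) > fx - alpha * step * np.sum(grad ** 2):
        step *= beta
        if step < min_step:
            break
    return step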
def Deletion_Insertion_Comb_withOverlay(max_patches, mask, model, output_path, img_ori, blurred_img_ori, category, use_cuda=1, blur_mask=0, outputfig = 1):
########################
# Compute the deletion and insertion scores
#
# parameters:
# max_patches: number of literals in a root conjunction
# mask: the generated mask
# model: the model that you want to visualize
# output_path: where to save the results
# img_ori: the original image
# blurred_img_ori: the baseline image
# category: the classification target that you want to visualize (category=-1 means the top 1 classification label)
# use_cuda: use gpu (1) or not (0)
# blur_mask: blur the mask or not
# outputfig: save figure or not
####################################################
if blur_mask: # normalise the mask, invert it, blur, then invert back
mask = (mask - np.min(mask)) / np.max(mask)
mask = 1 - mask
mask = cv2.GaussianBlur(mask, (51, 51), 50)
mask = 1-mask
blurred_insert = blurred_img_ori.copy()
blurred_insert = preprocess_image(blurred_insert, use_cuda, require_grad=False)
img = preprocess_image(img_ori, use_cuda, require_grad=False)
blurred_img = preprocess_image(blurred_img_ori, use_cuda, require_grad=False)
resize_wh = (img.data.shape[2], img.data.shape[3])
if use_cuda:
| |
<filename>project/apps/salesforce/models.py<gh_stars>10-100
import json
# Third-Party
from model_utils import Choices
from distutils.util import strtobool
# Local
from apps.bhs.models import Convention, Award, Chart, Group, Person
from apps.registration.models import Contest, Session, Assignment, Entry
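# Hedged helper sketch (not part of the original module): the parsers below all
# repeat int(float(node.cdata)) for Salesforce numeric fields; a tiny coercion
# helper like this is one way to spell that conversion once.
def _sf_int(node):
    return int(float(node.cdata))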
class SfConvention:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Name
if hasattr(n, 'sf_Name'):
d['name'] = str(n.sf_Name.cdata)
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Season
if hasattr(n, 'sf_BS_Season__c'):
season = int(float(n.sf_BS_Season__c.cdata))
d['season'] = season
# Panel
if hasattr(n, 'sf_BS_Panel__c'):
d['panel'] = int(float(n.sf_BS_Panel__c.cdata))
# Year
if hasattr(n, 'sf_Year__c'):
d['year'] = int(n.sf_Year__c.cdata)
# Open Date
if hasattr(n, 'sf_Open_Date__c'):
d['open_date'] = n.sf_Open_Date__c.cdata
# Close Date
if hasattr(n, 'sf_Close_Date__c'):
d['close_date'] = n.sf_Close_Date__c.cdata
# Start Date
if hasattr(n, 'sf_Start_Date__c'):
d['start_date'] = n.sf_Start_Date__c.cdata
# End Date
if hasattr(n, 'sf_End_Date__c'):
d['end_date'] = n.sf_End_Date__c.cdata
# Venue
if hasattr(n, 'sf_Venue__c'):
d['venue_name'] = n.sf_Venue__c.cdata
# Location
if hasattr(n, 'sf_Location__c'):
d['location'] = n.sf_Location__c.cdata
# Time Zone
if hasattr(n, 'sf_Time_Zone__c'):
d['timezone'] = n.sf_Time_Zone__c.cdata
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# Divisions
if hasattr(n, 'sf_BS_Division__c'):
d['divisions'] = n.sf_BS_Division__c.cdata
# Kinds
if hasattr(n, 'sf_BS_Kind__c'):
d['kinds'] = n.sf_BS_Kind__c.cdata
# Return parsed dict
return d
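# Hedged usage sketch (illustrative only; assumes the notification carried
# sf_BS_UUID__c so that parse_sf_notification produced d['id']): a parsed payload
# would typically be applied with Django's update_or_create, with field names
# simply mirroring the dict built above.
def _example_apply_convention(n):
    parsed = SfConvention.parse_sf_notification(n)
    return Convention.objects.update_or_create(id=parsed.pop('id'), defaults=parsed)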
class SfAward:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Name
if hasattr(n, 'sf_Name'):
d['name'] = n.sf_Name.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Kind
if hasattr(n, 'sf_BS_Kind__c'):
d['kind'] = int(float(n.sf_BS_Kind__c.cdata))
# Gender
d['gender'] = int(float(n.sf_BS_Classification__c.cdata)) if hasattr(n, 'sf_BS_Classification__c') else None
# Level
if hasattr(n, 'sf_BS_Level__c'):
d['level'] = int(float(n.sf_BS_Level__c.cdata))
# Season
if hasattr(n, 'sf_BS_Season__c'):
d['season'] = int(float(n.sf_BS_Season__c.cdata))
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Divisions
d['division'] = int(float(n.sf_BS_Division__c.cdata)) if hasattr(n, 'sf_BS_Division__c') else None
# Is Single
if hasattr(n, 'sf_is_single__c'):
d['is_single'] = bool(strtobool(n.sf_is_single__c.cdata))
# Threshold
d['threshold'] = float(n.sf_Threshold__c.cdata) if hasattr(n, 'sf_Threshold__c') else None
# Minimum
d['minimum'] = float(n.sf_Minimum__c.cdata) if hasattr(n, 'sf_Minimum__c') else None
# advance
d['advance'] = float(n.sf_Advance__c.cdata) if hasattr(n, 'sf_Advance__c') else None
# spots
d['spots'] = int(float(n.sf_Spots__c.cdata)) if hasattr(n, 'sf_Spots__c') else None
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# Notes
d['notes'] = n.sf_Notes__c.cdata if hasattr(n, 'sf_Notes__c') else ""
# Age
d['age'] = int(float(n.sf_BS_Age__c.cdata)) if hasattr(n, 'sf_BS_Age__c') else None
# Is Novice
if hasattr(n, 'sf_is_novice__c'):
d['is_novice'] = bool(strtobool(n.sf_is_novice__c.cdata))
# Size
d['size'] = int(float(n.sf_BS_Size__c.cdata)) if hasattr(n, 'sf_BS_Size__c') else None
# Size Range
d['size_range'] = n.sf_Size_Range__c.cdata if hasattr(n, 'sf_Size_Range__c') else None
# Scope
d['scope'] = int(float(n.sf_BS_Scope__c.cdata)) if hasattr(n, 'sf_BS_Scope__c') else None
# Scope Range
d['scope_range'] = n.sf_Scope_Range__c.cdata if hasattr(n, 'sf_Scope_Range__c') else None
# Tree Sort
d['tree_sort'] = int(float(n.sf_Tree_Sort__c.cdata)) if hasattr(n, 'sf_Tree_Sort__c') else None
# Return parsed dict
return d
class SfChart:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Name
if hasattr(n, 'sf_Name'):
d['title'] = n.sf_Name.cdata
# Arrangers
if hasattr(n, 'sf_Arrangers__c'):
d['arrangers'] = n.sf_Arrangers__c.cdata
# Composer
d['composers'] = n.sf_Composers__c.cdata if hasattr(n, 'sf_Composers__c') else ""
# Lyricist
d['lyricists'] = n.sf_Lyricists__c.cdata if hasattr(n, 'sf_Lyricists__c') else ""
# Holders
d['holders'] = n.sf_Holders__c.cdata if hasattr(n, 'sf_Holders__c') else ""
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# Notes
d['notes'] = n.sf_Notes__c.cdata if hasattr(n, 'sf_Notes__c') else ""
# Return parsed dict
return d
class SfGroup:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Name
if hasattr(n, 'sf_Name'):
d['name'] = n.sf_Name.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Kind
if hasattr(n, 'sf_BS_Kind__c'):
d['kind'] = int(float(n.sf_BS_Kind__c.cdata))
# Gender
if hasattr(n, 'sf_BS_Classification__c'):
d['gender'] = int(float(n.sf_BS_Classification__c.cdata))
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Divisions
d['division'] = int(float(n.sf_BS_Division__c.cdata)) if hasattr(n, 'sf_BS_Division__c') else None
# bhs_id
if hasattr(n, 'sf_cfg_Member_Id__c') and n.sf_cfg_Member_Id__c.cdata.isalnum():
# Is a Chorus
# code
d['code'] = n.sf_cfg_Member_Id__c.cdata if hasattr(n, 'sf_cfg_Member_Id__c') else ""
elif hasattr(n, 'sf_cfg_Member_Id__c'):
# Is a Quartet
d['bhs_id'] = int(n.sf_cfg_Member_Id__c.cdata) if hasattr(n, 'sf_cfg_Member_Id__c') else None
# Return parsed dict
return d
class SfPerson:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Name
if hasattr(n, 'sf_FirstName') and hasattr(n, 'sf_LastName'):
d['name'] = n.sf_FirstName.cdata + " " + n.sf_LastName.cdata
# First Name
d['first_name'] = n.sf_FirstName.cdata if hasattr(n, 'sf_FirstName') else ""
# Last Name
d['last_name'] = n.sf_LastName.cdata if hasattr(n, 'sf_LastName') else ""
# part
d['part'] = int(float(n.sf_BS_VoicePart__c.cdata)) if hasattr(n, 'sf_BS_VoicePart__c') else None
# Gender
d['gender'] = int(float(n.sf_BS_Gender__c.cdata)) if hasattr(n, 'sf_BS_Gender__c') else None
# Email
d['email'] = n.sf_npe01__HomeEmail__c.cdata if hasattr(n, 'sf_npe01__HomeEmail__c') else ""
# Home Phone
d['home_phone'] = n.sf_HomePhone.cdata if hasattr(n, 'sf_HomePhone') else ""
# Cell Phone
d['cell_phone'] = n.sf_MobilePhone.cdata if hasattr(n, 'sf_MobilePhone') else ""
# BHS ID
d['bhs_id'] = int(n.sf_cfg_Member_Number__c.cdata) if hasattr(n, 'sf_cfg_Member_Number__c') else None
# Return parsed dict
return d
class SfSession:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Status
if hasattr(n, 'sf_BS_Status__c'):
d['status'] = int(float(n.sf_BS_Status__c.cdata))
# Kind
if hasattr(n, 'sf_BS_Kind__c'):
d['kind'] = int(float(n.sf_BS_Kind__c.cdata))
# Num Rounds
if hasattr(n, 'sf_Num_rounds__c'):
d['num_rounds'] = int(float(n.sf_Num_rounds__c.cdata))
# Is Invitational
if hasattr(n, 'sf_is_invitational__c'):
d['is_invitational'] = bool(strtobool(n.sf_is_invitational__c.cdata))
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# Notes
d['notes'] = n.sf_Notes__c.cdata if hasattr(n, 'sf_Notes__c') else ""
# Footnotes
d['footnotes'] = n.sf_Footnotes__c.cdata if hasattr(n, 'sf_Footnotes__c') else ""
if hasattr(n, 'sf_BS_Convention_UUID__c'):
d['convention_id'] = n.sf_BS_Convention_UUID__c.cdata
# Name
if hasattr(n, 'sf_Name'):
d['name'] = n.sf_Name.cdata
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Season
if hasattr(n, 'sf_BS_Season__c'):
d['season'] = int(float(n.sf_BS_Season__c.cdata))
# Panel
if hasattr(n, 'sf_BS_Panel__c'):
d['panel'] = int(float(n.sf_BS_Panel__c.cdata))
# Year
if hasattr(n, 'sf_Year__c'):
d['year'] = int(n.sf_Year__c.cdata)
# Open Date
if hasattr(n, 'sf_Open_Date__c'):
d['open_date'] = n.sf_Open_Date__c.cdata
# Close Date
if hasattr(n, 'sf_Close_Date__c'):
d['close_date'] = n.sf_Close_Date__c.cdata
# Start Date
if hasattr(n, 'sf_Start_Date__c'):
d['start_date'] = n.sf_Start_Date__c.cdata
# End Date
if hasattr(n, 'sf_End_Date__c'):
d['end_date'] = n.sf_End_Date__c.cdata
# Venue
if hasattr(n, 'sf_Venue__c'):
d['venue_name'] = n.sf_Venue__c.cdata
# Location
if hasattr(n, 'sf_Location__c'):
d['location'] = n.sf_Location__c.cdata
# Time Zone
if hasattr(n, 'sf_Time_Zone__c'):
d['timezone'] = n.sf_Time_Zone__c.cdata
# Divisions
if hasattr(n, 'sf_BS_Division__c'):
d['divisions'] = n.sf_BS_Division__c.cdata
# Return parsed dict
return d
class SfContest:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Award ID
if hasattr(n, 'sf_BS_Award_UUID__c'):
d['award_id'] = n.sf_BS_Award_UUID__c.cdata
# Name
if hasattr(n, 'sf_Name'):
d['name'] = n.sf_Name.cdata
# Kind
if hasattr(n, 'sf_BS_Kind__c'):
d['kind'] = int(float(n.sf_BS_Kind__c.cdata))
# Gender
d['gender'] = int(float(n.sf_BS_Classification__c.cdata)) if hasattr(n, 'sf_BS_Classification__c') else None
# Level
if hasattr(n, 'sf_BS_Level__c'):
d['level'] = int(float(n.sf_BS_Level__c.cdata))
# Season
if hasattr(n, 'sf_BS_Season__c'):
d['season'] = int(float(n.sf_BS_Season__c.cdata))
# Description
d['description'] = n.sf_Description__c.cdata if hasattr(n, 'sf_Description__c') else ""
# District
if hasattr(n, 'sf_BS_District__c'):
d['district'] = int(float(n.sf_BS_District__c.cdata))
# Divisions
d['division'] = int(float(n.sf_BS_Division__c.cdata)) if hasattr(n, 'sf_BS_Division__c') else None
# Age
d['age'] = int(float(n.sf_BS_Age__c.cdata)) if hasattr(n, 'sf_BS_Age__c') else None
# Is Novice
if hasattr(n, 'sf_is_novice__c'):
d['is_novice'] = bool(strtobool(n.sf_is_novice__c.cdata))
# Is Single
if hasattr(n, 'sf_is_single__c'):
d['is_single'] = bool(strtobool(n.sf_is_single__c.cdata))
# Size
d['size'] = int(float(n.sf_BS_Size__c.cdata)) if hasattr(n, 'sf_BS_Size__c') else None
# Size Range
d['size_range'] = n.sf_Size_Range__c.cdata if hasattr(n, 'sf_Size_Range__c') else None
# Scope
d['scope'] = int(float(n.sf_BS_Scope__c.cdata)) if hasattr(n, 'sf_BS_Scope__c') else None
# Scope Range
d['scope_range'] = n.sf_Scope_Range__c.cdata if hasattr(n, 'sf_Scope_Range__c') else None
# Tree Sort
d['tree_sort'] = int(float(n.sf_Tree_Sort__c.cdata)) if hasattr(n, 'sf_Tree_Sort__c') else None
# Session ID
if hasattr(n, 'sf_BS_Session_UUID__c'):
d['session_id'] = n.sf_BS_Session_UUID__c.cdata
# Return parsed dict
return d
class SfAssignment:
def parse_sf_notification(n):
d = {}
# Created
if hasattr(n, 'sf_CreatedDate'):
d['created'] = n.sf_CreatedDate.cdata
# Modified
if hasattr(n, 'sf_LastModifiedDate'):
d['modified'] = n.sf_LastModifiedDate.cdata
# UUID
if hasattr(n, 'sf_BS_UUID__c'):
d['id'] = n.sf_BS_UUID__c.cdata
# Kind
if hasattr(n, 'sf_BS_Type__c'):
d['kind'] = int(float(n.sf_BS_Type__c.cdata))
# Category
if
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.274562,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.52564,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0492287,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.241355,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.260146,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.123302,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.198882,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.100389,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.422572,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.101137,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.48232,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0491471,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00517184,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0560611,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0382489,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.105208,
'Execution Unit/Register Files/Runtime Dynamic': 0.0434208,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.130415,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.300512,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.41324,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000679372,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000679372,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000613657,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000249548,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000549449,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00252185,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00573042,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0367697,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.33887,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0980438,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.124886,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.67089,
'Instruction Fetch Unit/Runtime Dynamic': 0.267952,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0363631,
'L2/Runtime Dynamic': 0.00744087,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.49217,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.612821,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0406038,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0406037,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.68391,
'Load Store Unit/Runtime Dynamic': 0.853668,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.100122,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.200244,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0355336,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0360296,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.145422,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0162211,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.362571,
'Memory Management Unit/Runtime Dynamic': 0.0522507,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.8255,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.129283,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0071364,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0602474,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
self.word_embedding = self.cfg["word_embedding"]
self.sess, self.saver = None, None
# Add placeholder
self.words = tf.placeholder(tf.int32, shape=[None, None], name="words") # shape = (batch_size, max_time)
self.labels = tf.placeholder(tf.int32, shape=[None, None], name="label") # shape = (batch_size, max_time - 1)
self.seq_len = tf.placeholder(tf.int32, shape=[None], name="seq_len")
# shape = (batch_size, max_time, max_word_length)
self.chars = tf.placeholder(tf.int32, shape=[None, None, None], name="chars")
self.char_seq_len = tf.placeholder(tf.int32, shape=[None, None], name="char_seq_len")
# hyper-parameters
self.is_train = tf.placeholder(tf.bool, shape=[], name="is_train")
self.batch_size = tf.placeholder(tf.int32, name="batch_size")
self.keep_prob = tf.placeholder(tf.float32, name="keep_probability")
self.drop_rate = tf.placeholder(tf.float32, name="dropout_rate")
self.lr = tf.placeholder(tf.float32, name="learning_rate")
# Build embedding layer
with tf.variable_scope("embeddings"):
self.word_embeddings = tf.Variable(np.load(self.cfg["word_embedding"])["embeddings"], name="embedding",
dtype=tf.float32, trainable=False)
word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.words, name="word_emb")
print("Word embedding shape: {}".format(word_emb.get_shape().as_list()))
self.char_embeddings = tf.get_variable(name="char_embedding", dtype=tf.float32, trainable=True,
shape=[self.char_vocab_size, self.cfg["char_emb_dim"]])
char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.chars, name="chars_emb")
char_represent = multi_conv1d(char_emb, self.cfg["filter_sizes"], self.cfg["channel_sizes"],
drop_rate=self.drop_rate, is_train=self.is_train)
print("Chars representation shape: {}".format(char_represent.get_shape().as_list()))
word_emb = tf.concat([word_emb, char_represent], axis=-1)
self.word_emb = tf.layers.dropout(word_emb, rate=self.drop_rate, training=self.is_train)
print("Word and chars concatenation shape: {}".format(self.word_emb.get_shape().as_list()))
# Build model ops
with tf.name_scope("BiLSTM"):
with tf.variable_scope('forward'):
lstm_fw_cell = tf.keras.layers.LSTMCell(self.cfg["num_units"])
with tf.variable_scope('backward'):
lstm_bw_cell = tf.keras.layers.LSTMCell(self.cfg["num_units"])
rnn_outs, *_ = bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, self.word_emb, sequence_length=self.seq_len,
dtype=tf.float32)
# As we have a Bi-LSTM, we have two outputs which are not connected, so we need to merge them.
rnn_outs = tf.concat(rnn_outs, axis=-1)
rnn_outs = tf.layers.dropout(rnn_outs, rate=self.drop_rate, training=self.is_train)
outputs = rnn_outs
print("Output shape: {}".format(outputs.get_shape().as_list()))
context = tf.transpose(outputs, [1, 0, 2])
p_context = tf.layers.dense(outputs, units=2 * self.cfg["num_units"], use_bias=False)
p_context = tf.transpose(p_context, [1, 0, 2])
attn_cell = AttentionCell(self.cfg["num_units"], context, p_context) # time major based
attn_outs, _ = dynamic_rnn(attn_cell, context, sequence_length=self.seq_len, time_major=True,
dtype=tf.float32)
outputs = tf.transpose(attn_outs, [1, 0, 2])
print("Attention output shape: {}".format(outputs.get_shape().as_list()))
self.logits = tf.layers.dense(outputs, units=self.label_vocab_size, use_bias=True)
# self.logits = tf.nn.softmax(self.logits)
print("Logits shape: {}".format(self.logits.get_shape().as_list()))
# Define loss and optimizer
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.labels)
mask = tf.sequence_mask(self.seq_len)
self.loss = tf.reduce_mean(tf.boolean_mask(losses, mask))
# losses = focal_loss(self.gamma,self.alpha)
# self.loss = losses(self.labels, self.logits)
# self.loss = tf.reduce_mean(self.loss)
tf.summary.scalar("loss", self.loss)
optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
self.train_op = optimizer.minimize(self.loss)
print('Params number: {}'.format(np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
self.sess = tf.Session(config=sess_config)
self.saver = tf.train.Saver(max_to_keep=self.max_to_keep)
self.sess.run(tf.global_variables_initializer())
def restore_last_session(self, ckpt_path=None):
if ckpt_path is not None:
ckpt = tf.train.get_checkpoint_state(ckpt_path)
else:
ckpt = tf.train.get_checkpoint_state(self.checkpoint_path) # get checkpoint state
if ckpt and ckpt.model_checkpoint_path: # restore session
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
def save_session(self, epoch):
self.saver.save(self.sess, self.checkpoint_path + self.cfg["model_name"], global_step=epoch)
def close_session(self):
self.sess.close()
def _add_summary(self):
self.summary = tf.summary.merge_all()
self.train_writer = tf.summary.FileWriter(self.summary_path + "train", self.sess.graph)
self.test_writer = tf.summary.FileWriter(self.summary_path + "test")
def _get_feed_dict(self, batch, keep_prob=1.0, is_train=False, lr=None):
feed_dict = {self.words: batch["words"], self.seq_len: batch["seq_len"], self.batch_size: batch["batch_size"]}
if "labels" in batch:
feed_dict[self.labels] = batch["labels"]
feed_dict[self.chars] = batch["chars"]
feed_dict[self.char_seq_len] = batch["char_seq_len"]
feed_dict[self.keep_prob] = keep_prob
feed_dict[self.drop_rate] = 1.0 - keep_prob
feed_dict[self.is_train] = is_train
if lr is not None:
feed_dict[self.lr] = lr
return feed_dict
def _predict_op(self, data):
feed_dict = self._get_feed_dict(data)
pred_logits = tf.cast(tf.argmax(self.logits, axis=-1), tf.int32)
logits = self.sess.run(pred_logits, feed_dict=feed_dict)
return logits
def train_epoch(self, train_set,valid_set, epoch):
num_batches = len(train_set)
prog = Progbar(target=num_batches)
for i, batch_data in enumerate(train_set):
feed_dict = self._get_feed_dict(batch_data, is_train=True, keep_prob=self.cfg["keep_prob"],
lr=self.cfg["lr"])
_, train_loss, summary = self.sess.run([self.train_op, self.loss, self.summary], feed_dict=feed_dict)
cur_step = (epoch - 1) * num_batches + (i + 1)
prog.update(i + 1, [("Global Step", int(cur_step)), ("Train Loss", train_loss)])
if i % 100 == 0:
self.train_writer.add_summary(summary, cur_step)
step = cur_step
for j, batch_data in enumerate(valid_set):
feed_dict = self._get_feed_dict(batch_data)
val_summary = self.sess.run(self.summary, feed_dict=feed_dict)
self.test_writer.add_summary(val_summary, step)
micro_f_val, out_str, micro = self.evaluate_punct(valid_set, "val")
return micro_f_val, train_loss
def train(self, train_set, valid_set):
self.logger.info("Start training...")
best_f1, no_imprv_epoch = -np.inf, 0
self._add_summary()
for epoch in range(1, self.cfg["epochs"] + 1):
self.logger.info('Epoch {}/{}: '.format(epoch, self.cfg["epochs"],))
micro_f_val, train_loss = self.train_epoch(train_set,valid_set, epoch) # train epochs
self.logger.info('Train loss: {} - Valid micro average fscore: {}'.format(train_loss, micro_f_val))
cur_f1 = micro_f_val
if cur_f1 > best_f1:
no_imprv_epoch = 0
best_f1 = cur_f1
# f1_test, out_str = self.evaluate_punct(test_set, "test")
# self.logger.info("\nEvaluate on {} dataset:\n{}\n".format("test", out_str))
self.save_session(epoch)
else:
no_imprv_epoch += 1
if no_imprv_epoch >= self.cfg["no_imprv_tolerance"]:
self.logger.info("Early Stopping at epoch - Valid micro average fscore: {:04.2f} - {:04.2f}".format(epoch, best_f1))
break
self.train_writer.close()
self.test_writer.close()
def test(self,test_set):
self.logger.info("Start testing...")
micro_f, out_str, micro = self.evaluate_punct(test_set, "test")
self.logger.info("\nEvaluate on {} dataset:\n{}\n".format("test", out_str))
self.logger.info("\n{}\n".format(micro))
def evaluate_punct(self, dataset, name):
PUNCTUATIONS = ['O','PERIOD', 'COMMA', 'EXCLAM', 'COLON', 'QMARK','SEMICOLON']
preds = []
labels = []
TP = 0.0
FP = 0.0
FN = 0.0
num_class = len(PUNCTUATIONS)
# cfm = [ [0 for i in range(7)] for j in range(num_class)]
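# accumulate micro-averaged TP/FP/FN over the punctuation classes (indices 1..6), ignoring the 'O' class at index 0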
for data in dataset:
predicts = self._predict_op(data)
for pred, tag, seq_len in zip(predicts, data["labels"], data["seq_len"]):
preds.append(pred[:seq_len])
# print(preds)
labels.append(tag[:seq_len])
for i in range(len(pred)):
for l in range(1,7):
if (pred[i] == tag[i]) and (tag[i] == l):
TP += 1
elif (pred[i] != tag[i]) and (tag[i] == l):
FN += 1
elif (pred[i] != tag[i]) and (pred[i] == l):
FP += 1
labels = [y for x in labels for y in x]
preds = [y for x in preds for y in x]
precision = precision_score(labels, preds, average=None)
recall = recall_score(labels, preds, average=None)
f_score = f1_score(labels, preds, average=None)
if (TP + FN) != 0:
micro_r = TP / (TP + FN)
else:
micro_r = 0
###################
if (TP + FP) != 0:
micro_p = TP / (TP + FP)
else:
micro_p = 0
################
if (micro_r + micro_p) > 0:
micro_f = 2*micro_r * micro_p / (micro_r + micro_p)
else:
micro_f = 0.0
micro = 'MICRO AVERAGE:\n\tPrecision: ' + str(100 * micro_p) + ' %\n\tRecall: ' + str(100 * micro_r) + ' %\n\tF_1 score: ' + str(100 * micro_f) + ' %\n'
out_str = "-" * 46 + "\n"
out_str += "{:<16} {:<9} {:<9} {:<9}\n".format("PUNCTUATION", "PRECISION", "RECALL", "F-SCORE")
for i in range(1,num_class):
out_str += u"{:<16} {:<9} {:<9} {:<9}\n".format(PUNCTUATIONS[i], "{:.4f}".format(100*precision[i]),
"{:.4f}".format(100*recall[i]),
"{:.4f}".format(100*f_score[i]))
return micro_f, out_str, micro
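# Hedged sketch: `focal_loss(gamma, alpha)` referenced by the model classes is imported from
# elsewhere in this project and is not shown here. The factory below (hypothetical name
# `_focal_loss_sketch`) only illustrates the assumed contract: it returns a callable taking
# (labels, logits) and down-weights well-classified tokens by a (1 - p_t)**gamma factor.
def _focal_loss_sketch(gamma=2.0, alpha=0.25):
    def loss_fn(labels, logits):
        # labels: (batch, time) int32 class ids; logits: (batch, time, num_classes)
        ce = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
        p_t = tf.exp(-ce)  # probability assigned to the true class
        return alpha * tf.pow(1.0 - p_t, gamma) * ce
    return loss_fn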
class BiLSTM_CRF_model:
def __init__(self, config, alpha, gamma):
self.cfg = config
self.alpha = alpha
self.gamma = gamma
# Create folders
if not os.path.exists(self.cfg["checkpoint_path"]):
os.makedirs(self.cfg["checkpoint_path"])
if not os.path.exists(self.cfg["summary_path"]):
os.makedirs(self.cfg["summary_path"])
#Create logger
self.logger = get_logger(os.path.join(self.cfg["checkpoint_path"], str(self.gamma) + str(self.alpha) + "log.txt"))
# Load dictionary
dict_data = load_data(self.cfg["vocab"])
self.word_dict, self.char_dict = dict_data["word_dict"], dict_data["char_dict"]
self.label_dict = dict_data["label_dict"]
del dict_data
self.word_vocab_size = len(self.word_dict)
self.char_vocab_size = len(self.char_dict)
self.label_vocab_size = len(self.label_dict)
self.max_to_keep = self.cfg["max_to_keep"]
self.checkpoint_path = self.cfg["checkpoint_path"]
self.summary_path = self.cfg["summary_path"]
self.word_embedding = self.cfg["word_embedding"]
self.sess, self.saver = None, None
# Add placeholder
self.words = tf.placeholder(tf.int32, shape=[None, None], name="words") # shape = (batch_size, max_time)
self.labels = tf.placeholder(tf.int32, shape=[None, None], name="label") # shape = (batch_size, max_time)
self.seq_len = tf.placeholder(tf.int32, shape=[None], name="seq_len")
# shape = (batch_size, max_time, max_word_length)
self.chars = tf.placeholder(tf.int32, shape=[None, None, None], name="chars")
self.char_seq_len = tf.placeholder(tf.int32, shape=[None, None], name="char_seq_len")
# hyper-parameters
self.is_train = tf.placeholder(tf.bool, shape=[], name="is_train")
self.batch_size = tf.placeholder(tf.int32, name="batch_size")
self.keep_prob = tf.placeholder(tf.float32, name="keep_probability")
self.drop_rate = tf.placeholder(tf.float32, name="dropout_rate")
self.lr = tf.placeholder(tf.float32, name="learning_rate")
# Build embedding layer
with tf.variable_scope("embeddings"):
self.word_embeddings = tf.Variable(np.load(self.cfg["word_embedding"])["embeddings"], name="embedding",
dtype=tf.float32, trainable=False)
word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.words, name="word_emb")
print("Word embedding shape: {}".format(word_emb.get_shape().as_list()))
self.char_embeddings = tf.get_variable(name="char_embedding", dtype=tf.float32, trainable=True,
shape=[self.char_vocab_size, self.cfg["char_emb_dim"]])
char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.chars, name="chars_emb")
char_represent = multi_conv1d(char_emb, self.cfg["filter_sizes"], self.cfg["channel_sizes"],
drop_rate=self.drop_rate, is_train=self.is_train)
print("Chars representation shape: {}".format(char_represent.get_shape().as_list()))
word_emb = tf.concat([word_emb, char_represent], axis=-1)
self.word_emb = tf.layers.dropout(word_emb, rate=self.drop_rate, training=self.is_train)
print("Word and chars concatenation shape: {}".format(self.word_emb.get_shape().as_list()))
# Build model ops
with tf.name_scope("BiLSTM"):
with tf.variable_scope('forward'):
lstm_fw_cell = tf.keras.layers.LSTMCell(self.cfg["num_units"])
with tf.variable_scope('backward'):
lstm_bw_cell = tf.keras.layers.LSTMCell(self.cfg["num_units"])
rnn_outs, *_ = bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, self.word_emb, sequence_length=self.seq_len,
dtype=tf.float32)
# As we have a Bi-LSTM, we have two outputs which are not connected, so we need to merge them.
rnn_outs = tf.concat(rnn_outs, axis=-1)
# rnn_outs = tf.layers.dropout(rnn_outs, rate=self.drop_rate, training=self.is_train)
outputs = rnn_outs
print("Output shape: {}".format(outputs.get_shape().as_list()))
self.logits = tf.layers.dense(outputs, units=self.label_vocab_size, use_bias=True)
# self.logits = tf.nn.softmax(self.logits)
print("Logits shape: {}".format(self.logits.get_shape().as_list()))
# Define loss and optimizer
crf_loss, self.trans_params = crf_log_likelihood(self.logits, self.labels, self.seq_len)
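# NOTE: crf_log_likelihood is evaluated here to obtain self.trans_params, but the training
# loss below is the focal loss on the raw logits rather than the CRF negative log-likelihood.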
losses = focal_loss(self.gamma,self.alpha)
self.loss = losses(self.labels, self.logits)
self.loss = tf.reduce_mean(self.loss)
tf.summary.scalar("loss", self.loss)
optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
self.train_op = optimizer.minimize(self.loss)
print('Params number: {}'.format(np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
self.sess = tf.Session(config=sess_config)
self.saver = tf.train.Saver(max_to_keep=self.max_to_keep)
self.sess.run(tf.global_variables_initializer())
def restore_last_session(self, ckpt_path=None):
if ckpt_path is not None:
ckpt = tf.train.get_checkpoint_state(ckpt_path)
else:
ckpt = tf.train.get_checkpoint_state(self.checkpoint_path) # get checkpoint state
if ckpt and ckpt.model_checkpoint_path: # restore session
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
def save_session(self, epoch):
self.saver.save(self.sess, self.checkpoint_path + self.cfg["model_name"], global_step=epoch)
def close_session(self):
self.sess.close()
def _add_summary(self):
self.summary = tf.summary.merge_all()
self.train_writer = tf.summary.FileWriter(self.summary_path + "train", self.sess.graph)
self.test_writer = tf.summary.FileWriter(self.summary_path + "test")
def _get_feed_dict(self, batch, keep_prob=1.0, is_train=False, lr=None):
feed_dict = {self.words: batch["words"], self.seq_len: batch["seq_len"], self.batch_size: batch["batch_size"]}
if "labels" in batch:
feed_dict[self.labels] = batch["labels"]
feed_dict[self.chars] = batch["chars"]
feed_dict[self.char_seq_len] = batch["char_seq_len"]
feed_dict[self.keep_prob] = keep_prob
class LeagueRecom:
"""
A LeagueRecom object holds information about the rankings and results of a couple of seasons
from a certain league
Parameters
----------
games : list
A list with Game objects stored in it
rankings : list
A list with Ranking objects stored in it
w_points : int
The amount of points a formation gets when it wins
d_points : int
The amount of points a formation gets when it plays a draw
Attributes
----------
matches_list : list
Stores all the Game objects in a list
ranking_list : list
Stores all the Ranking objects in a list
win_points : int
Stores the amount of points a winning formation gets
draw_points : int
Stores the amount of points a formation gets for a draw
ranking_dict : dictionary
Stores the dictionary of all the rankings of a team per year once it's generated
recom_dict : dictionary
Stores the recommendation dictionary of the league once it's generated
"""
def __init__(self, games: list, rankings: list, w_points: int, d_points: int):
try:
self.matches_list = games
self.ranking_list = rankings
self.win_points = w_points
self.draw_points = d_points
self.ranking_dict = None
self.recom_dict = None
except ValueError as e:
print("An incorrect variable type was entered\n", e)
def ranking_list_to_dict(self):
"""
Constructs, from the Ranking objects, a dictionary that maps each season to the rank of every club
Raises
----------
AttributeError
When the ranking_list doesn't contain objects of the Ranking class
TypeError
When a wrong type is inserted into the object as ranking_list
Exception
When an unexpected error happens
Returns
-------
dictionary
A dictionary with per season/year the rank of a certain club
"""
try:
# initialize a dictionary to save the rank info
ranking_dict = {}
# loop over all objects in the list
for rank in self.ranking_list:
if rank.get_season() not in ranking_dict:
# if the season key doesn't already exist
ranking_dict[rank.get_season()] = {}
# saves the team as key and its rank as value in a season
ranking_dict[rank.get_season()][rank.get_team()] = rank.get_rank()
# update the ranking_dict of the object
self.ranking_dict = ranking_dict
# return the ranking_dict
return self.ranking_dict
# handle exceptions
except AttributeError as e:
print("The list does not contain objects of the class Ranking\n", e)
except TypeError as e:
print("No iterable type is given\n", e)
except Exception as e:
print("Unexpected error while generating the ranking dictionary\n", e)
def formations_info_recom(self):
"""
Goes over every Game object in the matches_list and awards every winning and drawing
formation a certain amount of points based on the club's rank in that season
Raises
----------
AttributeError
When the matches_list doesn't contain objects of the Game class,
or the ranking_dict doesn't accord with the Game objects in the matches_list
TypeError
When a wrong type is inserted into the object as matches_list,
or no integer was inserted as points for a win/draw
Exception
When the ranking dictionary is not (yet) instantiated or an unexpected error happens
Returns
-------
dictionary
A dictionary with per formation the counter-formations with a certain amount of points
These points are an indicator how effective a formation is against a counter-formation
"""
try:
if self.ranking_dict is None:
raise Exception("The ranking dictionary is not initiated")
# initialize a dictionary to save the formations and their points
formations_dict = {}
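# structure: {formation: {counter_formation: [total_points, games_played]}}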
# save the win and draw points
WIN_POINTS = self.win_points
DRAW_POINTS = self.draw_points
# loop over all game objects
for game in self.matches_list:
if game.get_home_score() > game.get_away_score():
# home team wins
# check if keys exists in the dictionary
self.check_for_keys_in_dict(game.get_home_formation(), game.get_away_formation(), formations_dict)
# calculate the amount of points for the home team win
points = WIN_POINTS * self.ranking_dict[game.get_season()][game.get_home_team()]
# save the points and increment the games played in the array
formations_dict[game.get_home_formation()][game.get_away_formation()][0] += points
formations_dict[game.get_home_formation()][game.get_away_formation()][1] += 1
elif game.get_home_score() == game.get_away_score():
# draw
# check if keys exists in the dictionary
self.check_for_keys_in_dict(game.get_home_formation(), game.get_away_formation(), formations_dict)
self.check_for_keys_in_dict(game.get_away_formation(), game.get_home_formation(), formations_dict)
# calculate the amount of point for a draw
points = DRAW_POINTS * ((self.ranking_dict[game.get_season()][game.get_home_team()] +
self.ranking_dict[game.get_season()][game.get_away_team()]) / 2)
# save the points and increment the games played in the array
formations_dict[game.get_home_formation()][game.get_away_formation()][0] += points
formations_dict[game.get_home_formation()][game.get_away_formation()][1] += 1
# save the points and increment the games played in the array
formations_dict[game.get_away_formation()][game.get_home_formation()][0] += points
formations_dict[game.get_away_formation()][game.get_home_formation()][1] += 1
else:
# away team wins
# check if keys exists in the dictionary
self.check_for_keys_in_dict(game.get_away_formation(), game.get_home_formation(), formations_dict)
# calculate the amount of points for the away team win
points = WIN_POINTS * self.ranking_dict[game.get_season()][game.get_away_team()]
# save the points and increment the games played in the array
formations_dict[game.get_away_formation()][game.get_home_formation()][0] += points
formations_dict[game.get_away_formation()][game.get_home_formation()][1] += 1
# iterate over every formation
for key in formations_dict:
# iterate over every counter-formation
for key_2 in formations_dict[key]:
result_list = formations_dict[key][key_2]
# divide the total points by the amount of games played and save the result
formations_dict[key][key_2] = result_list[0] / result_list[1]
# update the recom_dict of the object
self.recom_dict = formations_dict
# return the recommendation dictionary
return formations_dict
# handle the exceptions
except AttributeError as e:
print("The list does not contain objects of the class Game /"
"The ranking_dict may not accord with the inserted matches_list\n", e)
except TypeError as e:
print("No iterable type is given / win/draw points are not of type int\n", e)
except Exception as e:
print("Unexpected error while generating the recommendation dictionary\n", e)
def check_for_keys_in_dict(self, key_1, key_2, chosen_dict):
"""
Checks whether key_1 exists in chosen_dict and
then whether key_2 exists in the dictionary stored under key_1
Parameters
----------
key_1 : String
The first key to be checked
key_2 : String
The second key to be checked in the dictionary of the first key
chosen_dict : dictionary
The dictionary that is being checked for the existence of certain keys
"""
if key_1 not in chosen_dict:
# if the key doesn't already exist as key
chosen_dict[key_1] = {}
if key_2 not in chosen_dict[key_1]:
# if the key doesn't exist in the dictionary of the first key as key
chosen_dict[key_1][key_2] = [0.0, 0]
def create_league_recom(self):
"""
Calls all the methods of the object in order to build a recommendation
dictionary for the Games in the matches_list and the Rankings in the ranking_list
Returns
-------
Returns the recommendation dictionary
"""
# build the ranking_dict
self.ranking_list_to_dict()
self.formations_info_recom()
# return the recommendation dictionary
return self.recom_dict
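# Usage sketch (illustrative only; Game and Ranking objects come from elsewhere in this project):
#   recom = LeagueRecom(games=game_list, rankings=ranking_list, w_points=3, d_points=1)
#   recom_dict = recom.create_league_recom()
#   # e.g. recom_dict['4-3-3']['4-4-2'] -> average points of formation 4-3-3 against 4-4-2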
def validate_recom_dict(self, test_matches):
"""Per formation the two most promising counter formations are given back
Over these counter formations the algorithm is validated
The recommendation dictionary is validated and a string with the accuracy is returned
Parameters
----------
test_matches : list
A list of Game objects to test the recommendation dictionary
Raises
------
AttributeError
When the test_matches list doesn't contain objects of the Game class
TypeError
When a wrong type is inserted into as a parameter
Exception
When the recommendation dictionary is not (yet) instantiated or an unexpected error happens
Returns
-------
String
the function returns a string which gives some information about the accuracy of the recommendation
dictionary over the test_matches
"""
try:
if self.recom_dict is None:
raise Exception("The recommendation dictionary is not initiated")
# a dictionary to save the actual recommendations
actual_recom = {}
# loop over every key in the recommendation dictionary
for first_key in self.recom_dict:
# initiate an empty list for storing the recommendations
actual_recom[first_key] = []
if len(self.recom_dict[first_key]) == 1:
# the formation only has one recom formation
actual_recom[first_key].append(list(self.recom_dict[first_key].keys())[0])
elif len(self.recom_dict[first_key]) == 2:
# the formation only has two recom formations
formations = list(self.recom_dict[first_key].keys())
actual_recom[first_key].append(formations[0])
actual_recom[first_key].append(formations[1])
else:
# the formation has more than two recom formations
loop_count = 1
first_recom = []
second_recom = []
# loop over every key in the dictionary of the first key
for second_key in self.recom_dict[first_key]:
if loop_count == 1:
# first loop
first_recom.append(second_key)
first_recom.append(self.recom_dict[first_key][second_key])
elif loop_count == 2:
# second loop
second_recom.append(second_key)
second_recom.append(self.recom_dict[first_key][second_key])
else:
# 3rd or higher loop
if self.recom_dict[first_key][second_key] > first_recom[1]:
# if the new formation is better than the one on first_recom
first_recom[0] = second_key
first_recom[1] = self.recom_dict[first_key][second_key]
else:
# the new formation is not better
if self.recom_dict[first_key][second_key] > second_recom[1]:
# if the new formation is better than the one on second_recom
"""
Connection pooling and host management.
"""
import logging
import time
from threading import Lock, RLock, Condition
import weakref
try:
from weakref import WeakSet
except ImportError:
from cassandra.util import WeakSet # NOQA
from cassandra import AuthenticationFailed
from cassandra.connection import MAX_STREAM_PER_CONNECTION, ConnectionException
log = logging.getLogger(__name__)
class NoConnectionsAvailable(Exception):
"""
All existing connections to a given host are busy, or there are
no open connections.
"""
pass
class Host(object):
"""
Represents a single Cassandra node.
"""
address = None
"""
The IP address or hostname of the node.
"""
monitor = None
"""
A :class:`.HealthMonitor` instance that tracks whether this node is
up or down.
"""
_datacenter = None
_rack = None
_reconnection_handler = None
def __init__(self, inet_address, conviction_policy_factory):
if inet_address is None:
raise ValueError("inet_address may not be None")
if conviction_policy_factory is None:
raise ValueError("conviction_policy_factory may not be None")
self.address = inet_address
self.monitor = HealthMonitor(conviction_policy_factory(self))
self._reconnection_lock = Lock()
@property
def datacenter(self):
""" The datacenter the node is in. """
return self._datacenter
@property
def rack(self):
""" The rack the node is in. """
return self._rack
def set_location_info(self, datacenter, rack):
"""
Sets the datacenter and rack for this node. Intended for internal
use (by the control connection, which periodically checks the
ring topology) only.
"""
self._datacenter = datacenter
self._rack = rack
def get_and_set_reconnection_handler(self, new_handler):
"""
Atomically replaces the reconnection handler for this
host. Intended for internal use only.
"""
with self._reconnection_lock:
old = self._reconnection_handler
self._reconnection_handler = new_handler
return old
def __eq__(self, other):
if not isinstance(other, Host):
return False
return self.address == other.address
def __str__(self):
return self.address
def __repr__(self):
dc = (" %s" % (self._datacenter,)) if self._datacenter else ""
return "<%s: %s%s>" % (self.__class__.__name__, self.address, dc)
class _ReconnectionHandler(object):
"""
Abstract class for attempting reconnections with a given
schedule and scheduler.
"""
_cancelled = False
def __init__(self, scheduler, schedule, callback, *callback_args, **callback_kwargs):
self.scheduler = scheduler
self.schedule = schedule
self.callback = callback
self.callback_args = callback_args
self.callback_kwargs = callback_kwargs
def start(self):
if self._cancelled:
return
# TODO cancel previous reconnection handlers? That's probably the job
# of whatever created this.
first_delay = self.schedule.next()
self.scheduler.schedule(first_delay, self.run)
def run(self):
if self._cancelled:
self.callback(*(self.callback_args), **(self.callback_kwargs))
try:
self.on_reconnection(self.try_reconnect())
except Exception as exc:
next_delay = self.schedule.next()
if self.on_exception(exc, next_delay):
self.scheduler.schedule(next_delay, self.run)
else:
self.callback(*(self.callback_args), **(self.callback_kwargs))
def cancel(self):
self._cancelled = True
def try_reconnect(self):
"""
Subclasses must implement this method. It should attempt to
open a new Connection and return it; if a failure occurs, an
Exception should be raised.
"""
raise NotImplementedError()
def on_reconnection(self, connection):
"""
Called when a new Connection is successfully opened. Nothing is
done by default.
"""
pass
def on_exception(self, exc, next_delay):
"""
Called when an Exception is raised when trying to connect.
`exc` is the Exception that was raised and `next_delay` is the
number of seconds (as a float) that the handler will wait before
attempting to connect again.
Subclasses should return :const:`False` if no more attempts to
connection should be made, :const:`True` otherwise. The default
behavior is to always retry unless the error is an
:exc:`.AuthenticationFailed` instance.
"""
if isinstance(exc, AuthenticationFailed):
return False
else:
return True
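# Hedged sketch: a "schedule" here is just an iterator of delays (in seconds) whose next() method is
# called above; the driver's reconnection policies supply the real schedules. The generator below
# (hypothetical name) only illustrates that contract with an exponential backoff.
def _exponential_backoff_schedule(base_delay=1.0, max_delay=600.0):
    delay = base_delay
    while True:
        yield min(delay, max_delay)
        delay *= 2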
class _HostReconnectionHandler(_ReconnectionHandler):
def __init__(self, host, connection_factory, *args, **kwargs):
_ReconnectionHandler.__init__(self, *args, **kwargs)
self.host = host
self.connection_factory = connection_factory
def try_reconnect(self):
return self.connection_factory()
def on_reconnection(self, connection):
self.host.monitor.reset()
def on_exception(self, exc, next_delay):
if isinstance(exc, AuthenticationFailed):
return False
else:
log.warn("Error attempting to reconnect to %s: %s", self.host, exc)
log.debug("Reconnection error details", exc_info=True)
return True
class HealthMonitor(object):
"""
Monitors whether a particular host is marked as up or down.
This class is primarily intended for internal use, although
applications may find it useful to check whether a given node
is up or down.
"""
is_up = True
"""
A boolean representing the current state of the node.
"""
def __init__(self, conviction_policy):
self._conviction_policy = conviction_policy
self._host = conviction_policy.host
# self._listeners will hold, among other things, references to
# Cluster objects. To allow those to be GC'ed (and shutdown) even
# though we've implemented __del__, use weak references.
self._listeners = WeakSet()
self._lock = RLock()
def register(self, listener):
with self._lock:
self._listeners.add(listener)
def unregister(self, listener):
with self._lock:
self._listeners.remove(listener)
def set_up(self):
if self.is_up:
return
self._conviction_policy.reset()
log.info("Host %s is considered up", self._host)
with self._lock:
listeners = self._listeners.copy()
for listener in listeners:
listener.on_up(self._host)
self.is_up = True
def set_down(self):
if not self.is_up:
return
self.is_up = False
log.info("Host %s is considered down", self._host)
with self._lock:
listeners = self._listeners.copy()
for listener in listeners:
listener.on_down(self._host)
def reset(self):
return self.set_up()
def signal_connection_failure(self, connection_exc):
is_down = self._conviction_policy.add_failure(connection_exc)
if is_down:
self.set_down()
return is_down
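# Usage sketch (illustrative only): application code can check or subscribe to a host's state, e.g.
#   if host.monitor.is_up: ...
#   host.monitor.register(listener)   # listener must implement on_up(host) and on_down(host)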
_MAX_SIMULTANEOUS_CREATION = 1
class HostConnectionPool(object):
host = None
host_distance = None
is_shutdown = False
open_count = 0
_scheduled_for_creation = 0
def __init__(self, host, host_distance, session):
self.host = host
self.host_distance = host_distance
self._session = weakref.proxy(session)
self._lock = RLock()
self._conn_available_condition = Condition()
core_conns = session.cluster.get_core_connections_per_host(host_distance)
self._connections = [session.cluster.connection_factory(host.address)
for i in range(core_conns)]
self._trash = set()
self.open_count = core_conns
def borrow_connection(self, timeout):
if self.is_shutdown:
raise ConnectionException(
"Pool for %s is shutdown" % (self.host,), self.host)
conns = self._connections
if not conns:
# handled specially just for simpler code
log.debug("Detected empty pool, opening core conns to %s" % (self.host,))
core_conns = self._session.cluster.get_core_connections_per_host(self.host_distance)
with self._lock:
# we check the length of self._connections again
# along with self._scheduled_for_creation while holding the lock
# in case multiple threads hit this condition at the same time
to_create = core_conns - (len(self._connections) + self._scheduled_for_creation)
for i in range(to_create):
self._scheduled_for_creation += 1
self._session.submit(self._create_new_connection)
# in_flight is incremented by wait_for_conn
conn = self._wait_for_conn(timeout)
conn.set_keyspace(self._session.keyspace)
return conn
else:
# note: it would be nice to push changes to these config settings
# to pools instead of doing a new lookup on every
# borrow_connection() call
max_reqs = self._session.cluster.get_max_requests_per_connection(self.host_distance)
max_conns = self._session.cluster.get_max_connections_per_host(self.host_distance)
least_busy = min(conns, key=lambda c: c.in_flight)
# to avoid another thread closing this connection while
# trashing it (through the return_connection process), hold
# the connection lock from this point until we've incremented
# its in_flight count
with least_busy.lock:
# if we have too many requests on this connection but we still
# have space to open a new connection against this host, go ahead
# and schedule the creation of a new connection
if least_busy.in_flight >= max_reqs and len(self._connections) < max_conns:
self._maybe_spawn_new_connection()
if least_busy.in_flight >= MAX_STREAM_PER_CONNECTION:
# once we release the lock, wait for another connection
need_to_wait = True
else:
need_to_wait = False
least_busy.in_flight += 1
if need_to_wait:
# wait_for_conn will increment in_flight on the conn
least_busy = self._wait_for_conn(timeout)
least_busy.set_keyspace(self._session.keyspace)
return least_busy
def _maybe_spawn_new_connection(self):
with self._lock:
if self._scheduled_for_creation >= _MAX_SIMULTANEOUS_CREATION:
return
self._scheduled_for_creation += 1
log.debug("Submitting task for creation of new Connection to %s" % (self.host,))
self._session.submit(self._create_new_connection)
def _create_new_connection(self):
try:
self._add_conn_if_under_max()
except Exception:
log.exception("Unexpectedly failed to create new connection")
finally:
with self._lock:
self._scheduled_for_creation -= 1
def _add_conn_if_under_max(self):
max_conns = self._session.cluster.get_max_connections_per_host(self.host_distance)
with self._lock:
if self.is_shutdown:
return False
if self.open_count >= max_conns:
return False
self.open_count += 1
try:
conn = self._session.cluster.connection_factory(self.host.address)
with self._lock:
new_connections = self._connections[:] + [conn]
self._connections = new_connections
self._signal_available_conn()
return True
except ConnectionException as exc:
log.exception("Failed to add new connection to pool for host %s" % (self.host,))
with self._lock:
self.open_count -= 1
if self.host.monitor.signal_connection_failure(exc):
self.shutdown()
return False
except AuthenticationFailed:
with self._lock:
self.open_count -= 1
return False
def _await_available_conn(self, timeout):
with self._conn_available_condition:
self._conn_available_condition.wait(timeout)
def _signal_available_conn(self):
with self._conn_available_condition:
self._conn_available_condition.notify()
def _signal_all_available_conn(self):
with self._conn_available_condition:
self._conn_available_condition.notify_all()
def _wait_for_conn(self, timeout):
start = time.time()
remaining = timeout
while remaining > 0:
# wait on our condition for the possibility that a connection
# is useable
self._await_available_conn(remaining)
# self.shutdown() may trigger the above Condition
if self.is_shutdown:
raise ConnectionException("Pool is shutdown")
conns = self._connections
if conns:
least_busy = min(conns, key=lambda c: c.in_flight)
with least_busy.lock:
if least_busy.in_flight < MAX_STREAM_PER_CONNECTION:
least_busy.in_flight += 1
return least_busy
remaining = timeout - (time.time() - start)
raise NoConnectionsAvailable()
def return_connection(self, connection):
with connection.lock:
connection.in_flight -= 1
in_flight = connection.in_flight
if connection.is_defunct or connection.is_closed:
is_down = self.host.monitor.signal_connection_failure(connection.last_error)
if is_down:
self.shutdown()
else:
self._replace(connection)
else:
if connection in self._trash:
with connection.lock:
if in_flight == 0:
with self._lock:
self._trash.remove(connection)
connection.close()
return
core_conns = self._session.cluster.get_core_connections_per_host(self.host_distance)
min_reqs = self._session.cluster.get_min_requests_per_connection(self.host_distance)
# we can use in_flight here without holding the connection lock
# because the fact that in_flight dipped below the min at some
0)
m.e98 = Constraint(expr= m.x413 == 0)
m.e99 = Constraint(expr= m.x414 == 0)
m.e100 = Constraint(expr= m.x127 - m.x397 - m.x400 == 0)
m.e101 = Constraint(expr= m.x128 - m.x398 - m.x401 == 0)
m.e102 = Constraint(expr= m.x129 - m.x399 - m.x402 == 0)
m.e103 = Constraint(expr= m.x133 - m.x409 - m.x412 == 0)
m.e104 = Constraint(expr= m.x134 - m.x410 - m.x413 == 0)
m.e105 = Constraint(expr= m.x135 - m.x411 - m.x414 == 0)
m.e106 = Constraint(expr= m.x397 - 40 * m.b910 <= 0)
m.e107 = Constraint(expr= m.x398 - 40 * m.b911 <= 0)
m.e108 = Constraint(expr= m.x399 - 40 * m.b912 <= 0)
m.e109 = Constraint(expr= m.x400 + 40 * m.b910 <= 40)
m.e110 = Constraint(expr= m.x401 + 40 * m.b911 <= 40)
m.e111 = Constraint(expr= m.x402 + 40 * m.b912 <= 40)
m.e112 = Constraint(expr= m.x409 - 4.45628648004517 * m.b910 <= 0)
m.e113 = Constraint(expr= m.x410 - 4.45628648004517 * m.b911 <= 0)
m.e114 = Constraint(expr= m.x411 - 4.45628648004517 * m.b912 <= 0)
m.e115 = Constraint(expr= m.x412 + 4.45628648004517 * m.b910
<= 4.45628648004517)
m.e116 = Constraint(expr= m.x413 + 4.45628648004517 * m.b911
<= 4.45628648004517)
m.e117 = Constraint(expr= m.x414 + 4.45628648004517 * m.b912
<= 4.45628648004517)
m.e118 = Constraint(expr= -0.75 * m.x415 + m.x439 == 0)
m.e119 = Constraint(expr= -0.75 * m.x416 + m.x440 == 0)
m.e120 = Constraint(expr= -0.75 * m.x417 + m.x441 == 0)
m.e121 = Constraint(expr= m.x418 == 0)
m.e122 = Constraint(expr= m.x419 == 0)
m.e123 = Constraint(expr= m.x420 == 0)
m.e124 = Constraint(expr= m.x442 == 0)
m.e125 = Constraint(expr= m.x443 == 0)
m.e126 = Constraint(expr= m.x444 == 0)
m.e127 = Constraint(expr= m.x145 - m.x415 - m.x418 == 0)
m.e128 = Constraint(expr= m.x146 - m.x416 - m.x419 == 0)
m.e129 = Constraint(expr= m.x147 - m.x417 - m.x420 == 0)
m.e130 = Constraint(expr= m.x157 - m.x439 - m.x442 == 0)
m.e131 = Constraint(expr= m.x158 - m.x440 - m.x443 == 0)
m.e132 = Constraint(expr= m.x159 - m.x441 - m.x444 == 0)
m.e133 = Constraint(expr= m.x415 - 4.45628648004517 * m.b913 <= 0)
m.e134 = Constraint(expr= m.x416 - 4.45628648004517 * m.b914 <= 0)
m.e135 = Constraint(expr= m.x417 - 4.45628648004517 * m.b915 <= 0)
m.e136 = Constraint(expr= m.x418 + 4.45628648004517 * m.b913
<= 4.45628648004517)
m.e137 = Constraint(expr= m.x419 + 4.45628648004517 * m.b914
<= 4.45628648004517)
m.e138 = Constraint(expr= m.x420 + 4.45628648004517 * m.b915
<= 4.45628648004517)
m.e139 = Constraint(expr= m.x439 - 3.34221486003388 * m.b913 <= 0)
m.e140 = Constraint(expr= m.x440 - 3.34221486003388 * m.b914 <= 0)
m.e141 = Constraint(expr= m.x441 - 3.34221486003388 * m.b915 <= 0)
m.e142 = Constraint(expr= m.x442 + 3.34221486003388 * m.b913
<= 3.34221486003388)
m.e143 = Constraint(expr= m.x443 + 3.34221486003388 * m.b914
<= 3.34221486003388)
m.e144 = Constraint(expr= m.x444 + 3.34221486003388 * m.b915
<= 3.34221486003388)
m.e145 = Constraint(expr= (m.x445 / (0.001 + 0.999 * m.b916) - 1.5 * log(m.x421
/ (0.001 + 0.999 * m.b916) + 1)) * (0.001 + 0.999 * m.b916) <= 0)
m.e146 = Constraint(expr= (m.x446 / (0.001 + 0.999 * m.b917) - 1.5 * log(m.x422
/ (0.001 + 0.999 * m.b917) + 1)) * (0.001 + 0.999 * m.b917) <= 0)
m.e147 = Constraint(expr= (m.x447 / (0.001 + 0.999 * m.b918) - 1.5 * log(m.x423
/ (0.001 + 0.999 * m.b918) + 1)) * (0.001 + 0.999 * m.b918) <= 0)
m.e148 = Constraint(expr= m.x424 == 0)
m.e149 = Constraint(expr= m.x425 == 0)
m.e150 = Constraint(expr= m.x426 == 0)
m.e151 = Constraint(expr= m.x451 == 0)
m.e152 = Constraint(expr= m.x452 == 0)
m.e153 = Constraint(expr= m.x453 == 0)
m.e154 = Constraint(expr= m.x148 - m.x421 - m.x424 == 0)
m.e155 = Constraint(expr= m.x149 - m.x422 - m.x425 == 0)
m.e156 = Constraint(expr= m.x150 - m.x423 - m.x426 == 0)
m.e157 = Constraint(expr= m.x160 - m.x445 - m.x451 == 0)
m.e158 = Constraint(expr= m.x161 - m.x446 - m.x452 == 0)
m.e159 = Constraint(expr= m.x162 - m.x447 - m.x453 == 0)
m.e160 = Constraint(expr= m.x421 - 4.45628648004517 * m.b916 <= 0)
m.e161 = Constraint(expr= m.x422 - 4.45628648004517 * m.b917 <= 0)
m.e162 = Constraint(expr= m.x423 - 4.45628648004517 * m.b918 <= 0)
m.e163 = Constraint(expr= m.x424 + 4.45628648004517 * m.b916
<= 4.45628648004517)
m.e164 = Constraint(expr= m.x425 + 4.45628648004517 * m.b917
<= 4.45628648004517)
m.e165 = Constraint(expr= m.x426 + 4.45628648004517 * m.b918
<= 4.45628648004517)
m.e166 = Constraint(expr= m.x445 - 2.54515263975353 * m.b916 <= 0)
m.e167 = Constraint(expr= m.x446 - 2.54515263975353 * m.b917 <= 0)
m.e168 = Constraint(expr= m.x447 - 2.54515263975353 * m.b918 <= 0)
m.e169 = Constraint(expr= m.x451 + 2.54515263975353 * m.b916
<= 2.54515263975353)
m.e170 = Constraint(expr= m.x452 + 2.54515263975353 * m.b917
<= 2.54515263975353)
m.e171 = Constraint(expr= m.x453 + 2.54515263975353 * m.b918
<= 2.54515263975353)
m.e172 = Constraint(expr= -m.x427 + m.x457 == 0)
m.e173 = Constraint(expr= -m.x428 + m.x458 == 0)
m.e174 = Constraint(expr= -m.x429 + m.x459 == 0)
m.e175 = Constraint(expr= -0.5 * m.x433 + m.x457 == 0)
m.e176 = Constraint(expr= -0.5 * m.x434 + m.x458 == 0)
m.e177 = Constraint(expr= -0.5 * m.x435 + m.x459 == 0)
m.e178 = Constraint(expr= m.x430 == 0)
m.e179 = Constraint(expr= m.x431 == 0)
m.e180 = Constraint(expr= m.x432 == 0)
m.e181 = Constraint(expr= m.x436 == 0)
m.e182 = Constraint(expr= m.x437 == 0)
m.e183 = Constraint(expr= m.x438 == 0)
m.e184 = Constraint(expr= m.x460 == 0)
m.e185 = Constraint(expr= m.x461 == 0)
m.e186 = Constraint(expr= m.x462 == 0)
m.e187 = Constraint(expr= m.x151 - m.x427 - m.x430 == 0)
m.e188 = Constraint(expr= m.x152 - m.x428 - m.x431 == 0)
m.e189 = Constraint(expr= m.x153 - m.x429 - m.x432 == 0)
m.e190 = Constraint(expr= m.x154 - m.x433 - m.x436 == 0)
m.e191 = Constraint(expr= m.x155 - m.x434 - m.x437 == 0)
m.e192 = Constraint(expr= m.x156 - m.x435 - m.x438 == 0)
m.e193 = Constraint(expr= m.x163 - m.x457 - m.x460 == 0)
m.e194 = Constraint(expr= m.x164 - m.x458 - m.x461 == 0)
m.e195 = Constraint(expr= m.x165 - m.x459 - m.x462 == 0)
m.e196 = Constraint(expr= m.x427 - 4.45628648004517 * m.b919 <= 0)
m.e197 = Constraint(expr= m.x428 - 4.45628648004517 * m.b920 <= 0)
m.e198 = Constraint(expr= m.x429 - 4.45628648004517 * m.b921 <= 0)
m.e199 = Constraint(expr= m.x430 + 4.45628648004517 * m.b919
<= 4.45628648004517)
m.e200 = Constraint(expr= m.x431 + 4.45628648004517 * m.b920
<= 4.45628648004517)
m.e201 = Constraint(expr= m.x432 + 4.45628648004517 * m.b921
<= 4.45628648004517)
m.e202 = Constraint(expr= m.x433 - 30 * m.b919 <= 0)
m.e203 = Constraint(expr= m.x434 - 30 * m.b920 <= 0)
m.e204 = Constraint(expr= m.x435 - 30 * m.b921 <= 0)
m.e205 = Constraint(expr= m.x436 + 30 * m.b919 <= 30)
m.e206 = Constraint(expr= m.x437 + 30 * m.b920 <= 30)
m.e207 = Constraint(expr= m.x438 + 30 * m.b921 <= 30)
m.e208 = Constraint(expr= m.x457 - 15 * m.b919 <= 0)
m.e209 = Constraint(expr= m.x458 - 15 * m.b920 <= 0)
m.e210 = Constraint(expr= m.x459 - 15 * m.b921 <= 0)
m.e211 = Constraint(expr= m.x460 + 15 * m.b919 <= 15)
m.e212 = Constraint(expr= m.x461 + 15 * m.b920 <= 15)
m.e213 = Constraint(expr= m.x462 + 15 * m.b921 <= 15)
m.e214 = Constraint(expr= (m.x493 / (0.001 + 0.999 * m.b922) - 1.25 * log(
m.x463 / (0.001 + 0.999 * m.b922) + 1)) * (0.001 + 0.999 * m.b922) <= 0)
m.e215 = Constraint(expr= (m.x494 / (0.001 + 0.999 * m.b923) - 1.25 * log(
m.x464 / (0.001 + 0.999 * m.b923) + 1)) * (0.001 + 0.999 * m.b923) <= 0)
m.e216 = Constraint(expr= (m.x495 / (0.001 + 0.999 * m.b924) - 1.25 * log(
m.x465 / (0.001 + 0.999 * m.b924) + 1)) * (0.001 + 0.999 * m.b924) <= 0)
m.e217 = Constraint(expr= m.x466 == 0)
m.e218 = Constraint(expr= m.x467 == 0)
m.e219 = Constraint(expr= m.x468 == 0)
m.e220 = Constraint(expr= m.x499 == 0)
m.e221 = Constraint(expr= m.x500 == 0)
m.e222 = Constraint(expr= m.x501 == 0)
m.e223 = Constraint(expr= m.x166 - m.x463 - m.x466 == 0)
m.e224 = Constraint(expr= m.x167 - m.x464 - m.x467 == 0)
m.e225 = Constraint(expr= m.x168 - m.x465 - m.x468 == 0)
m.e226 = Constraint(expr= m.x181 - m.x493 - m.x499 == 0)
m.e227 = Constraint(expr= m.x182 - m.x494 - m.x500 == 0)
m.e228 = Constraint(expr= m.x183 - m.x495 - m.x501 == 0)
m.e229 = Constraint(expr= m.x463 - 3.34221486003388 * m.b922 <= 0)
m.e230 = Constraint(expr= m.x464 - 3.34221486003388 * m.b923 <= 0)
m.e231 = Constraint(expr= m.x465 - 3.34221486003388 * m.b924 <= 0)
m.e232 = Constraint(expr= m.x466 + 3.34221486003388 * m.b922
<= 3.34221486003388)
m.e233 = Constraint(expr= m.x467 + 3.34221486003388 * m.b923
<= 3.34221486003388)
m.e234 = Constraint(expr= m.x468 + 3.34221486003388 * m.b924
<= 3.34221486003388)
m.e235 = Constraint(expr= m.x493 - 1.83548069293539 * m.b922 <= 0)
m.e236 = Constraint(expr= m.x494 - 1.83548069293539 * m.b923 <= 0)
m.e237 = Constraint(expr= m.x495 - 1.83548069293539 * m.b924 <= 0)
m.e238 = Constraint(expr= m.x499 + 1.83548069293539 * m.b922
<= 1.83548069293539)
m.e239 = Constraint(expr= m.x500 + 1.83548069293539 * m.b923
<= 1.83548069293539)
m.e240 = Constraint(expr= m.x501 + 1.83548069293539 * m.b924 <= 1.83548069293539)
import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from Utils import load
from Models import apolo_resnet
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import resnet
import sampler
import utils
import numpy as np
import torchvision.models as models
import torch.nn.init as init
import time
import snip
import attack
import zenprune
import synflow
model_names = sorted(name for name in resnet.__dict__
if name.islower() and not name.startswith("__")
and name.startswith("resnet")
and callable(resnet.__dict__[name]))
parser = argparse.ArgumentParser(description='Proper ResNets for CIFAR10 in pytorch')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet32',
# choices=model_names,
help='model architecture: ' + ' | '.join(model_names) +
' (default: resnet32)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=160, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--dataset', default='cifar10', choices=['cifar10', 'tiny-imagenet', 'cifar100'])
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=50, type=int,
metavar='N', help='print frequency (default: 50)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--half', dest='half', action='store_true',
help='use half-precision(16-bit) ')
parser.add_argument('--save-dir', dest='save_dir',
help='The directory used to save the trained models',
default='save_temp', type=str)
parser.add_argument('--save-every', dest='save_every',
help='Saves checkpoints at every specified number of epochs',
type=int, default=10)
parser.add_argument('--sv', dest='compute_sv', action='store_true',
help='compute_sv throughout training')
parser.add_argument('--ntk', dest='compute_ntk', action='store_true',
help='compute ntk eigenvalues throughout training')
parser.add_argument('--lrs',dest='compute_lrs',action='store_true',
help='compute number of linear regions throughout training')
parser.add_argument('--seed', default=1, type=int, help='seed')
# Following arguments are for pruning
parser.add_argument('--prune_method', type=str, default='NONE', choices=['NONE','RAND', 'SNIP', 'GRASP', 'Zen', 'Mag', 'Synflow'], help='Pruning methods for lottery ticket experiments.')
parser.add_argument('--prunesets_num', type=int, default=10, help='Number of datapoints for applying pruning methods.')
parser.add_argument('--sparse_iter', type=float, default=0, help='Number of pruning iterations; the sparsity level is computed as 0.8 ** sparse_iter.')
parser.add_argument('--sparse_lvl', type=float, default=1, help='Sparsity level of neural networks.')
parser.add_argument('--ONI', dest='ONI', action='store_true', help='set ONI on')
parser.add_argument('--T_iter', type=int, default=5, help='Number of iterations for ONI.')
parser.add_argument('--iter_prune', dest='iter_prune', action='store_true')
# Following arguments are for projection
parser.add_argument('--proj', dest='proj', action='store_true', help='set projection on')
parser.add_argument('--proj_freq', type=int, default=5, help='Apply projection every n iterations.')
parser.add_argument('--proj_clip_to', type=float, default=0.02, help='Smallest singular values clipped to.')
parser.add_argument('--ortho', dest='ortho', action='store_true', help='add orthogonal regularizer on.')
parser.add_argument('--pre_epochs', type=int, default=0, help='Number of pretraining epochs.')
parser.add_argument('--s_name', type=str, default='saved_sparsity', help='saved_sparsity.')
parser.add_argument('--s_value', type=float, default=1, help='given changing sparsity.')
parser.add_argument("--layer", nargs="*", type=int, default=[],)
parser.add_argument('--structured', dest='structured', action='store_true', help='set structured masks')
parser.add_argument('--reduce_ratio', type=float, default=1, help='compact masks into reduce_ratio x 100%% of the original number of channels.')
parser.add_argument('--shuffle_ratio', type=float, default=0.1, help='shuffle ratio of structured pruning.')
parser.add_argument('--rescale', dest='rescale', action='store_true', help='rescale weight after pruning')
parser.add_argument('--adv', dest='adv', action='store_true', help='If using adversarial trick')
parser.add_argument('--ep_coe', type=float, default=0, help='coefficient for expressivity.')
parser.add_argument('--ortho_importance', type=float, default=0, help='orthogonality regularizer importance.')
best_prec1 = 0
def main():
global args, best_prec1
args = parser.parse_args()
args.sparse_lvl = 0.8 ** args.sparse_iter
print(args.sparse_lvl)
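    # Editorial note (worked example, not in the original script): the 0.8 base means
    # each pruning iteration keeps 80% of the remaining weights, e.g. --sparse_iter 5
    # gives sparse_lvl = 0.8 ** 5 = 0.32768, i.e. roughly 33% of the weights survive.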
# Check the save_dir exists or not
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
torch.manual_seed(args.seed)
cudnn.benchmark = True
if args.dataset =='cifar10':
print('Loading {} dataset.'.format(args.dataset))
input_shape, num_classes = load.dimension(args.dataset)
train_dataset, train_loader = load.dataloader(args.dataset, args.batch_size, True, args.workers)
_, val_loader = load.dataloader(args.dataset, 128, False, args.workers)
elif args.dataset == 'tiny-imagenet':
args.batch_size = 256
args.lr = 0.2
args.epochs = 200
print('Loading {} dataset.'.format(args.dataset))
input_shape, num_classes = load.dimension(args.dataset)
train_dataset, train_loader = load.dataloader(args.dataset, args.batch_size, True, args.workers)
_, val_loader = load.dataloader(args.dataset, 128, False, args.workers)
elif args.dataset == 'cifar100':
args.batch_size = 128
# args.lr = 0.01
args.epochs = 160
# args.weight_decay = 5e-4
input_shape, num_classes = load.dimension(args.dataset)
train_dataset, train_loader = load.dataloader(args.dataset, args.batch_size, True, args.workers)
_, val_loader = load.dataloader(args.dataset, 128, False, args.workers)
if args.arch == 'resnet20':
print('Creating {} model.'.format(args.arch))
# model = torch.nn.DataParallel(resnet.__dict__[args.arch](ONI=args.ONI, T_iter=args.T_iter))
model = resnet.__dict__[args.arch](ONI=args.ONI, T_iter=args.T_iter)
model.cuda()
elif args.arch == 'resnet18':
print('Creating {} model.'.format(args.arch))
# Using resnet18 from Synflow
# model = load.model(args.arch, 'tinyimagenet')(input_shape,
# num_classes,
# dense_classifier = True).cuda()
# Using resnet18 from torchvision
model = models.resnet18()
model.fc = nn.Linear(512, num_classes)
model.cuda()
utils.kaiming_initialize(model)
elif args.arch == 'resnet110' or args.arch == 'resnet110full':
# Using resnet110 from Apollo
# model = apolo_resnet.ResNet(110, num_classes=num_classes)
model = load.model(args.arch, 'lottery')(input_shape,
num_classes,
dense_classifier = True).cuda()
elif args.arch in ['vgg16full', 'vgg16full-bn', 'vgg11full', 'vgg11full-bn'] :
if args.dataset == 'tiny-imagenet':
modeltype = 'tinyimagenet'
else:
modeltype = 'lottery'
# Using resnet110 from Apollo
# model = apolo_resnet.ResNet(110, num_classes=num_classes)
model = load.model(args.arch, modeltype)(input_shape,
num_classes,
dense_classifier = True).cuda()
# for layer in model.modules():
# if isinstance(layer, nn.Linear):
# init.orthogonal_(layer.weight.data)
# elif isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d)):
# special_init.DeltaOrthogonal_init(layer.weight.data)
print('Number of parameters of model: {}.'.format(count_parameters(model)))
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
if args.compute_sv:
        print('[*] Will compute singular values throughout training.')
size_hook = utils.get_hook(model, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d))
utils.run_once(train_loader, model)
utils.detach_hook([size_hook])
training_sv = []
training_svmax = []
        training_sv20 = [] # 20% singular value
training_sv50 = [] # 50% singular value
training_sv80 = [] # 80% singular value
training_kclip = [] # singular values larger than 1e-12
sv, svmax, sv20, sv50, sv80, kclip = utils.get_sv(model, size_hook)
training_sv.append(sv)
training_svmax.append(svmax)
training_sv20.append(sv20)
training_sv50.append(sv50)
training_sv80.append(sv80)
training_kclip.append(kclip)
if args.compute_ntk:
training_ntk_eig = []
if num_classes>=32:
_, ntk_loader = load.dataloader(args.dataset, 32, True, args.workers)
grasp_fetch = False
else:
ntk_loader = train_loader
grasp_fetch = True
training_ntk_eig.append(utils.get_ntk_eig(ntk_loader, [model], train_mode = True, num_batch=1, num_classes=num_classes, samples_per_class=1, grasp_fetch=grasp_fetch))
if args.compute_lrs:
# training_lrs = []
# lrc_model = utils.Linear_Region_Collector(train_loader, input_size=(args.batch_size,*input_shape), sample_batch=300)
# lrc_model.reinit(models=[model])
# lrs = lrc_model.forward_batch_sample()[0]
# training_lrs.append(lrs)
# lrc_model.clear_hooks()
# print('[*] Current number of linear regions:{}'.format(lrs))
GAP_zen, output_zen = utils.get_zenscore(model, train_loader, args.arch, num_classes)
print('[*] Before pruning: GAP_zen:{:e}, output_zen:{:e}'.format(GAP_zen,output_zen))
if args.half:
model.half()
criterion.half()
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
nesterov = True,
weight_decay=args.weight_decay)
if args.dataset == 'tiny-imagenet':
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[100, 150], last_epoch=args.start_epoch - 1)
# milestones=[30, 60, 80], last_epoch=args.start_epoch - 1)
elif args.dataset == 'cifar100':
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[60, 120], gamma = 0.2, last_epoch=args.start_epoch - 1)
else:
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[80, 120], last_epoch=args.start_epoch - 1)
# This part is for training full NN model to obtain Lottery ticket
# # First save original network:
init_path = os.path.join(args.save_dir, 'init_checkpoint.th')
save_checkpoint({
'state_dict': model.state_dict()
}, False, filename=init_path)
if args.prune_method == 'NONE':
pre_epochs = args.epochs
else:
pre_epochs = 0
training_loss = []
for epoch in range(pre_epochs):
# train for one epoch
print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
train(train_loader, model, criterion, optimizer, epoch, track = training_loss)
lr_scheduler.step()
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
if epoch > 0 and epoch % args.save_every == 0:
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=os.path.join(args.save_dir, 'densenet_checkpoint.th'))
if args.compute_sv and epoch % args.save_every == 0:
sv, svmax, sv20, sv50, sv80, kclip= utils.get_sv(model, size_hook)
training_sv.append(sv)
training_svmax.append(svmax)
training_sv20.append(sv20)
training_sv50.append(sv50)
training_sv80.append(sv80)
training_kclip.append(kclip)
np.save(os.path.join(args.save_dir, 'sv.npy'), training_sv)
np.save(os.path.join(args.save_dir, 'sv_svmax.npy'), training_svmax)
np.save(os.path.join(args.save_dir, 'sv_sv20.npy'), training_sv20)
np.save(os.path.join(args.save_dir, 'sv_sv50.npy'), training_sv50)
np.save(os.path.join(args.save_dir, 'sv_sv80.npy'), training_sv80)
np.save(os.path.join(args.save_dir, 'sv_kclip.npy'), training_kclip)
if args.compute_ntk and epoch % args.save_every == 0:
training_ntk_eig.append(utils.get_ntk_eig(ntk_loader, [model], train_mode = True, num_batch=1, num_classes=num_classes, samples_per_class=1, grasp_fetch=grasp_fetch))
np.save(os.path.join(args.save_dir, 'ntk_eig.npy'), training_ntk_eig)
print('[*] {} epochs of dense network pre-training done'.format(pre_epochs))
np.save(os.path.join(args.save_dir, 'trainloss.npy'), training_loss)
# densenet_checkpoint = torch.load(os.path.join(args.save_dir, 'densenet_checkpoint.th'))
# model.load_state_dict(densenet_checkpoint['state_dict'])
# print('Model loaded!')
# Obtain lottery ticket by magnitude pruning
if args.prune_method == 'NONE':
snip.apply_mag_prune(args, model)
# reinitialize
init_checkpoint = torch.load(init_path)
model.load_state_dict(init_checkpoint['state_dict'])
print('Model reinitialized!')
elif args.prune_method == 'SNIP':
init_checkpoint = torch.load(init_path)
model.load_state_dict(init_checkpoint['state_dict'])
print('Model reinitialized!')
snip.apply_snip(args, [model], train_loader, criterion, num_classes=num_classes)
# attack.shuffle_mask(model)
elif args.prune_method == 'RAND':
init_checkpoint = torch.load(init_path)
model.load_state_dict(init_checkpoint['state_dict'])
print('Model reinitialized!')
snip.apply_rand_prune([model], args.sparse_lvl)
elif args.prune_method == 'GRASP':
init_checkpoint = torch.load(init_path)
model.load_state_dict(init_checkpoint['state_dict'])
print('Model reinitialized!')
snip.apply_grasp(args, [model], train_loader, criterion, num_classes=num_classes)
elif args.prune_method == 'Zen':
zenprune.apply_zenprune(args, [model], train_loader)
# zenprune.apply_cont_zenprune(args, [model], train_loader)
# zenprune.apply_zentransfer(args, [model], train_loader)
# init_checkpoint = torch.load(init_path)
# model.load_state_dict(init_checkpoint['state_dict'])
# print('Model reinitialized!')
elif args.prune_method == 'Mag':
snip.apply_mag_prune(args, model)
init_checkpoint = torch.load(init_path)
model.load_state_dict(init_checkpoint['state_dict'])
print('Model reinitialized!')
elif args.prune_method == 'Synflow':
synflow.apply_synflow(args, model)
print('{} done, sparsity of the current model: {}.'.format(args.prune_method, utils.check_sparsity(model)))
if args.compute_lrs:
# training_lrs = []
# lrc_model = utils.Linear_Region_Collector(train_loader, input_size=(args.batch_size,*input_shape), sample_batch=300)
# lrc_model.reinit(models=[model])
# lrs = lrc_model.forward_batch_sample()[0]
# training_lrs.append(lrs)
# lrc_model.clear_hooks()
# print('[*] Current number of linear regions:{}'.format(lrs))
GAP_zen, output_zen = utils.get_zenscore(model, train_loader, args.arch, num_classes)
print('[*] After pruning: GAP_zen:{:e}, output_zen:{:e}'.format(GAP_zen,output_zen))
if args.evaluate:
validate(val_loader, model, criterion)
return
# Recreate optimizer and learning scheduler
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
nesterov = True,
weight_decay=args.weight_decay)
if args.dataset == 'tiny-imagenet':
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[100, 150], last_epoch=args.start_epoch - 1)
# milestones=[30, 60, 80], last_epoch=args.start_epoch - 1)
elif args.dataset == 'cifar100':
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[60, 120], gamma = 0.2, last_epoch=args.start_epoch - 1)
else:
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[80, 120], last_epoch=args.start_epoch - 1)
for epoch in range(args.epochs):
    # for epoch in range(args.pre_epochs, args.epochs):
from __future__ import annotations
import pyqtgraph as pg
from pyqtgraph import colormap as cmap
from typing import Generic, Iterator, Sequence, TypeVar, overload, MutableSequence
import numpy as np
from ._utils import convert_color_code, to_rgba
from .components import Legend, Region, ScaleBar, TextItem
from .graph_items import BarPlot, Curve, FillBetween, InfLine, LayerItem, Scatter, Histogram, TextGroup
from .mouse_event import MouseClickEvent
from ._doc import write_docs
from ...widgets.utils import FreeWidget
BOTTOM = "bottom"
LEFT = "left"
class LayerList(MutableSequence[LayerItem]):
"""A napari-like layer list for plot item handling."""
def __init__(self, parent: HasDataItems):
self.parent = parent
def __getitem__(self, key: int | str) -> LayerItem:
if isinstance(key, int):
return self.parent._items[key]
elif isinstance(key, str):
for item in self.parent._items:
if item.name == key:
return item
else:
raise ValueError(f"Item '{key}' not found.")
else:
raise TypeError(f"Cannot use type {type(key)} as a key.")
def __setitem__(self, key, value):
raise NotImplementedError("Can't set item")
def __delitem__(self, key: int | str):
return self.parent._remove_item(key)
def append(self, item: LayerItem):
if not isinstance(item, LayerItem):
raise TypeError(f"Cannot append type {type(item)}.")
self.parent._add_item(item)
def insert(self, pos: int, item: LayerItem):
if not isinstance(item, LayerItem):
raise TypeError(f"Cannot insert type {type(item)}.")
self.parent._insert_item(pos, item)
def __len__(self):
return len(self.parent._items)
def clear(self):
for _ in range(len(self)):
self.parent._remove_item(-1)
def swap(self, pos0: int, pos1: int):
return self.parent._swap_items(pos0, pos1)
def move(self, source: int, destination: int):
return self.parent._move_item(source, destination)
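# Editorial sketch (not part of the original module): a minimal illustration of the
# LayerList API above, assuming ``plot`` is an object implementing HasDataItems (such
# as the canvas classes defined further below); layer names and data are hypothetical.
def _example_layer_list_usage(plot):
    plot.add_curve([0, 1, 2], [0, 1, 4], name="signal")    # appended as a Curve layer
    plot.add_scatter([0, 1, 2], [1, 0, 2], name="points")  # appended as a Scatter layer
    curve = plot.layers["signal"]   # lookup by name
    first = plot.layers[0]          # lookup by position
    plot.layers.swap(0, 1)          # reorder; z-order follows list position
    del plot.layers["points"]       # remove by name
    return curve, first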
class HasDataItems:
_items: list[LayerItem]
@property
def _graphics(self) -> pg.GraphicsWidget:
"""Target widget to add graphics items."""
raise NotImplementedError()
@property
def layers(self) -> LayerList:
return LayerList(self)
@overload
def add_curve(self, x: Sequence[float], **kwargs): ...
@overload
def add_curve(self, x: Sequence[float], y: Sequence[float], **kwargs): ...
@write_docs
def add_curve(self,
x=None,
y=None,
face_color = None,
edge_color = None,
color = None,
size: float = 7,
name: str | None = None,
lw: float = 1,
ls: str = "-",
symbol=None):
"""
Add a line plot like ``plt.plot(x, y)``.
Parameters
----------
{x}
{y}
{face_color}
{edge_color}
{color}
size: float, default is 7
Symbol size.
{name}
{lw}
{ls}
{symbol}
Returns
-------
Curve
A plot item of a curve.
"""
x, y = _check_xy(x, y)
name = self._find_unique_name((name or "Curve"))
face_color, edge_color = _check_colors(face_color, edge_color, color)
item = Curve(x, y, face_color=face_color, edge_color=edge_color,
size=size, name=name, lw=lw, ls=ls, symbol=symbol)
self._add_item(item)
return item
@overload
def add_scatter(self, x: Sequence[float], **kwargs): ...
@overload
def add_scatter(self, x: Sequence[float], y: Sequence[float], **kwargs): ...
@write_docs
def add_scatter(self,
x=None,
y=None,
face_color = None,
edge_color = None,
color = None,
size: float = 7,
name: str | None = None,
lw: float = 1,
ls: str = "-",
symbol="o"):
"""
Add scatter plot like ``plt.scatter(x, y)``.
Parameters
----------
{x}
{y}
{face_color}
{edge_color}
{color}
size: float, default is 7
Symbol size.
{name}
{lw}
{ls}
{symbol}
Returns
-------
Scatter
A plot item of the scatter plot.
"""
x, y = _check_xy(x, y)
name = self._find_unique_name((name or "Scatter"))
face_color, edge_color = _check_colors(face_color, edge_color, color)
item = Scatter(x, y, face_color=face_color, edge_color=edge_color,
size=size, name=name, lw=lw, ls=ls, symbol=symbol)
self._add_item(item)
return item
@write_docs
def add_hist(self, data: Sequence[float],
bins: int | Sequence | str = 10,
range=None,
density: bool = False,
face_color = None,
edge_color = None,
color = None,
name: str | None = None,
lw: float = 1,
ls: str = "-",
):
"""
Add histogram like ``plt.hist(data)``.
Parameters
----------
data : array-like
            Data for histogram construction.
bins : int, sequence of float or str, default is 10
Bin numbers. See ``np.histogram`` for detail.
range : two floats, optional
Bin ranges. See ``np.histogram`` for detail.
density : bool, default is False
If true, plot the density instead of the counts. See ``np.histogram`` for
detail.
{face_color}
{edge_color}
{color}
{name}
{lw}
{ls}
Returns
-------
Histogram
A plot item of the histogram.
"""
name = self._find_unique_name((name or "Histogram"))
face_color, edge_color = _check_colors(face_color, edge_color, color)
item = Histogram(data, bins=bins, range=range, density=density,
face_color=face_color, edge_color=edge_color,
name=name, lw=lw, ls=ls)
self._add_item(item)
return item
@overload
def add_bar(self, x: Sequence[float], **kwargs): ...
@overload
def add_bar(self, x: Sequence[float], y: Sequence[float], **kwargs): ...
@write_docs
def add_bar(self,
x=None,
y=None,
width: float = 0.6,
face_color = None,
edge_color = None,
color = None,
name: str | None = None,
lw: float = 1,
ls: str = "-"):
"""
Add a bar plot like ``plt.bar(x, y)``.
Parameters
----------
{x}
{y}
width : float, default is 0.6
Width of each bar.
{face_color}
{edge_color}
{color}
{name}
{lw}
{ls}
Returns
-------
BarPlot
A plot item of the bar plot.
"""
x, y = _check_xy(x, y)
name = self._find_unique_name((name or "Bar"))
face_color, edge_color = _check_colors(face_color, edge_color, color)
item = BarPlot(x, y, width=width, face_color=face_color,
edge_color=edge_color, name=name, lw=lw, ls=ls)
self._add_item(item)
return item
@overload
def add_fillbetween(self, x: Sequence[float], **kwargs): ...
@overload
def add_fillbetween(self, x: Sequence[float], y: Sequence[float], **kwargs): ...
@write_docs
def add_fillbetween(self,
x=None,
y1=None,
y2=None,
face_color = None,
edge_color = None,
color = None,
name: str | None = None,
lw: float = 1,
ls: str = "-"):
x, y1 = _check_xy(x, y1)
name = self._find_unique_name((name or "FillBetween"))
face_color, edge_color = _check_colors(face_color, edge_color, color)
item = FillBetween(x, y1, y2, face_color=face_color, edge_color=edge_color,
name=name, lw=lw, ls=ls)
self._add_item(item)
@overload
def add_infline(self, slope: float, intercept: float, color = None,
name: str | None = None, lw: float = 1, ls: str = "-"):
...
@overload
def add_infline(self, pos: tuple[float, float], degree: float, color = None,
name: str | None = None, lw: float = 1, ls: str = "-"):
...
def add_infline(self,
*args,
color = None,
name: str | None = None,
lw: float = 1,
ls: str = "-",
**kwargs):
if kwargs:
if args:
raise TypeError("Cannot mix args and kwargs for infinite line parameters.")
keys = set(kwargs.keys())
if keys <= {"pos", "angle"}:
args = (kwargs.get("pos", (0, 0)), kwargs.get("angle", 0))
elif keys <= {"slope", "intercept"}:
args = (kwargs.get("slope", (0, 0)), kwargs.get("intercept", 0))
else:
raise ValueError(f"{kwargs} is invalid input.")
nargs = len(args)
if nargs == 1:
arg0 = args[0]
if np.isscalar(arg0):
angle = np.rad2deg(np.arctan(arg0))
pos = (0, 0)
else:
pos = arg0
angle = 90
elif nargs == 2:
arg0, arg1 = args
if np.isscalar(arg0):
angle = np.rad2deg(np.arctan(arg0))
pos = (0, arg1)
else:
pos = arg0
angle = arg1
else:
raise TypeError(
"Arguments of 'add_infline' should be either 'add_infline(slope, intercept)' "
"or 'add_infline(pos, degree)'."
)
item = InfLine(pos, angle, edge_color=color, name=name, lw=lw, ls=ls)
self._add_item(item)
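        # Editorial note (illustrative call forms, not in the original module; ``plot``
        # stands for any HasDataItems instance and the numbers are hypothetical):
        #   plot.add_infline(1.0, 0.0)             # slope/intercept -> the line y = x
        #   plot.add_infline((2.0, 3.0), 45)       # the line through (2, 3) at 45 degrees
        #   plot.add_infline(pos=(0, 1), angle=0)  # keyword form: horizontal line at y = 1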
@overload
def add_text(self, x: float, y: float, text: str, **kwargs):
...
@overload
def add_text(self, x: Sequence[float], y: Sequence[float], text: Sequence[str], **kwargs):
...
def add_text(self, x, y, text, color=None, name=None):
if np.isscalar(x) and np.isscalar(y):
x = [x]
y = [y]
text = [text]
item = TextGroup(x, y, text, color, name)
self._add_item(item)
def _add_item(self, item: LayerItem):
item.zorder = len(self._items)
self._graphics.addItem(item.native)
self._items.append(item)
def _insert_item(self, pos: int, item: LayerItem):
self._graphics.addItem(item.native)
self._items.insert(pos, item)
self._reorder()
def _swap_items(self, pos0: int, pos1: int):
item0 = self._items[pos0]
item1 = self._items[pos1]
self._items[pos0] = item1
self._items[pos1] = item0
self._reorder()
def _move_item(self, source: int, destination: int):
if source < destination:
destination -= 1
item = self._items.pop(source)
self._items.insert(destination, item)
self._reorder()
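        # Editorial note (worked example, not in the original module): on layers
        # [A, B, C], move(0, 2) first decrements the destination because the pop()
        # above removes the source before inserting, then re-inserts A at index 1,
        # giving [B, A, C]; move(0, 3) gives [B, C, A], i.e. pass len(layers) to move
        # an item to the end.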
def _remove_item(self, item: LayerItem | int | str):
if isinstance(item, LayerItem):
i = self._items.index(item)
elif isinstance(item, int):
if item < 0:
item += len(self._items)
i = item
elif isinstance(item, str):
for i, each in enumerate(self._items):
if each.name == item:
break
else:
raise ValueError(f"No item named {item}")
if i < 0:
raise ValueError(f"Item {item} not found")
item = self._items.pop(i)
self._graphics.removeItem(item.native)
def _reorder(self):
for i, item in enumerate(self._items):
item.zorder = i
return None
def _find_unique_name(self, prefix: str):
existing_names = [item.name for item in self._items]
name = prefix
i = 0
while name in existing_names:
name = f"{prefix}-{i}"
i += 1
return name
class HasViewBox(HasDataItems):
def __init__(self, viewbox: pg.ViewBox):
self._viewbox = viewbox
self._items: list[LayerItem] = []
# prepare mouse event
self.mouse_click_callbacks = []
# This ROI is not editable. Mouse click event will use it to determine
<reponame>verilylifesciences/analysis-py-utils
# Copyright 2019 Verily Life Sciences Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for interacting with BigQuery.
Sample usage:
client = bq.Client(project_id)
result = client.get_query_results(query)
"""
# Workaround for https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2366
from __future__ import absolute_import
import csv
import json
import logging
import os
import time
from collections import OrderedDict
import six
import subprocess
from six.moves import cStringIO
from google.cloud.exceptions import BadRequest
from typing import Any, Dict, List, Optional, Tuple, Union # noqa: F401
from google.api_core.exceptions import NotFound
from google.cloud import bigquery, storage
from google.cloud.bigquery.dataset import Dataset, DatasetReference
from google.cloud.bigquery.job import ExtractJobConfig, LoadJobConfig, QueryJobConfig
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table, TableReference
from verily.bigquery_wrapper.bq_base import (MAX_TABLES, BigqueryBaseClient, BQ_PATH_DELIMITER,
validate_query_job, DEFAULT_TIMEOUT_SEC,
DEFAULT_RETRY_FOR_API_CALLS,
_transient_string_in_exception_message,
DEFAULT_RETRY_FOR_ASYNC_JOBS)
# Bigquery has a limit of max 10000 rows to insert per request
MAX_ROWS_TO_INSERT = 10000
# When exporting data to multiple files, BQ pads the shard number to 12 digits. See:
# https://cloud.google.com/bigquery/docs/exporting-data#exporting_data_into_one_or_more_files
MULTIFILE_EXPORT_PAD_LENGTH = 12
class Client(BigqueryBaseClient):
"""Stores credentials and pointers to a BigQuery project.
Args:
project_id: The id of the project to associate with the client.
default_dataset: Optional. The default dataset to use for operations if none is specified.
maximum_billing_tier: Optional. The maximum billing tier to use for operations.
max_wait_secs: Optional. The amount of time to keep retrying operations, or to wait on an
operation to finish. If not set, will default to DEFAULT_TIMEOUT_SEC
alternate_bq_client_class: Optional. If provided, the client will use this class to create an
instance rather than the Google one.
"""
def __init__(self, project_id, default_dataset=None, maximum_billing_tier=None,
max_wait_secs=DEFAULT_TIMEOUT_SEC, alternate_bq_client_class=None):
self.gclient = (alternate_bq_client_class or bigquery.Client)(project=project_id)
self.max_wait_secs = max_wait_secs
# Retry object for errors encountered in making API calls (executing jobs, etc.)
self.default_retry_for_api_calls = DEFAULT_RETRY_FOR_API_CALLS.with_deadline(max_wait_secs)
# Retry object for errors encountered while polling jobs in progress.
# See https://github.com/googleapis/google-cloud-python/issues/6301
self.default_retry_for_async_jobs = DEFAULT_RETRY_FOR_ASYNC_JOBS.with_deadline(
max_wait_secs)
super(Client, self).__init__(project_id, default_dataset, maximum_billing_tier)
def get_delimiter(self):
""" Returns the delimiter used to separate project, dataset, and table in a table path. """
return BQ_PATH_DELIMITER
@classmethod
    def _wait_for_job(cls, query_job, query, max_wait_secs=DEFAULT_TIMEOUT_SEC):
# type: (QueryJob, str, Optional[int]) -> Iterator[Row]
"""Waits for a query job to finish and returns the result.
Surfaces any validation errors along with the offending query.
I have filed a feature request that printing the query be the default behavior.
https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5408
Args:
query_job: The QueryJob to wait for.
query: The string query that the QueryJob is querying.
max_wait_secs: The maximum time to wait for the job to finish.
Returns:
The result of the query as an iterator of Row objects.
"""
# Sleep for 1 second to make sure that the started job has had time to propagate validation
# errors.
time.sleep(1)
validate_query_job(query_job, query)
# Block until the job is done and return the result.
return query_job.result(timeout=max_wait_secs)
def get_query_results(self, query, use_legacy_sql=False, max_wait_secs=None):
# type: (str, Optional[bool], Optional[int]) -> List[Tuple[Any]]
"""Returns a list or rows, each of which is a tuple of values.
Args:
query: A string with a complete SQL query.
use_legacy_sql: Whether to use legacy SQL
max_wait_secs: The maximum number of seconds to wait for the query to complete. If not
set, the class default will be used.
Returns:
A list of tuples of values.
"""
config = QueryJobConfig()
if self.maximum_billing_tier:
config.maximum_billing_tier = self.maximum_billing_tier
config.use_legacy_sql = use_legacy_sql
query_job = self._run_async_query(query, job_config=config)
rows = self._wait_for_job(query_job, query,
max_wait_secs=max_wait_secs or self.max_wait_secs)
if query_job.errors:
logging.warning('Errors in get_query_results: {}'.format(query_job.errors))
return [x.values() for x in list(rows)]
def get_table_reference_from_path(self, table_path):
# type: (str) -> TableReference
"""
Returns a TableReference for a given path to a BigQuery table.
Args:
table_path: A BigQuery table path in the form project.dataset.table
Returns:
A TableReference for the table specified by the path
"""
project, dataset, table = self.parse_table_path(table_path)
dataset_ref = DatasetReference(project, dataset)
return TableReference(dataset_ref, table)
def create_table_from_query(self,
query, # type: str
table_path, # type: str
write_disposition='WRITE_EMPTY', # type: Optional[str]
use_legacy_sql=False, # type: Optional[bool]
max_wait_secs=None, # type: Optional[int]
expected_schema=None # type: Optional[List[SchemaField]]
):
# type: (...) -> None
"""Creates a table in BigQuery from a specified query.
Args:
query: The query to run.
table_path: The path to the table (in the client's project) to write
the results to.
write_disposition: Specifies behavior if table already exists. See options here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs under
configuration.query.writeDisposition
use_legacy_sql: Whether the query is written in standard or legacy sql.
max_wait_secs: Seconds to wait for the query before timing out. If not
set, the class default will be used.
expected_schema: The expected schema of the resulting table; unused in this implementation
"""
if write_disposition not in ['WRITE_TRUNCATE', 'WRITE_APPEND', 'WRITE_EMPTY']:
raise ValueError('write_disposition must be one of WRITE_TRUNCATE, '
'WRITE_APPEND, or WRITE_EMPTY')
config = QueryJobConfig()
if self.maximum_billing_tier:
config.maximum_billing_tier = self.maximum_billing_tier
config.use_legacy_sql = use_legacy_sql
config.write_disposition = write_disposition
config.allow_large_results = True
config.destination = self.get_table_reference_from_path(table_path)
query_job = self._run_async_query(query, job_config=config)
return self._wait_for_job(query_job, query,
max_wait_secs=max_wait_secs or self.max_wait_secs)
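        # Editorial sketch (not in the original library; project/dataset/table names are
        # hypothetical):
        #   client = Client('my-project', default_dataset='my_dataset')
        #   client.create_table_from_query(
        #       'SELECT 1 AS x', 'my-project.my_dataset.my_table',
        #       write_disposition='WRITE_TRUNCATE')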
def create_tables_from_dict(self,
table_names_to_schemas, # type: Dict[str, List[SchemaField]]
dataset_id=None, # type: Optional[str]
replace_existing_tables=False, # type: Optional[bool]
):
# type: (...) -> None
"""Creates a set of tables from a dictionary of table names to their schemas.
Args:
table_names_to_schemas: A dictionary of:
key: The table name.
value: A list of SchemaField objects.
dataset_id: The dataset in which to create tables. If not specified, use default dataset.
replace_existing_tables: If True, delete and re-create tables. Otherwise, checks to see
if any of the requested tables exist. If they do, it will raise a RuntimeError.
Raises:
RuntimeError if replace_existing_tables is False and any of the tables requested for
creation already exist
"""
dataset_id = dataset_id or self.default_dataset_id
dataset_ref = DatasetReference(self.project_id, dataset_id)
# If the flag isn't set to replace existing tables, raise an error if any tables we're
# trying to create already exist.
if not replace_existing_tables:
self._raise_if_tables_exist(table_names_to_schemas.keys(), dataset_id)
for name, schema in six.iteritems(table_names_to_schemas):
table_ref = TableReference(dataset_ref, name)
# Use the Table object so it retains its schema.
table = bigquery.Table(table_ref, schema=schema)
if self.table_exists(table) and replace_existing_tables:
self.delete_table(table)
self.create_table(table)
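        # Editorial sketch (not in the original library; the schema below is hypothetical):
        #   client.create_tables_from_dict(
        #       {'users': [SchemaField('id', 'INTEGER'), SchemaField('name', 'STRING')]},
        #       dataset_id='my_dataset',
        #       replace_existing_tables=True)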
def create_dataset_by_name(self, name, expiration_hours=None):
# type: (str, Optional[float]) -> None
"""Create a new dataset within the current project.
Args:
name: The name of the new dataset.
expiration_hours: The default expiration time for tables within the dataset.
"""
if name not in self.get_datasets():
# Initialize the Dataset instead of passing a reference so we can set expiration hours.
dataset = Dataset(DatasetReference(self.project_id, str(name)))
if expiration_hours:
dataset.default_table_expiration_ms = expiration_hours * (60 * 60 * 1000)
self.create_dataset(dataset)
else:
logging.warning('Dataset {} already exists.'.format(name))
def delete_dataset_by_name(self, name, delete_all_tables=False):
# type: (str, bool) -> None
"""Delete a dataset within the current project.
Args:
name: The name of the dataset to delete.
delete_all_tables: If True, will delete all tables in the dataset before attempting to
delete the dataset. You can't delete a dataset until it contains no tables.
Raises:
RuntimeError if there are still tables in the dataset and you try to delete it (with
delete_all_tables set to False)
"""
dataset_id = str(name)
dataset_ref = DatasetReference(self.project_id, dataset_id)
self.delete_dataset(dataset_ref, delete_all_tables)
def delete_table_by_name(self, table_path):
# type: (str) -> None
"""Delete a table.
Args:
table_path: A string of the form '<dataset id>.<table name>' or
'<project id>.<dataset_id>.<table_name>'
"""
self.delete_table(self.get_table_reference_from_path(table_path))
def dataset_exists_with_name(self, dataset_name):
# type: (str) -> bool
"""Determines whether a dataset exists with the given name.
Args:
dataset_name: The name of the dataset to check.
Returns:
True if the dataset exists in this client's project, False otherwise.
"""
return self.dataset_exists(DatasetReference(self.project_id, dataset_name))
def table_exists_with_name(self, table_path):
# type: (str) -> bool
"""Determines whether a table exists at the given table path.
Args:
table_path: The table path of the table to check. Uses the default dataset ID if a
dataset is not specified as part of the table path.
Returns:
True if the table exists at the given path, False otherwise.
"""
return self.table_exists(self.get_table_reference_from_path(table_path))
def tables(self, dataset_id):
# type: (str) -> List[str]
"""Returns a list of | |
used are::
network_id -- string -- TODO: type description here.
Example:
profile_id -- string -- TODO: type description here.
Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=options.get("network_id"),
profile_id=options.get("profile_id"))
# Prepare query URL
_url_path = '/networks/{networkId}/sm/profile/clarity/{profileId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': options.get('network_id', None),
'profileId': options.get('profile_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.http_client.delete(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def create_network_sm_profile_umbrella(self,
options=dict()):
"""Does a POST request to /networks/{networkId}/sm/profile/umbrella.
Create a new profile containing a Cisco Umbrella payload
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
network_id -- string -- TODO: type description here.
Example:
create_network_sm_profile_umbrella --
CreateNetworkSmProfileUmbrellaModel -- TODO: type
description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=options.get("network_id"),
create_network_sm_profile_umbrella=options.get("create_network_sm_profile_umbrella"))
# Prepare query URL
_url_path = '/networks/{networkId}/sm/profile/umbrella'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': options.get('network_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(options.get('create_network_sm_profile_umbrella')))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def update_network_sm_profile_umbrella(self,
options=dict()):
"""Does a PUT request to /networks/{networkId}/sm/profile/umbrella/{profileId}.
Update an existing profile containing a Cisco Umbrella payload
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
network_id -- string -- TODO: type description here.
Example:
profile_id -- string -- TODO: type description here.
Example:
update_network_sm_profile_umbrella --
UpdateNetworkSmProfileUmbrellaModel -- TODO: type
description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=options.get("network_id"),
profile_id=options.get("profile_id"))
# Prepare query URL
_url_path = '/networks/{networkId}/sm/profile/umbrella/{profileId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': options.get('network_id', None),
'profileId': options.get('profile_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(options.get('update_network_sm_profile_umbrella')))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def add_network_sm_profile_umbrella(self,
options=dict()):
"""Does a POST request to /networks/{networkId}/sm/profile/umbrella/{profileId}.
Add a Cisco Umbrella payload to an existing profile
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
network_id -- string -- TODO: type description here.
Example:
profile_id -- string -- TODO: type description here.
Example:
add_network_sm_profile_umbrella --
AddNetworkSmProfileUmbrellaModel -- TODO: type
description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=options.get("network_id"),
profile_id=options.get("profile_id"),
add_network_sm_profile_umbrella=options.get("add_network_sm_profile_umbrella"))
# Prepare query URL
_url_path = '/networks/{networkId}/sm/profile/umbrella/{profileId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': options.get('network_id', None),
'profileId': options.get('profile_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(options.get('add_network_sm_profile_umbrella')))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def get_network_sm_profile_umbrella(self,
options=dict()):
"""Does a GET request to /networks/{networkId}/sm/profile/umbrella/{profileId}.
Get details for a Cisco Umbrella payload
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
network_id -- string -- TODO: type description here.
Example:
profile_id -- string -- TODO: type description here.
Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=options.get("network_id"),
profile_id=options.get("profile_id"))
# Prepare query URL
_url_path = '/networks/{networkId}/sm/profile/umbrella/{profileId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': options.get('network_id', None),
'profileId': options.get('profile_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def delete_network_sm_profile_umbrella(self,
options=dict()):
"""Does a DELETE request to /networks/{networkId}/sm/profile/umbrella/{profileId}.
Delete a Cisco Umbrella payload. Deletes the entire profile if it's
empty after removing the payload
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
network_id -- string -- TODO: type description here.
Example:
profile_id -- string -- TODO: type description here.
Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=options.get("network_id"),
profile_id=options.get("profile_id"))
# Prepare query URL
_url_path = '/networks/{networkId}/sm/profile/umbrella/{profileId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': options.get('network_id', None),
'profileId': options.get('profile_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.http_client.delete(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def get_network_sm_profiles(self,
network_id):
"""Does a GET request to /networks/{networkId}/sm/profiles.
List all the profiles in the network
Args:
network_id (string): TODO: type description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=network_id)
# Prepare query URL
_url_path = '/networks/{networkId}/sm/profiles'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': network_id
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
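        # Editorial sketch (not in the original SDK; ``controller`` stands for an instance
        # of this controller class and the ids are hypothetical):
        #   profiles = controller.get_network_sm_profiles('N_1234')
        #   umbrella = controller.get_network_sm_profile_umbrella(
        #       {'network_id': 'N_1234', 'profile_id': '123'})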
def get_network_sm_user_device_profiles(self,
options=dict()):
"""Does a GET request to /networks/{networkId}/sm/user/{userId}/deviceProfiles.
Get the profiles associated with a user
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
                endpoint are supplied through the dictionary with their names
                being the key and their desired values being the value. A list
                of parameters that can be used are::
<filename>src/isle/h5io.py
r"""!\file
Routines for working with HDF5.
"""
from logging import getLogger
from pathlib import Path
from itertools import chain
import yaml
import h5py as h5
import numpy as np
from . import Vector, isleVersion, pythonVersion, blazeVersion, pybind11Version
from .random import readStateH5
from .collection import listToSlice, parseSlice, subslice, normalizeSlice
def empty(dtype):
return h5.Empty(dtype=dtype)
def createH5Group(base, name):
r"""!
Create a new HDF5 group if it does not yet exist.
\param base H5 group in which to create the new group.
\param name Name of the new group relative to base.
\returns The (potentially newly created) group.
"""
if name in base:
if isinstance(base[name], h5.Group):
return base[name] # there is already a group with that name
# something else than a group with that name
raise ValueError(("Cannot create group '{}', another object with the same"\
+" name already exists in '{}/{}'").format(name, base.filename, base.name))
# does not exists yet
return base.create_group(name)
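# Editorial note (illustrative, not in the original module): createH5Group is safe to
# call repeatedly with the same name -- the existing group is simply returned -- and it
# only raises if an object of a different kind (e.g. a dataset) already uses that name.
#   grp = createH5Group(h5f, "meta")
#   grp = createH5Group(h5f, "meta")   # returns the existing /meta group, no error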
def writeDict(h5group, dictionary):
"""!
Write a `dict` into an HDF5 group by storing each dict element as a dataset.
"""
for key, value in dictionary.items():
h5group[key] = value
def loadDict(h5group):
"""!
Load all datasets from an HDF5 group into a dictionary.
"""
return {key: dset[()] for key, dset in h5group.items()}
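# Editorial sketch (not part of the original module): a minimal round trip through
# createH5Group, writeDict and loadDict on an in-memory HDF5 file; the file and group
# names are hypothetical. Note that loadDict returns numpy scalars/arrays, not the
# original Python objects.
def _example_dict_roundtrip():
    with h5.File("example.h5", "w", driver="core", backing_store=False) as h5f:
        grp = createH5Group(h5f, "params")
        writeDict(grp, {"beta": 1.0, "nt": 16})
        return loadDict(grp)  # e.g. {"beta": 1.0, "nt": 16} as numpy scalars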
def loadString(dset):
"""!
Load a string from an HDF5 dataset and return as a Python str object.
Since version 3.0, h5py loads UTF8 strings as `bytes` objects.
This function provides uniform behavior across h5py 2.0 and h5py 3.0 by
always returning `str` objects.
"""
s = dset[()]
if isinstance(s, str):
return s
return s.decode("utf-8")
def writeMetadata(fname, lattice, params, makeActionSrc):
"""!
Write metadata to HDF5 file.
Overwrites any existing datasets.
"""
with h5.File(str(fname), "a") as outf:
metaGrp = createH5Group(outf, "meta")
metaGrp["lattice"] = yaml.dump(lattice)
metaGrp["params"] = yaml.dump(params)
metaGrp["action"] = makeActionSrc
vgrp = createH5Group(metaGrp, "version")
vgrp["isle"] = str(isleVersion)
vgrp["python"] = str(pythonVersion)
vgrp["blaze"] = str(blazeVersion)
vgrp["pybind11"] = str(pybind11Version)
def readMetadata(fname):
r"""!
Read metadata on ensemble from HDF5 file.
\returns Lattice, parameters, makeAction (source code of function)
"""
if isinstance(fname, (tuple, list)):
fname = fname[0]
with h5.File(str(fname), "r") as inf:
try:
metaGrp = inf["meta"]
lattice = yaml.safe_load(loadString(metaGrp["lattice"]))
params = yaml.safe_load(loadString(metaGrp["params"]))
makeActionSrc = loadString(metaGrp["action"])
versions = {name: loadString(val) for name, val in metaGrp["version"].items()}
except KeyError as exc:
getLogger(__name__).error("Cannot read metadata from file %s: %s",
str(fname), str(exc))
raise
return lattice, params, makeActionSrc, versions
def initializeNewFile(fname, overwrite, lattice, params, makeActionSrc, extraGroups=[]):
"""!
Prepare the output file by storing program versions, metadata, and creating groups.
If `overwrite==False` the file must not exist. If it is True, the file is removed if it exists.
"""
fname = Path(fname)
if fname.exists():
if overwrite:
fname.unlink()
getLogger(__name__).info("Output file %s exists -- overwriting", fname)
else:
getLogger(__name__).error("Output file %s exists and not allowed to overwrite", fname)
raise RuntimeError("Output file exists")
with h5.File(str(fname), "w-") as h5f:
for group in extraGroups:
createH5Group(h5f, group)
writeMetadata(fname, lattice, params, makeActionSrc)
def writeTrajectory(h5group, label, stage):
r"""!
Write a trajectory (endpoint) to a HDF5 group.
Creates a new group with name 'label' and stores the EvolutionStage.
\param h5group Base HDF5 group to store trajectory in.
\param label Name of the subgroup of `h5group` to write to.
The subgroup must not already exist.
\param stage EvolutionStage to save.
\returns The newly created HDF5 group containing the trajectory.
"""
grp = h5group.create_group(str(label))
stage.save(grp)
return grp
def writeCheckpoint(h5group, label, rng, trajGrpName, evolver, evolverManager):
r"""!
Write a checkpoint to a HDF5 group.
Creates a new group with name 'label' and stores RNG state
and a soft link to the trajectory for this checkpoint.
\param h5group Base HDF5 group to store trajectory in.
\param label Name of the subgroup of `h5group` to write to.
The subgroup must not already exist.
\param rng Random number generator whose state to save in the checkpoint.
\param trajGrpName Name of the HDF5 group containing the trajectory this
checkpoint corresponds to.
\param evolver Evolver used to make the trajectory at this checkpoint.
\param evolverManager Instance of EvolverManager to handle saving the evolver.
\returns The newly created HDF5 group containing the checkpoint.
"""
grp = h5group.create_group(str(label))
rng.writeH5(grp.create_group("rngState"))
grp["cfg"] = h5.SoftLink(trajGrpName)
evolverManager.save(evolver, grp.create_group("evolver"))
return grp
def loadCheckpoint(h5group, label, evolverManager, action, lattice):
r"""!
Load a checkpoint from a HDF5 group.
\param h5group Base HDF5 group containing checkpoints.
\param label Name of the subgroup of `h5group` to read from.
\param evolverManager A EvolverManager to load the evolver
including its type.
\param action Action to construct the evolver with.
\param lattice Lattice to construct the evolver with.
\returns (RNG, HDF5 group of configuration, evolver)
"""
grp = h5group[str(label)]
rng = readStateH5(grp["rngState"])
cfgGrp = grp["cfg"]
evolver = evolverManager.load(grp["evolver"], action, lattice, rng)
return rng, cfgGrp, evolver
def loadConfiguration(h5group, trajIdx=-1, path="configuration"):
r"""!
Load a configuration from HDF5.
\param h5group Base HDF5 group. Configurations must be located at `h5group[path]`.
\param trajIdx Trajectory index of the configuration to load.
This is the number under which the configuration is stored, not a
plain index into the array of all configurations.
\param path Path under `h5group` that contains configurations.
\returns (configuration, action value)
"""
configs = loadList(h5group[path])
# get proper positive index
idx = configs[-1][0]+trajIdx+1 if trajIdx < 0 else trajIdx
# get the configuration group with the given index
    cfgGrp = next(pair[1] for pair in configs if pair[0] == idx)
return Vector(cfgGrp["phi"][()]), cfgGrp["actVal"][()]
def loadList(h5group, convert=int):
r"""!
Load a list of objects from a HDF5 group.
All entries in `h5group` must have names convertible to `int` by `convert`.
\param h5group HDF5 group to load from. All elements in that group must be
named such that they can be processed by `convert`.
\param convert Function that takes a group entry name and returns an int.
\returns List of pairs (key, obj) where key is the name of each object converted to `int`.
"""
return sorted(map(lambda p: (convert(p[0]), p[1]), h5group.items()),
key=lambda item: item[0])
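# Editorial note (illustrative, not in the original module): for a group with members
# named "0", "10" and "2", loadList returns [(0, <"0">), (2, <"2">), (10, <"10">)],
# i.e. pairs sorted numerically rather than in the lexicographic order h5py iterates in.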
def loadActionValuesFrom(h5obj, full=False, base="/"):
r"""!
Load values of the action from a HDF5 file given via a HDF5 object in that file.
Reads the action from dataset `/action/action` if it exists.
Otherwise, read action from saved configurations.
    \param h5obj An arbitrary HDF5 object in the file to read the action from.
\param full If True, always read from saved configurations as `/action/action` might
contain only a subset of all actions.
\param base Path in HDF5 file under which the action is stored.
\returns (action, configRange) where
- action: Numpy array of values of the action.
- configRange: `slice` indicating the range of configurations
the action was loaded for.
\throws RuntimeError if neither `/action/action` nor `/configuration` exist in the file.
"""
grp = h5obj.file[base]
action = None
if not full and "action" in grp:
action = grp["action/action"][()]
cRange = normalizeSlice(parseSlice(grp["action"].attrs["configurations"],
minComponents=3),
0, action.shape[0])
if not full and "weights" in grp:
action = grp["weights/actVal"][()]
cRange = normalizeSlice(parseSlice(grp["weights"].attrs["configurations"],
minComponents=3),
0, action.shape[0])
if action is None and "configuration" in grp:
indices, groups = zip(*loadList(grp["configuration"]))
action = np.array([grp["actVal"][()] for grp in groups])
cRange = listToSlice(indices)
if action is None:
getLogger(__name__).error("Cannot load action, no configurations or "
"separate action found in file %s.", grp.file.filename)
raise RuntimeError("No action found in file")
return action, cRange
def loadActionValues(fname, full=False, base="/"):
r"""!
Load values of the action from a HDF5 file.
Reads the action from dataset `/action/action` if it exists.
Otherwise, read action from saved configurations.
\param fname Name of the file to load action from.
\param full If True, always read from saved configurations as `/action/action` might
contain only a subset of all actions.
\param base Path in HDF5 file under which the action is stored.
\returns (action, configRange) where
- action: Numpy array of values of the action.
- configRange: `slice` indicating the range of configurations
the action was loaded for.
\throws RuntimeError if neither `/action/action` nor `/configuration` exist in the file.
"""
with h5.File(fname, "r") as h5f:
return loadActionValuesFrom(h5f, full, base)
def loadActionWeightsFor(dset, base="/"):
r"""!
Load the weights from the imaginary part of the action for a measurement result.
The weights are loaded based on the 'configurations' attribute stored in the
parent group of `dset`.
This requires the attribute to be stored properly (no `None`) and the file to
###############################################################################
# Test gdal.OpenDir() with a fake AWS server
def test_vsis3_opendir():
if gdaltest.webserver_port == 0:
pytest.skip()
# Unlimited depth
handler = webserver.SequentialHandler()
handler.add('GET', '/vsis3_opendir/', 200, {'Content-type': 'application/xml'},
"""<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult>
<Prefix/>
<Marker/>
<Contents>
<Key>test.txt</Key>
<LastModified>1970-01-01T00:00:01.000Z</LastModified>
<Size>40</Size>
</Contents>
<Contents>
<Key>subdir/</Key>
<LastModified>1970-01-01T00:00:01.000Z</LastModified>
<Size>0</Size>
</Contents>
<Contents>
<Key>subdir/test.txt</Key>
<LastModified>1970-01-01T00:00:01.000Z</LastModified>
<Size>5</Size>
</Contents>
</ListBucketResult>
""")
with webserver.install_http_handler(handler):
d = gdal.OpenDir('/vsis3/vsis3_opendir')
assert d is not None
entry = gdal.GetNextDirEntry(d)
assert entry.name == 'test.txt'
assert entry.size == 40
assert entry.mode == 32768
assert entry.mtime == 1
entry = gdal.GetNextDirEntry(d)
assert entry.name == 'subdir'
assert entry.mode == 16384
entry = gdal.GetNextDirEntry(d)
assert entry.name == 'subdir/test.txt'
entry = gdal.GetNextDirEntry(d)
assert entry is None
gdal.CloseDir(d)
# Depth = 0
handler = webserver.SequentialHandler()
handler.add('GET', '/vsis3_opendir/?delimiter=%2F', 200, {'Content-type': 'application/xml'},
"""<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult>
<Prefix/>
<Marker/>
<Contents>
<Key>test.txt</Key>
<LastModified>1970-01-01T00:00:01.000Z</LastModified>
<Size>40</Size>
</Contents>
<CommonPrefixes>
<Prefix>subdir/</Prefix>
</CommonPrefixes>
</ListBucketResult>
""")
with webserver.install_http_handler(handler):
d = gdal.OpenDir('/vsis3/vsis3_opendir', 0)
assert d is not None
entry = gdal.GetNextDirEntry(d)
assert entry.name == 'test.txt'
assert entry.size == 40
assert entry.mode == 32768
assert entry.mtime == 1
entry = gdal.GetNextDirEntry(d)
assert entry.name == 'subdir'
assert entry.mode == 16384
entry = gdal.GetNextDirEntry(d)
assert entry is None
gdal.CloseDir(d)
# Depth = 1
handler = webserver.SequentialHandler()
handler.add('GET', '/vsis3_opendir/?delimiter=%2F', 200, {'Content-type': 'application/xml'},
"""<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult>
<Prefix/>
<Marker/>
<Contents>
<Key>test.txt</Key>
<LastModified>1970-01-01T00:00:01.000Z</LastModified>
<Size>40</Size>
</Contents>
<CommonPrefixes>
<Prefix>subdir/</Prefix>
</CommonPrefixes>
</ListBucketResult>
""")
with webserver.install_http_handler(handler):
d = gdal.OpenDir('/vsis3/vsis3_opendir', 1)
assert d is not None
entry = gdal.GetNextDirEntry(d)
assert entry.name == 'test.txt'
assert entry.size == 40
assert entry.mode == 32768
assert entry.mtime == 1
entry = gdal.GetNextDirEntry(d)
assert entry.name == 'subdir'
assert entry.mode == 16384
handler = webserver.SequentialHandler()
handler.add('GET', '/vsis3_opendir/?delimiter=%2F&prefix=subdir%2F', 200, {'Content-type': 'application/xml'},
"""<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult>
<Prefix>subdir/</Prefix>
<Marker/>
<Contents>
<Key>subdir/test.txt</Key>
<LastModified>1970-01-01T00:00:01.000Z</LastModified>
<Size>5</Size>
</Contents>
</ListBucketResult>
""")
with webserver.install_http_handler(handler):
entry = gdal.GetNextDirEntry(d)
assert entry.name == 'subdir/test.txt'
entry = gdal.GetNextDirEntry(d)
assert entry is None
gdal.CloseDir(d)
###############################################################################
# Test simple PUT support with a fake AWS server
def test_vsis3_4():
if gdaltest.webserver_port == 0:
pytest.skip()
with webserver.install_http_handler(webserver.SequentialHandler()):
with gdaltest.error_handler():
f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3', 'wb')
assert f is None
handler = webserver.SequentialHandler()
handler.add('GET', '/s3_fake_bucket3/empty_file.bin', 200, {'Connection': 'close'}, 'foo')
with webserver.install_http_handler(handler):
assert gdal.VSIStatL('/vsis3/s3_fake_bucket3/empty_file.bin').size == 3
# Empty file
handler = webserver.SequentialHandler()
def method(request):
if request.headers['Content-Length'] != '0':
sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers))
request.send_response(400)
return
request.send_response(200)
request.send_header('Content-Length', 0)
request.end_headers()
handler.add('PUT', '/s3_fake_bucket3/empty_file.bin', custom_method=method)
with webserver.install_http_handler(handler):
f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3/empty_file.bin', 'wb')
assert f is not None
gdal.ErrorReset()
gdal.VSIFCloseL(f)
assert gdal.GetLastErrorMsg() == ''
handler = webserver.SequentialHandler()
handler.add('GET', '/s3_fake_bucket3/empty_file.bin', 200, {'Connection': 'close'}, '')
with webserver.install_http_handler(handler):
assert gdal.VSIStatL('/vsis3/s3_fake_bucket3/empty_file.bin').size == 0
# Invalid seek
handler = webserver.SequentialHandler()
with webserver.install_http_handler(handler):
f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3/empty_file.bin', 'wb')
assert f is not None
with gdaltest.error_handler():
ret = gdal.VSIFSeekL(f, 1, 0)
assert ret != 0
gdal.VSIFCloseL(f)
# Invalid read
handler = webserver.SequentialHandler()
with webserver.install_http_handler(handler):
f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3/empty_file.bin', 'wb')
assert f is not None
with gdaltest.error_handler():
ret = gdal.VSIFReadL(1, 1, f)
assert not ret
gdal.VSIFCloseL(f)
# Error case
handler = webserver.SequentialHandler()
handler.add('PUT', '/s3_fake_bucket3/empty_file_error.bin', 403)
with webserver.install_http_handler(handler):
f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3/empty_file_error.bin', 'wb')
assert f is not None
gdal.ErrorReset()
with gdaltest.error_handler():
gdal.VSIFCloseL(f)
assert gdal.GetLastErrorMsg() != ''
# Nominal case
gdal.NetworkStatsReset()
with gdaltest.config_option('CPL_VSIL_NETWORK_STATS_ENABLED', 'YES'):
with webserver.install_http_handler(webserver.SequentialHandler()):
f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3/another_file.bin', 'wb')
assert f is not None
assert gdal.VSIFSeekL(f, gdal.VSIFTellL(f), 0) == 0
assert gdal.VSIFSeekL(f, 0, 1) == 0
assert gdal.VSIFSeekL(f, 0, 2) == 0
assert gdal.VSIFWriteL('foo', 1, 3, f) == 3
assert gdal.VSIFSeekL(f, gdal.VSIFTellL(f), 0) == 0
assert gdal.VSIFWriteL('bar', 1, 3, f) == 3
handler = webserver.SequentialHandler()
def method(request):
if request.headers['Content-Length'] != '6':
sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers))
request.send_response(400)
request.send_header('Content-Length', 0)
request.end_headers()
return
request.wfile.write('HTTP/1.1 100 Continue\r\n\r\n'.encode('ascii'))
content = request.rfile.read(6).decode('ascii')
if content != 'foobar':
sys.stderr.write('Did not get expected content: %s\n' % content)
request.send_response(400)
request.send_header('Content-Length', 0)
request.end_headers()
return
request.send_response(200)
request.send_header('Content-Length', 0)
request.end_headers()
handler.add('PUT', '/s3_fake_bucket3/another_file.bin', custom_method=method)
gdal.ErrorReset()
with webserver.install_http_handler(handler):
gdal.VSIFCloseL(f)
assert gdal.GetLastErrorMsg() == ''
j = json.loads(gdal.NetworkStatsGetAsSerializedJSON())
#print(j)
assert j == {
"methods": {
"PUT": {
"count": 1,
"uploaded_bytes": 6
}
},
"handlers": {
"vsis3": {
"files": {
"/vsis3/s3_fake_bucket3/another_file.bin": {
"methods": {
"PUT": {
"count": 1,
"uploaded_bytes": 6
}
},
"actions": {
"Write": {
"methods": {
"PUT": {
"count": 1,
"uploaded_bytes": 6
}
}
}
}
}
},
"methods": {
"PUT": {
"count": 1,
"uploaded_bytes": 6
}
}
}
}
}
gdal.NetworkStatsReset()
# Redirect case
with webserver.install_http_handler(webserver.SequentialHandler()):
f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3/redirect', 'wb')
assert f is not None
assert gdal.VSIFWriteL('foobar', 1, 6, f) == 6
handler = webserver.SequentialHandler()
def method(request):
request.protocol_version = 'HTTP/1.1'
if request.headers['Authorization'].find('us-east-1') >= 0:
request.send_response(400)
response = '<?xml version="1.0" encoding="UTF-8"?><Error><Message>bla</Message><Code>AuthorizationHeaderMalformed</Code><Region>us-west-2</Region></Error>'
response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response)
request.send_header('Content-type', 'application/xml')
request.send_header('Transfer-Encoding', 'chunked')
request.end_headers()
request.wfile.write(response.encode('ascii'))
elif request.headers['Authorization'].find('us-west-2') >= 0:
if request.headers['Content-Length'] != '6':
sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers))
request.send_response(400)
request.send_header('Content-Length', 0)
request.end_headers()
return
request.wfile.write('HTTP/1.1 100 Continue\r\n\r\n'.encode('ascii'))
content = request.rfile.read(6).decode('ascii')
if content != 'foobar':
sys.stderr.write('Did not get expected content: %s\n' % content)
request.send_response(400)
request.send_header('Content-Length', 0)
request.end_headers()
return
request.send_response(200)
request.send_header('Content-Length', 0)
request.end_headers()
else:
sys.stderr.write('Bad headers: %s\n' % str(request.headers))
request.send_response(403)
request.send_header('Content-Length', 0)
request.end_headers()
handler.add('PUT', '/s3_fake_bucket3/redirect', custom_method=method)
handler.add('PUT', '/s3_fake_bucket3/redirect', custom_method=method)
gdal.ErrorReset()
with webserver.install_http_handler(handler):
gdal.VSIFCloseL(f)
assert gdal.GetLastErrorMsg() == ''
###############################################################################
# Test simple PUT support with retry logic
def test_vsis3_write_single_put_retry():
if gdaltest.webserver_port == 0:
pytest.skip()
with gdaltest.config_options({'GDAL_HTTP_MAX_RETRY': '2',
'GDAL_HTTP_RETRY_DELAY': '0.01'}):
with webserver.install_http_handler(webserver.SequentialHandler()):
f = gdal.VSIFOpenL('/vsis3/s3_fake_bucket3/put_with_retry.bin', 'wb')
assert f is not None
assert gdal.VSIFWriteL('foo', 1, 3, f) == 3
handler = webserver.SequentialHandler()
def method(request):
if request.headers['Content-Length'] != '3':
sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers))
request.send_response(400)
request.send_header('Content-Length', 0)
request.end_headers()
return
request.wfile.write('HTTP/1.1 100 Continue\r\n\r\n'.encode('ascii'))
content = request.rfile.read(3).decode('ascii')
if content != 'foo':
sys.stderr.write('Did not get expected content: %s\n' % content)
request.send_response(400)
request.send_header('Content-Length', 0)
request.end_headers()
return
request.send_response(200)
request.send_header('Content-Length', 0)
request.end_headers()
handler.add('PUT', '/s3_fake_bucket3/put_with_retry.bin', 502)
handler.add('PUT', '/s3_fake_bucket3/put_with_retry.bin', custom_method=method)
with gdaltest.error_handler():
with webserver.install_http_handler(handler):
gdal.VSIFCloseL(f)
###############################################################################
# Test simple DELETE support with a fake AWS server
def test_vsis3_5():
if gdaltest.webserver_port == 0:
pytest.skip()
with webserver.install_http_handler(webserver.SequentialHandler()):
with gdaltest.error_handler():
ret = gdal.Unlink('/vsis3/foo')
assert ret != 0
handler = webserver.SequentialHandler()
handler.add('GET', '/s3_delete_bucket/delete_file', 200, {'Connection': 'close'}, 'foo')
with webserver.install_http_handler(handler):
assert gdal.VSIStatL('/vsis3/s3_delete_bucket/delete_file').size == 3
handler = webserver.SequentialHandler()
with webserver.install_http_handler(handler):
assert gdal.VSIStatL('/vsis3/s3_delete_bucket/delete_file').size == 3
handler = webserver.SequentialHandler()
handler.add('DELETE', '/s3_delete_bucket/delete_file', 204)
with webserver.install_http_handler(handler):
ret = gdal.Unlink('/vsis3/s3_delete_bucket/delete_file')
assert ret == 0
handler = webserver.SequentialHandler()
handler.add('GET', '/s3_delete_bucket/delete_file', 404, {'Connection': 'close'})
handler.add('GET', '/s3_delete_bucket/?delimiter=%2F&max-keys=100&prefix=delete_file%2F', 404, {'Connection': 'close'})
with webserver.install_http_handler(handler):
assert gdal.VSIStatL('/vsis3/s3_delete_bucket/delete_file') is None
handler = webserver.SequentialHandler()
handler.add('GET', '/s3_delete_bucket/delete_file_error', 200)
handler.add('DELETE', '/s3_delete_bucket/delete_file_error', 403)
with webserver.install_http_handler(handler):
with gdaltest.error_handler():
ret = gdal.Unlink('/vsis3/s3_delete_bucket/delete_file_error')
assert ret != 0
handler = webserver.SequentialHandler()
handler.add('GET', '/s3_delete_bucket/redirect', 200)
def method(request):
request.protocol_version = 'HTTP/1.1'
if request.headers['Authorization'].find('us-east-1') >= 0:
request.send_response(400)
response = '<?xml version="1.0" encoding="UTF-8"?><Error><Message>bla</Message><Code>AuthorizationHeaderMalformed</Code><Region>us-west-2</Region></Error>'
response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response)
request.send_header('Content-type', 'application/xml')
request.send_header('Transfer-Encoding', 'chunked')
request.end_headers()
request.wfile.write(response.encode('ascii'))
elif request.headers['Authorization'].find('us-west-2') >= 0:
request.send_response(204)
request.send_header('Content-Length', 0)
request.end_headers()
else:
sys.stderr.write('Bad headers: %s\n' % str(request.headers))
request.send_response(403)
request.send_header('Content-Length', 0)
request.end_headers()
handler.add('DELETE', '/s3_delete_bucket/redirect', custom_method=method)
handler.add('DELETE', '/s3_delete_bucket/redirect', custom_method=method)
with webserver.install_http_handler(handler):
ret = gdal.Unlink('/vsis3/s3_delete_bucket/redirect')
assert ret == 0
###############################################################################
# Test DeleteObjects with a fake AWS server
def test_vsis3_unlink_batch():
if gdaltest.webserver_port == 0:
pytest.skip()
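    # S3 DeleteObjects requires a Content-MD5 header (the base64-encoded MD5 digest of
    # the XML request body); the handler below checks it against the expected payload.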
def method(request):
if request.headers['Content-MD5'] != 'Ze0X4LdlTwCsT+WpNxD9FA==':
sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers))
request.send_response(403)
return
content = request.rfile.read(int(request.headers['Content-Length'])).decode('ascii')
if content != """<?xml version="1.0" encoding="UTF-8"?>
<Delete xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Object>
<Key>foo</Key>
</Object>
<Object>
<Key>bar/baz</Key>
</Object>
</Delete>
""":
sys.stderr.write('Did not get expected content: %s\n' % content)
request.send_response(403)
return
request.protocol_version = 'HTTP/1.1'
request.send_response(200)
response = """<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Deleted><Key>foo</Key></Deleted><Deleted><Key>bar/baz</Key></Deleted></DeleteResult>"""
request.send_header('Content-Length', len(response))
request.send_header('Connection', 'close')
request.end_headers()
request.wfile.write(response.encode('ascii'))
handler = webserver.SequentialHandler()
handler.add('POST', '/unlink_batch/?delete', custom_method=method)
handler.add('POST', '/unlink_batch/?delete', 200, {},
"""<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Deleted><Key>baw</Key></Deleted></DeleteResult>""")
with gdaltest.config_option('CPL_VSIS3_UNLINK_BATCH_SIZE', '2'):
with webserver.install_http_handler(handler):
ret = gdal.UnlinkBatch(['/vsis3/unlink_batch/foo', '/vsis3/unlink_batch/bar/baz', '/vsis3/unlink_batch/baw'])
assert ret
handler = webserver.SequentialHandler()
handler.add('POST', '/unlink_batch/?delete', 200, {},
"""<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Failed><Key>foo</Key></Failed></DeleteResult>""")
with webserver.install_http_handler(handler):
ret = gdal.UnlinkBatch(['/vsis3/unlink_batch/foo'])
assert not ret
###############################################################################
# Test RmdirRecursive() with a fake AWS server
def test_vsis3_rmdir_recursive():
if gdaltest.webserver_port == 0:
pytest.skip()
handler = webserver.SequentialHandler()
handler.add('GET', '/test_rmdir_recursive/?prefix=somedir%2F', 200, {'Content-type': 'application/xml'},
"""<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult>
<Prefix>somedir/</Prefix>
<Marker/>
<Contents>
<Key>somedir/test.txt</Key>
<LastModified>1970-01-01T00:00:01.000Z</LastModified>
<Size>40</Size>
</Contents>
<Contents>
<Key>somedir/subdir/</Key>
<LastModified>1970-01-01T00:00:01.000Z</LastModified>
<Size>0</Size>
</Contents>
<Contents>
<Key>somedir/subdir/test.txt</Key>
<LastModified>1970-01-01T00:00:01.000Z</LastModified>
<Size>5</Size>
</Contents>
</ListBucketResult>
""")
def method(request):
content = request.rfile.read(int(request.headers['Content-Length'])).decode('ascii')
if content != """<?xml version="1.0" encoding="UTF-8"?>
<Delete xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Object>
<Key>somedir/test.txt</Key>
</Object>
<Object>
<Key>somedir/subdir/</Key>
</Object>
</Delete>
""":
sys.stderr.write('Did not get expected content: %s\n' % content)
request.send_response(403)
return
request.protocol_version = 'HTTP/1.1'
request.send_response(200)
response = """<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Deleted><Key>somedir/test.txt</Key></Deleted><Deleted><Key>somedir/subdir/</Key></Deleted></DeleteResult>"""
request.send_header('Content-Length', len(response))
request.send_header('Connection', 'close')
request.end_headers()
request.wfile.write(response.encode('ascii'))
handler.add('POST', '/test_rmdir_recursive/?delete', custom_method=method)
def method(request):
content = request.rfile.read(int(request.headers['Content-Length'])).decode('ascii')
if content != """<?xml version="1.0" encoding="UTF-8"?>
<Delete xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Object>
<Key>somedir/subdir/test.txt</Key>
</Object>
<Object>
<Key>somedir/</Key>
</Object>
</Delete>
""":
sys.stderr.write('Did not get expected content: %s\n' % content)
request.send_response(403)
return
request.protocol_version = 'HTTP/1.1'
request.send_response(200)
response = """<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Deleted><Key>somedir/subdir/test.txt</Key></Deleted><Deleted><Key>somedir/</Key></Deleted></DeleteResult>"""
request.send_header('Content-Length', len(response))
request.send_header('Connection', 'close')
hasattr(self, 'bgp_cer_cidr') and self.bgp_cer_cidr is not None:
_dict['bgp_cer_cidr'] = self.bgp_cer_cidr
if hasattr(self, 'bgp_ibm_asn') and self.bgp_ibm_asn is not None:
_dict['bgp_ibm_asn'] = self.bgp_ibm_asn
if hasattr(self, 'bgp_ibm_cidr') and self.bgp_ibm_cidr is not None:
_dict['bgp_ibm_cidr'] = self.bgp_ibm_cidr
if hasattr(self, 'bgp_status') and self.bgp_status is not None:
_dict['bgp_status'] = self.bgp_status
if hasattr(self, 'change_request') and self.change_request is not None:
_dict['change_request'] = self.change_request
if hasattr(self, 'created_at') and self.created_at is not None:
_dict['created_at'] = datetime_to_string(self.created_at)
if hasattr(self, 'crn') and self.crn is not None:
_dict['crn'] = self.crn
if hasattr(self, 'customer_account_id') and self.customer_account_id is not None:
_dict['customer_account_id'] = self.customer_account_id
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'operational_status') and self.operational_status is not None:
_dict['operational_status'] = self.operational_status
if hasattr(self, 'port') and self.port is not None:
_dict['port'] = self.port.to_dict()
if hasattr(self, 'provider_api_managed') and self.provider_api_managed is not None:
_dict['provider_api_managed'] = self.provider_api_managed
if hasattr(self, 'speed_mbps') and self.speed_mbps is not None:
_dict['speed_mbps'] = self.speed_mbps
if hasattr(self, 'type') and self.type is not None:
_dict['type'] = self.type
if hasattr(self, 'vlan') and self.vlan is not None:
_dict['vlan'] = self.vlan
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ProviderGateway object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ProviderGateway') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ProviderGateway') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class BgpStatusEnum(Enum):
"""
Gateway BGP status.
The list of enumerated values for this property may expand in the future. Code and
processes using this field must tolerate unexpected values.
"""
ACTIVE = "active"
CONNECT = "connect"
ESTABLISHED = "established"
IDLE = "idle"
class OperationalStatusEnum(Enum):
"""
Gateway operational status.
The list of enumerated values for this property may expand in the future. Code and
processes using this field must tolerate unexpected values.
"""
CONFIGURING = "configuring"
CREATE_PENDING = "create_pending"
CREATE_REJECTED = "create_rejected"
DELETE_PENDING = "delete_pending"
PROVISIONED = "provisioned"
class ProviderGatewayChangeRequest():
"""
ProviderGatewayChangeRequest.
"""
def __init__(self) -> None:
"""
Initialize a ProviderGatewayChangeRequest object.
"""
msg = "Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}".format(
", ".join(['ProviderGatewayChangeRequestProviderGatewayCreate', 'ProviderGatewayChangeRequestProviderGatewayDelete', 'ProviderGatewayChangeRequestProviderGatewayUpdateAttributes']))
raise Exception(msg)
class ProviderGatewayCollection():
"""
A paginated collection of resources.
:attr ProviderGatewayCollectionFirst first: A reference to the first page of
resources.
    :attr int limit: The maximum number of resources that can be returned by the request.
:attr ProviderGatewayCollectionNext next: (optional) A reference to the next
page of resources; this reference is included for all pages except the last
page.
:attr int total_count: The total number of resources across all pages.
:attr List[ProviderGateway] gateways: Collection of Direct Link gateways.
"""
def __init__(self,
first: 'ProviderGatewayCollectionFirst',
limit: int,
total_count: int,
gateways: List['ProviderGateway'],
*,
next: 'ProviderGatewayCollectionNext' = None) -> None:
"""
Initialize a ProviderGatewayCollection object.
:param ProviderGatewayCollectionFirst first: A reference to the first page
of resources.
        :param int limit: The maximum number of resources that can be returned by the
               request.
:param int total_count: The total number of resources across all pages.
:param List[ProviderGateway] gateways: Collection of Direct Link gateways.
:param ProviderGatewayCollectionNext next: (optional) A reference to the
next page of resources; this reference is included for all pages except the
last page.
"""
self.first = first
self.limit = limit
self.next = next
self.total_count = total_count
self.gateways = gateways
@classmethod
def from_dict(cls, _dict: Dict) -> 'ProviderGatewayCollection':
"""Initialize a ProviderGatewayCollection object from a json dictionary."""
args = {}
if 'first' in _dict:
args['first'] = ProviderGatewayCollectionFirst.from_dict(_dict.get('first'))
else:
raise ValueError('Required property \'first\' not present in ProviderGatewayCollection JSON')
if 'limit' in _dict:
args['limit'] = _dict.get('limit')
else:
raise ValueError('Required property \'limit\' not present in ProviderGatewayCollection JSON')
if 'next' in _dict:
args['next'] = ProviderGatewayCollectionNext.from_dict(_dict.get('next'))
if 'total_count' in _dict:
args['total_count'] = _dict.get('total_count')
else:
raise ValueError('Required property \'total_count\' not present in ProviderGatewayCollection JSON')
if 'gateways' in _dict:
args['gateways'] = [ProviderGateway.from_dict(x) for x in _dict.get('gateways')]
else:
raise ValueError('Required property \'gateways\' not present in ProviderGatewayCollection JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ProviderGatewayCollection object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'first') and self.first is not None:
_dict['first'] = self.first.to_dict()
if hasattr(self, 'limit') and self.limit is not None:
_dict['limit'] = self.limit
if hasattr(self, 'next') and self.next is not None:
_dict['next'] = self.next.to_dict()
if hasattr(self, 'total_count') and self.total_count is not None:
_dict['total_count'] = self.total_count
if hasattr(self, 'gateways') and self.gateways is not None:
_dict['gateways'] = [x.to_dict() for x in self.gateways]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ProviderGatewayCollection object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ProviderGatewayCollection') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ProviderGatewayCollection') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ProviderGatewayCollectionFirst():
"""
A reference to the first page of resources.
:attr str href: The URL for the first page of resources.
"""
def __init__(self,
href: str) -> None:
"""
Initialize a ProviderGatewayCollectionFirst object.
:param str href: The URL for the first page of resources.
"""
self.href = href
@classmethod
def from_dict(cls, _dict: Dict) -> 'ProviderGatewayCollectionFirst':
"""Initialize a ProviderGatewayCollectionFirst object from a json dictionary."""
args = {}
if 'href' in _dict:
args['href'] = _dict.get('href')
else:
raise ValueError('Required property \'href\' not present in ProviderGatewayCollectionFirst JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ProviderGatewayCollectionFirst object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'href') and self.href is not None:
_dict['href'] = self.href
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ProviderGatewayCollectionFirst object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ProviderGatewayCollectionFirst') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ProviderGatewayCollectionFirst') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ProviderGatewayCollectionNext():
"""
A reference to the next page of resources; this reference is included for all pages
except the last page.
:attr str href: The URL for the next page of resources.
:attr str start: start token for the next page of resources.
"""
def __init__(self,
href: str,
start: str) -> None:
"""
Initialize a ProviderGatewayCollectionNext object.
:param str href: The URL for the next page of resources.
:param str start: start token for the next page of resources.
"""
self.href = href
self.start = start
@classmethod
def from_dict(cls, _dict: Dict) -> 'ProviderGatewayCollectionNext':
"""Initialize a ProviderGatewayCollectionNext object from a json dictionary."""
args = {}
if 'href' in _dict:
args['href'] = _dict.get('href')
else:
raise ValueError('Required property \'href\' not present in ProviderGatewayCollectionNext JSON')
if 'start' in _dict:
args['start'] = _dict.get('start')
else:
raise ValueError('Required property \'start\' not present in ProviderGatewayCollectionNext JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ProviderGatewayCollectionNext object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'href') and self.href is not None:
_dict['href'] = self.href
if hasattr(self, 'start') and self.start is not None:
_dict['start'] = self.start
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ProviderGatewayCollectionNext object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ProviderGatewayCollectionNext') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ProviderGatewayCollectionNext') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
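# Hedged usage sketch (not part of the original SDK module): iterate over all
# ProviderGateway resources across pages. The page-fetching callable is an
# assumption for illustration; the pagination itself follows the
# ProviderGatewayCollection / ProviderGatewayCollectionNext models defined above.
def _iter_provider_gateways(fetch_page):
    """Yield ProviderGateway objects; fetch_page(start) -> ProviderGatewayCollection."""
    start = None
    while True:
        page = fetch_page(start)
        for gateway in page.gateways:
            yield gateway
        if page.next is None:
            break
        start = page.next.start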
            if (delem[profile]['members'] is None):
intf["interfaces"][0]["nvPairs"]["MEMBER_INTERFACES"] = ""
else:
intf["interfaces"][0]["nvPairs"]["MEMBER_INTERFACES"] = ",".join(delem[profile]['members'])
intf["interfaces"][0]["nvPairs"]["PC_MODE"] = delem[profile]['pc_mode']
intf["interfaces"][0]["nvPairs"]["INTF_VRF"] = delem[profile]['int_vrf']
intf["interfaces"][0]["nvPairs"]["IP"] = str(delem[profile]['ipv4_addr'])
if (delem[profile]['ipv4_addr'] != ''):
intf["interfaces"][0]["nvPairs"]["PREFIX"] = str(delem[profile]['ipv4_mask_len'])
else:
intf["interfaces"][0]["nvPairs"]["PREFIX"] = ''
intf["interfaces"][0]["nvPairs"]["ROUTING_TAG"] = delem[profile]['route_tag']
intf["interfaces"][0]["nvPairs"]["PO_ID"] = ifname
intf["interfaces"][0]["nvPairs"]["MTU"] = str(delem[profile]['mtu'])
if (delem[profile]['mode'] == 'monitor'):
intf["interfaces"][0]["nvPairs"]["INTF_NAME"] = ifname
if (delem[profile]['mode'] != 'monitor'):
intf["interfaces"][0]["nvPairs"]["DESC"] = delem[profile]['description']
if (delem[profile]['cmds'] is None):
intf["interfaces"][0]["nvPairs"]["CONF"] = ""
else:
intf["interfaces"][0]["nvPairs"]["CONF"] = "\n".join(delem[profile]['cmds'])
intf["interfaces"][0]["nvPairs"]["ADMIN_STATE"] = str(delem[profile]['admin_state']).lower()
def dcnm_intf_get_vpc_payload (self, delem, intf, profile):
# Extract port id from the given name, which is of the form 'vpc300'
ifname, port_id = self.dcnm_intf_get_if_name (delem['name'], delem['type'])
intf["interfaces"][0].update ({"ifName" : ifname})
if (delem[profile]['mode'] == 'trunk'):
if (delem[profile]['peer1_members'] is None):
intf["interfaces"][0]["nvPairs"]["PEER1_MEMBER_INTERFACES"] = ""
else:
intf["interfaces"][0]["nvPairs"]["PEER1_MEMBER_INTERFACES"] = ",".join(delem[profile]['peer1_members'])
if (delem[profile]['peer2_members'] is None):
intf["interfaces"][0]["nvPairs"]["PEER2_MEMBER_INTERFACES"] = ""
else:
intf["interfaces"][0]["nvPairs"]["PEER2_MEMBER_INTERFACES"] = ",".join(delem[profile]['peer2_members'])
intf["interfaces"][0]["nvPairs"]["PC_MODE"] = delem[profile]['pc_mode']
intf["interfaces"][0]["nvPairs"]["BPDUGUARD_ENABLED"] = delem[profile]['bpdu_guard'].lower()
intf["interfaces"][0]["nvPairs"]["PORTTYPE_FAST_ENABLED"] = str(delem[profile]['port_type_fast']).lower()
intf["interfaces"][0]["nvPairs"]["MTU"] = str(delem[profile]['mtu'])
intf["interfaces"][0]["nvPairs"]["PEER1_ALLOWED_VLANS"] = delem[profile]['peer1_allowed_vlans']
intf["interfaces"][0]["nvPairs"]["PEER2_ALLOWED_VLANS"] = delem[profile]['peer2_allowed_vlans']
if (delem[profile]["peer1_pcid"] == 0):
intf["interfaces"][0]["nvPairs"]["PEER1_PCID"] = str(port_id)
else:
intf["interfaces"][0]["nvPairs"]["PEER1_PCID"] = str(delem[profile]["peer1_pcid"])
if (delem[profile]["peer2_pcid"] == 0):
intf["interfaces"][0]["nvPairs"]["PEER2_PCID"] = str(port_id)
else:
intf["interfaces"][0]["nvPairs"]["PEER2_PCID"] = str(delem[profile]["peer2_pcid"])
if (delem[profile]['mode'] == 'access'):
if (delem[profile]['peer1_members'] is None):
intf["interfaces"][0]["nvPairs"]["PEER1_MEMBER_INTERFACES"] = ""
else:
intf["interfaces"][0]["nvPairs"]["PEER1_MEMBER_INTERFACES"] = ",".join(delem[profile]['peer1_members'])
if (delem[profile]['peer2_members'] is None):
intf["interfaces"][0]["nvPairs"]["PEER2_MEMBER_INTERFACES"] = ""
else:
intf["interfaces"][0]["nvPairs"]["PEER2_MEMBER_INTERFACES"] = ",".join(delem[profile]['peer2_members'])
intf["interfaces"][0]["nvPairs"]["PC_MODE"] = delem[profile]['pc_mode']
intf["interfaces"][0]["nvPairs"]["BPDUGUARD_ENABLED"] = delem[profile]['bpdu_guard'].lower()
intf["interfaces"][0]["nvPairs"]["PORTTYPE_FAST_ENABLED"] = str(delem[profile]['port_type_fast']).lower()
intf["interfaces"][0]["nvPairs"]["MTU"] = str(delem[profile]['mtu'])
intf["interfaces"][0]["nvPairs"]["PEER1_ACCESS_VLAN"] = delem[profile]['peer1_access_vlan']
intf["interfaces"][0]["nvPairs"]["PEER2_ACCESS_VLAN"] = delem[profile]['peer2_access_vlan']
if (delem[profile]["peer1_pcid"] == 0):
intf["interfaces"][0]["nvPairs"]["PEER1_PCID"] = str(port_id)
else:
intf["interfaces"][0]["nvPairs"]["PEER1_PCID"] = str(delem[profile]["peer1_pcid"])
if (delem[profile]["peer2_pcid"] == 0):
intf["interfaces"][0]["nvPairs"]["PEER2_PCID"] = str(port_id)
else:
intf["interfaces"][0]["nvPairs"]["PEER2_PCID"] = str(delem[profile]["peer2_pcid"])
intf["interfaces"][0]["nvPairs"]["PEER1_PO_DESC"] = delem[profile]['peer1_description']
intf["interfaces"][0]["nvPairs"]["PEER2_PO_DESC"] = delem[profile]['peer2_description']
if (delem[profile]['peer1_cmds'] is None):
intf["interfaces"][0]["nvPairs"]["PEER1_PO_CONF"] = ""
else:
intf["interfaces"][0]["nvPairs"]["PEER1_PO_CONF"] = "\n".join(delem[profile]['peer1_cmds'])
if (delem[profile]['peer2_cmds'] is None):
intf["interfaces"][0]["nvPairs"]["PEER2_PO_CONF"] = ""
else:
intf["interfaces"][0]["nvPairs"]["PEER2_PO_CONF"] = "\n".join(delem[profile]['peer2_cmds'])
intf["interfaces"][0]["nvPairs"]["ADMIN_STATE"] = str(delem[profile]['admin_state']).lower()
intf["interfaces"][0]["nvPairs"]["INTF_NAME"] = ifname
def dcnm_intf_get_sub_intf_payload (self, delem, intf, profile):
        # Extract port id from the given name, which is of the form 'eth1/1.1'
ifname, port_id = self.dcnm_intf_get_if_name (delem['name'], delem['type'])
intf["interfaces"][0].update ({"ifName" : ifname})
intf["interfaces"][0]["nvPairs"]["VLAN"] = str(delem[profile]['vlan'])
intf["interfaces"][0]["nvPairs"]["INTF_VRF"] = delem[profile]['int_vrf']
intf["interfaces"][0]["nvPairs"]["IP"] = str(delem[profile]['ipv4_addr'])
intf["interfaces"][0]["nvPairs"]["PREFIX"] = str(delem[profile]['ipv4_mask_len'])
if (delem[profile]['ipv6_addr']):
intf["interfaces"][0]["nvPairs"]["IPv6"] = str(delem[profile]['ipv6_addr'])
intf["interfaces"][0]["nvPairs"]["IPv6_PREFIX"] = str(delem[profile]['ipv6_mask_len'])
else:
intf["interfaces"][0]["nvPairs"]["IPv6"] = ""
intf["interfaces"][0]["nvPairs"]["IPv6_PREFIX"] = ""
intf["interfaces"][0]["nvPairs"]["MTU"] = str(delem[profile]['mtu'])
intf["interfaces"][0]["nvPairs"]["INTF_NAME"] = ifname
intf["interfaces"][0]["nvPairs"]["DESC"] = delem[profile]['description']
if (delem[profile]['cmds'] is None):
intf["interfaces"][0]["nvPairs"]["CONF"] = ""
else:
intf["interfaces"][0]["nvPairs"]["CONF"] = "\n".join(delem[profile]['cmds'])
intf["interfaces"][0]["nvPairs"]["ADMIN_STATE"] = str(delem[profile]['admin_state']).lower()
def dcnm_intf_get_loopback_payload (self, delem, intf, profile):
        # Extract port id from the given name, which is of the form 'lo100'
ifname, port_id = self.dcnm_intf_get_if_name (delem['name'], delem['type'])
intf["interfaces"][0].update ({"ifName" : ifname})
intf["interfaces"][0]["nvPairs"]["INTF_VRF"] = delem[profile]['int_vrf']
intf["interfaces"][0]["nvPairs"]["IP"] = str(delem[profile]['ipv4_addr'])
intf["interfaces"][0]["nvPairs"]["V6IP"] = str(delem[profile]['ipv6_addr'])
intf["interfaces"][0]["nvPairs"]["ROUTE_MAP_TAG"] = delem[profile]['route_tag']
intf["interfaces"][0]["nvPairs"]["INTF_NAME"] = ifname
intf["interfaces"][0]["nvPairs"]["DESC"] = delem[profile]['description']
if (delem[profile]['cmds'] is None):
intf["interfaces"][0]["nvPairs"]["CONF"] = ""
else:
intf["interfaces"][0]["nvPairs"]["CONF"] = "\n".join(delem[profile]['cmds'])
intf["interfaces"][0]["nvPairs"]["ADMIN_STATE"] = str(delem[profile]['admin_state']).lower()
def dcnm_intf_get_eth_payload (self, delem, intf, profile):
        # Extract port id from the given name, which is of the form 'eth1/1'
ifname, port_id = self.dcnm_intf_get_if_name (delem['name'], delem['type'])
intf["interfaces"][0].update ({"ifName" : ifname})
if (delem[profile]['mode'] == 'trunk'):
intf["interfaces"][0]["nvPairs"]["BPDUGUARD_ENABLED"] = delem[profile]['bpdu_guard'].lower()
intf["interfaces"][0]["nvPairs"]["PORTTYPE_FAST_ENABLED"] = str(delem[profile]['port_type_fast']).lower()
intf["interfaces"][0]["nvPairs"]["MTU"] = str(delem[profile]['mtu'])
intf["interfaces"][0]["nvPairs"]["SPEED"] = str(delem[profile]['speed'])
intf["interfaces"][0]["nvPairs"]["ALLOWED_VLANS"] = delem[profile]['allowed_vlans']
intf["interfaces"][0]["nvPairs"]["INTF_NAME"] = ifname
if (delem[profile]['mode'] == 'access'):
intf["interfaces"][0]["nvPairs"]["BPDUGUARD_ENABLED"] = delem[profile]['bpdu_guard'].lower()
intf["interfaces"][0]["nvPairs"]["PORTTYPE_FAST_ENABLED"] = str(delem[profile]['port_type_fast']).lower()
intf["interfaces"][0]["nvPairs"]["MTU"] = str(delem[profile]['mtu'])
intf["interfaces"][0]["nvPairs"]["SPEED"] = str(delem[profile]['speed'])
intf["interfaces"][0]["nvPairs"]["ACCESS_VLAN"] = delem[profile]['access_vlan']
intf["interfaces"][0]["nvPairs"]["INTF_NAME"] = ifname
if (delem[profile]['mode'] == 'routed'):
intf["interfaces"][0]["nvPairs"]["INTF_VRF"] = delem[profile]['int_vrf']
intf["interfaces"][0]["nvPairs"]["IP"] = str(delem[profile]['ipv4_addr'])
if (delem[profile]['ipv4_addr'] != ''):
intf["interfaces"][0]["nvPairs"]["PREFIX"] = str(delem[profile]['ipv4_mask_len'])
else:
intf["interfaces"][0]["nvPairs"]["PREFIX"] = ''
intf["interfaces"][0]["nvPairs"]["ROUTING_TAG"] = delem[profile]['route_tag']
intf["interfaces"][0]["nvPairs"]["MTU"] = str(delem[profile]['mtu'])
intf["interfaces"][0]["nvPairs"]["SPEED"] = str(delem[profile]['speed'])
intf["interfaces"][0]["nvPairs"]["INTF_NAME"] = ifname
if (delem[profile]['mode'] == 'monitor'):
intf["interfaces"][0]["nvPairs"]["INTF_NAME"] = ifname
if (delem[profile]['mode'] == 'epl_routed'):
intf["interfaces"][0]["nvPairs"]["IP"] = str(delem[profile]['ipv4_addr'])
intf["interfaces"][0]["nvPairs"]["PREFIX"] = str(delem[profile]['ipv4_mask_len'])
intf["interfaces"][0]["nvPairs"]["IPv6"] = str(delem[profile]['ipv6_addr'])
intf["interfaces"][0]["nvPairs"]["IPv6_PREFIX"] = str(delem[profile]['ipv6_mask_len'])
intf["interfaces"][0]["nvPairs"]["ROUTING_TAG"] = delem[profile]['route_tag']
intf["interfaces"][0]["nvPairs"]["MTU"] = str(delem[profile]['mtu'])
intf["interfaces"][0]["nvPairs"]["SPEED"] = str(delem[profile]['speed'])
intf["interfaces"][0]["nvPairs"]["INTF_NAME"] = ifname
if (delem[profile]['mode'] != 'monitor'):
intf["interfaces"][0]["nvPairs"]["DESC"] = delem[profile]['description']
if (delem[profile]['cmds'] is None):
intf["interfaces"][0]["nvPairs"]["CONF"] = ""
else:
intf["interfaces"][0]["nvPairs"]["CONF"] = "\n".join(delem[profile]['cmds'])
intf["interfaces"][0]["nvPairs"]["ADMIN_STATE"] = str(delem[profile]['admin_state']).lower()
# New Interfaces
def dcnm_get_intf_payload (self, delem, sw):
intf = {
"deploy": False,
"policy": "",
"interfaceType": "",
"interfaces": [
{
"serialNumber": "",
"interfaceType": "",
"ifName": "",
"fabricName": "",
"nvPairs": {
}
}
],
"skipResourceCheck": str(True).lower()
}
# Each interface type will have a different profile name. Set that based on the interface type and use that
# below to extract the required parameters
# Monitor ports are not put into diff_deploy, since they don't have any
# commands to be executed on switch. This will affect the idempotence
# check
if (delem['profile']['mode'] == 'monitor'):
intf.update ({"deploy" : False})
else:
intf.update ({"deploy" : delem['deploy']})
# Each type of interface and mode will have a different set of params.
# First fill in the params common to all interface types and modes
#intf.update ({"interfaceType" : self.int_types[delem['type']]})
if ('vpc' == delem['type']):
intf["interfaces"][0].update ({"serialNumber" : str(self.vpc_ip_sn[sw])})
else:
intf["interfaces"][0].update ({"serialNumber" : str(self.ip_sn[sw])})
intf["interfaces"][0].update ({"interfaceType" : self.int_types[delem['type']]})
intf["interfaces"][0].update ({"fabricName" : self.fabric})
if ('profile' not in delem.keys()):
# for state 'deleted', 'profile' construct is not included. So just update the ifName here
# and return. Rest of the code is all 'profile' specific and hence not required for 'deleted'
ifname, port_id = self.dcnm_intf_get_if_name (delem['name'], delem['type'])
intf["interfaces"][0].update ({"ifName" : ifname})
return intf
pol_ind_str = delem['type'] + '_' + delem['profile']['mode']
#intf.update ({"policy" : self.pol_types[delem['profile']['mode']]})
intf.update ({"policy" : self.pol_types[pol_ind_str]})
intf.update ({"interfaceType" : self.int_types[delem['type']]})
# Rest of the data in the dict depends on the interface type and the template
if ('pc' == delem['type']):
self.dcnm_intf_get_pc_payload(delem, intf, 'profile')
if ('sub_int' == delem['type']):
self.dcnm_intf_get_sub_intf_payload(delem, intf, 'profile')
if ('lo' == delem['type']):
self.dcnm_intf_get_loopback_payload (delem, intf, 'profile')
if ('vpc' == delem['type']):
self.dcnm_intf_get_vpc_payload(delem, intf, 'profile')
if ('eth' == delem['type']):
self.dcnm_intf_get_eth_payload(delem, intf, 'profile')
# Ethernet interface payload does not have interfaceType and skipResourceCheck flags. Pop
# them out
intf.pop('skipResourceCheck')
return intf
def dcnm_intf_merge_intf_info (self, intf_info, if_head):
if (not if_head):
if_head.append(intf_info)
return
for item in if_head:
if (item['policy'] == intf_info['policy']):
item['interfaces'].append(intf_info['interfaces'][0])
return
if_head.append(intf_info)
def dcnm_intf_get_want(self):
if (None is self.config):
return
if not self.intf_info:
return
        # self.intf_info is a list of dictionaries, each having config related to a particular interface
for delem in self.intf_info:
if (any('profile' in key for key in delem)):
for sw in delem['switch']:
intf_payload = self.dcnm_get_intf_payload (delem, sw)
if (intf_payload not in self.want):
self.want.append(intf_payload)
def dcnm_intf_get_intf_info(self, ifName, serialNumber, ifType):
        # For VPC interfaces the serialNumber will be a combined one. But GET on interface cannot
# pass this combined serial number. We will have to pass individual ones
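        # e.g. an illustrative combined value 'SN1~SN2' yields 'SN1' for the GET request below.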
if (ifType == 'INTERFACE_VPC'):
sno = serialNumber.split('~')[0]
else:
sno = serialNumber
path = '/rest/interface?serialNumber=' + sno + '&ifName=' + ifName
resp = dcnm_send (self.module, 'GET', path)
if ('DATA' in resp and resp['DATA']):
return resp['DATA'][0]
else:
return []
def dcnm_intf_get_intf_info_from_dcnm(self, intf):
return self.dcnm_intf_get_intf_info (intf['ifName'], intf['serialNumber'], intf['interfaceType'])
def dcnm_intf_get_have_all (self, sw):
# Check if you have already got the details for this switch
if (sw in self.have_all_list):
return
# Check if the serial number is a combined one which will be the case for vPC interfaces.
# If combined, then split it up and pass one of the serial numbers and not the combined one.
if ('~' in self.ip_sn[sw]):
sno = self.ip_sn[sw].split('~')[0]
else:
sno = self.ip_sn[sw]
# GET all interfaces
path = '/rest/interface/detail?serialNumber=' + sno
resp = dcnm_send(self.module, 'GET', path)
if ('DATA' in resp and resp['DATA']):
self.have_all.extend(resp['DATA'])
self.have_all_list.append(sw)
else:
self.have_all_list.append(sw)
return []
# adminStatus in all_int_raw will give the deployed status. For deployed interfaces
# adminStatus will be 1 and ifIndex will also be allocated and non zero
def dcnm_intf_get_have(self):
if (not self.want):
return
# We have all the requested interface config in self.want. Interfaces are grouped together based on the
# policy string and the interface name in a single dict entry.
for elem in self.want:
for intf in elem['interfaces']:
# For each interface present here, get the information that is already available
# in DCNM. Based on this information, we will create the required payloads to be sent
                # to the DCNM controller based on the requested state.
                # Fetch the information from DCNM w.r.t. the interface that we have in self.want
intf_payload = self.dcnm_intf_get_intf_info_from_dcnm(intf)
if (intf_payload):
self.have.append(intf_payload)
def dcnm_intf_compare_elements (self, name, sno, fabric, ie1, ie2, k, state):
# unicode encoded strings must be decoded to get proper strings which is required
# for comparison purposes
if sys.version_info[0] >= 3:
            # Python version 3 onwards treats unicode as strings. No special handling is required.
"""
Module: LMR_verify_gridPRCP.py
Purpose: Generates spatial verification statistics of LMR gridded precipitation
against various gridded historical instrumental precipitation datasets
and precipitation from reanalyses.
Originator: <NAME>, U. of Washington, March 2016
Revisions:
"""
import matplotlib
# need to do this backend when running remotely or to suppress figures interactively
matplotlib.use('Agg')
# generic imports
import numpy as np
import glob, os, sys, calendar
from datetime import datetime, timedelta
from netCDF4 import Dataset, date2num, num2date
import mpl_toolkits.basemap as bm
import matplotlib.pyplot as plt
from matplotlib import ticker
from spharm import Spharmt, getspecindx, regrid
# LMR specific imports
sys.path.append('../')
from LMR_utils import global_hemispheric_means, assimilated_proxies, coefficient_efficiency
from load_gridded_data import read_gridded_data_CMIP5_model
from LMR_plot_support import *
# change default value of latlon kwarg to True.
bm.latlon_default = True
##################################
# START: set user parameters here
##################################
# option to suppress figures
iplot = True
iplot_individual_years = False
# centered time mean (nya must be odd! 3 = 3 yr mean; 5 = 5 year mean; etc 0 = none)
nya = 0
# option to print figures
fsave = True
#fsave = False
# set paths, the filename for plots, and global plotting preferences
# override datadir
#datadir_output = './data/'
#datadir_output = '/home/disk/kalman2/wperkins/LMR_output/archive'
datadir_output = '/home/disk/kalman3/rtardif/LMR/output'
#datadir_output = '/home/disk/ekman4/rtardif/LMR/output'
#datadir_output = '/home/disk/kalman3/hakim/LMR'
# Directories where precip and reanalysis data can be found
datadir_precip = '/home/disk/kalman3/rtardif/LMR/data/verification'
datadir_reanl = '/home/disk/kalman3/rtardif/LMR/data/model'
# file specification
#
# current datasets
# ---
#nexp = 'production_gis_ccsm4_pagesall_0.75'
#nexp = 'production_mlost_ccsm4_pagesall_0.75'
#nexp = 'production_cru_ccsm4_pagesall_0.75'
#nexp = 'production_mlost_era20c_pagesall_0.75'
#nexp = 'production_mlost_era20cm_pagesall_0.75'
# ---
nexp = 'test'
# ---
# perform verification using all recon. MC realizations ( MCset = None )
# or over a custom selection ( MCset = (begin,end) )
# ex. MCset = (0,0) -> only the first MC run
# MCset = (0,10) -> the first 11 MC runs (from 0 to 10 inclusively)
# MCset = (80,100) -> the 80th to 100th MC runs (21 realizations)
MCset = None
#MCset = (0,10)
# Definition of variables to verify
# kind name variable long name bounds units mult. factor
verif_dict = \
{
'pr_sfc_Amon' : ('anom', 'PRCP', 'Precipitation',-400.0,400.0,'(mm/yr)',1.0), \
}
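# e.g. for 'pr_sfc_Amon': [0] kind ('anom'), [1] short name, [2] long name,
# [3]-[4] plotting bounds, [5] units string, [6] multiplicative factor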
# time range for verification (in years CE)
#trange = [1979,2000] #works for nya = 0
trange = [1880,2000] #works for nya = 0
#trange = [1900,2000] #works for nya = 0
#trange = [1885,1995] #works for nya = 5
#trange = [1890,1990] #works for nya = 10
# reference period over which mean is calculated & subtracted
# from all datasets (in years CE)
# NOTE: GPCP and CMAP data cover the 1979-2015 period
ref_period = [1979, 1999]
valid_frac = 0.0
# number of contours for plots
nlevs = 21
# plot alpha transparency
alpha = 0.5
# set the default size of the figure in inches. ['figure.figsize'] = width, height;
# aspect ratio appears preserved on smallest of the two
plt.rcParams['figure.figsize'] = 10, 10 # that's default image size for this interactive session
plt.rcParams['axes.linewidth'] = 2.0 # set the value globally
plt.rcParams['font.weight'] = 'bold' # set the font weight globally
plt.rcParams['font.size'] = 11 # set the font size globally
#plt.rc('text', usetex=True)
plt.rc('text', usetex=False)
##################################
# END: set user parameters here
##################################
verif_vars = list(verif_dict.keys())
workdir = datadir_output + '/' + nexp
print('working directory = ' + workdir)
print('\n getting file system information...\n')
# get number of mc realizations from directory count
# RT: modified way to determine list of directories with mc realizations
# get a listing of the iteration directories
dirs = glob.glob(workdir+"/r*")
# selecting the MC iterations to keep
if MCset:
dirset = dirs[MCset[0]:MCset[1]+1]
else:
dirset = dirs
mcdir = [item.split('/')[-1] for item in dirset]
niters = len(mcdir)
print('mcdir:' + str(mcdir))
print('niters = ' + str(niters))
# Loop over verif. variables
for var in verif_vars:
# read ensemble mean data
print('\n reading LMR ensemble-mean data...\n')
first = True
k = -1
for dir in mcdir:
k = k + 1
ensfiln = workdir + '/' + dir + '/ensemble_mean_'+var+'.npz'
npzfile = np.load(ensfiln)
print(dir, ':', npzfile.files)
tmp = npzfile['xam']
print('shape of tmp: ' + str(np.shape(tmp)))
if first:
first = False
recon_times = npzfile['years']
LMR_time = np.array(list(map(int,recon_times)))
lat = npzfile['lat']
lon = npzfile['lon']
nlat = npzfile['nlat']
nlon = npzfile['nlon']
lat2 = np.reshape(lat,(nlat,nlon))
lon2 = np.reshape(lon,(nlat,nlon))
years = npzfile['years']
nyrs = len(years)
xam = np.zeros([nyrs,np.shape(tmp)[1],np.shape(tmp)[2]])
xam_all = np.zeros([niters,nyrs,np.shape(tmp)[1],np.shape(tmp)[2]])
xam = xam + tmp
xam_all[k,:,:,:] = tmp
# this is the sample mean computed with low-memory accumulation
xam = xam/len(mcdir)
# this is the sample mean computed with numpy on all data
xam_check = xam_all.mean(0)
# check..
max_err = np.max(np.max(np.max(xam_check - xam)))
if max_err > 1e-4:
print('max error = ' + str(max_err))
raise Exception('sample mean does not match what is in the ensemble files!')
# sample variance
xam_var = xam_all.var(0)
print(np.shape(xam_var))
print('\n shape of the ensemble array: ' + str(np.shape(xam_all)) +'\n')
print('\n shape of the ensemble-mean array: ' + str(np.shape(xam)) +'\n')
# Convert units to match verif dataset: from kg m-2 s-1 to mm (per year)
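    # A water flux of 1 kg m-2 s-1 corresponds to a depth rate of (1/rho) m s-1 with
    # rho = 1000 kg m-3, i.e. 1000/rho mm s-1. Accumulated over a year this gives
    #   mm/yr = flux * (1000/rho) * 86400 * n_days
    # with n_days = 366 for leap years and 365 otherwise, as computed in the loop below.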
rho = 1000.0
for y in range(nyrs):
if calendar.isleap(int(years[y])):
xam[y,:,:] = 1000.*xam[y,:,:]*366.*86400./rho
else:
xam[y,:,:] = 1000.*xam[y,:,:]*365.*86400./rho
#################################################################
# BEGIN: load verification data #
#################################################################
print('\nloading verification data...\n')
# GPCP ----------------------------------------------------------
infile = datadir_precip+'/'+'GPCP/'+'GPCPv2.2_precip.mon.mean.nc'
verif_data = Dataset(infile,'r')
# Time
time = verif_data.variables['time']
time_obj = num2date(time[:],units=time.units)
time_yrs = np.asarray([time_obj[k].year for k in range(len(time_obj))])
yrs_range = list(set(time_yrs))
# lat/lon
verif_lat = verif_data.variables['lat'][:]
verif_lon = verif_data.variables['lon'][:]
nlat_GPCP = len(verif_lat)
nlon_GPCP = len(verif_lon)
lon_GPCP, lat_GPCP = np.meshgrid(verif_lon, verif_lat)
# Precip
verif_precip_monthly = verif_data.variables['precip'][:]
[ntime,nlon_v,nlat_v] = verif_precip_monthly.shape
# convert mm/day monthly data to mm/year yearly data
    GPCP_time = np.zeros(shape=len(yrs_range),dtype=int)
GPCP = np.zeros(shape=[len(yrs_range),nlat_GPCP,nlon_GPCP])
i = 0
for yr in yrs_range:
GPCP_time[i] = int(yr)
inds = np.where(time_yrs == yr)[0]
if calendar.isleap(yr):
nbdays = 366.
else:
nbdays = 365.
accum = np.zeros(shape=[nlat_GPCP, nlon_GPCP])
for k in range(len(inds)):
days_in_month = calendar.monthrange(time_obj[inds[k]].year, time_obj[inds[k]].month)[1]
accum = accum + verif_precip_monthly[inds[k],:,:]*days_in_month
GPCP[i,:,:] = accum # precip in mm
i = i + 1
# CMAP ----------------------------------------------------------
infile = datadir_precip+'/'+'CMAP/'+'CMAP_enhanced_precip.mon.mean.nc'
verif_data = Dataset(infile,'r')
# Time
time = verif_data.variables['time']
time_obj = num2date(time[:],units=time.units)
time_yrs = np.asarray([time_obj[k].year for k in range(len(time_obj))])
yrs_range = list(set(time_yrs))
# lat/lon
verif_lat = verif_data.variables['lat'][:]
verif_lon = verif_data.variables['lon'][:]
nlat_CMAP = len(verif_lat)
nlon_CMAP = len(verif_lon)
lon_CMAP, lat_CMAP = np.meshgrid(verif_lon, verif_lat)
# Precip
verif_precip_monthly = verif_data.variables['precip'][:]
[ntime,nlon_v,nlat_v] = verif_precip_monthly.shape
# convert mm/day monthly data to mm/year yearly data
    CMAP_time = np.zeros(shape=len(yrs_range),dtype=int)
CMAP = np.zeros(shape=[len(yrs_range),nlat_CMAP,nlon_CMAP])
i = 0
for yr in yrs_range:
CMAP_time[i] = int(yr)
inds = np.where(time_yrs == yr)[0]
if calendar.isleap(yr):
nbdays = 366.
else:
nbdays = 365.
accum = np.zeros(shape=[nlat_CMAP, nlon_CMAP])
for k in range(len(inds)):
days_in_month = calendar.monthrange(time_obj[inds[k]].year, time_obj[inds[k]].month)[1]
accum = accum + verif_precip_monthly[inds[k],:,:]*days_in_month
CMAP[i,:,:] = accum # precip in mm
i = i + 1
# ----------
# Reanalyses
# ----------
# Define month sequence for the calendar year
# (argument needed in upload of reanalysis data)
annual = list(range(1,13))
# 20th Century reanalysis (TCR) ---------------------------------
vardict = {var: verif_dict[var][0]}
vardef = var
datadir = datadir_reanl +'/20cr'
datafile = vardef +'_20CR_185101-201112.nc'
dd = read_gridded_data_CMIP5_model(datadir,datafile,vardict,outtimeavg=annual,
anom_ref=ref_period)
rtime = dd[vardef]['years']
TCR_time = np.array([d.year for d in rtime])
lats = dd[vardef]['lat']
lons = dd[vardef]['lon']
latshape = lats.shape
lonshape = lons.shape
    if len(latshape) == 2 and len(lonshape) == 2:
# stored in 2D arrays
lat_TCR = np.unique(lats)
lon_TCR = np.unique(lons)
nlat_TCR, = lat_TCR.shape
nlon_TCR, = lon_TCR.shape
else:
# stored in 1D arrays
lon_TCR = lons
lat_TCR = lats
nlat_TCR = len(lat_TCR)
nlon_TCR = len(lon_TCR)
lon2_TCR, lat2_TCR = np.meshgrid(lon_TCR, lat_TCR)
TCRfull = dd[vardef]['value'] + dd[vardef]['climo'] # Full field
TCR = dd[vardef]['value'] # Anomalies
# Conversion from kg m-2 s-1
rho = 1000.0
i = 0
for y in TCR_time:
if calendar.isleap(y):
TCRfull[i,:,:] = 1000.*TCRfull[i,:,:]*366.*86400./rho
TCR[i,:,:] = 1000.*TCR[i,:,:]*366.*86400./rho
else:
TCRfull[i,:,:] = 1000.*TCRfull[i,:,:]*365.*86400./rho
TCR[i,:,:] = 1000.*TCR[i,:,:]*365.*86400./rho
i = i + 1
# ERA 20th Century reanalysis (ERA20C) ---------------------------------
vardict = {var: verif_dict[var][0]}
vardef = var
datadir = datadir_reanl +'/era20c'
datafile = vardef +'_ERA20C_190001-201012.nc'
dd = read_gridded_data_CMIP5_model(datadir,datafile,vardict,outtimeavg=annual,
anom_ref=ref_period)
rtime = dd[vardef]['years']
ERA_time = np.array([d.year for d in rtime])
lats = dd[vardef]['lat']
lons = dd[vardef]['lon']
latshape = lats.shape
lonshape = lons.shape
    if len(latshape) == 2 and len(lonshape) == 2:
# stored in 2D arrays
lat_ERA = np.unique(lats)
lon_ERA = np.unique(lons)
nlat_ERA, = lat_ERA.shape
nlon_ERA, = lon_ERA.shape
else:
# stored in 1D arrays
lon_ERA = lons
lat_ERA = lats
nlat_ERA = len(lat_ERA)
nlon_ERA = len(lon_ERA)
lon2_ERA, lat2_ERA = np.meshgrid(lon_ERA, lat_ERA)
ERAfull = dd[vardef]['value'] + dd[vardef]['climo'] # Full field
ERA = dd[vardef]['value'] # Anomalies
# Conversion from kg m-2 s-1
    rho = 1000.0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#=======================================================================
#
# xchacha.py
# ----------
# Simple model of the XChaCha stream cipher. Used as a reference for
# the HW implementation. Also used as part of the TRNG Python model.
# The code follows the structure of the HW implementation as much
# as possible.
#
# This model is heavily based on the chacha.py model in the
# Secworks ChaCha HW implementation.
#
#
# Author: <NAME>
# Copyright (c) 2014, Secworks Sweden AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#=======================================================================
#-------------------------------------------------------------------
# Python module imports.
#-------------------------------------------------------------------
import sys
#-------------------------------------------------------------------
# Constants.
#-------------------------------------------------------------------
TAU = [0x61707865, 0x3120646e, 0x79622d36, 0x6b206574]
SIGMA = [0x61707865, 0x3320646e, 0x79622d32, 0x6b206574]
#-------------------------------------------------------------------
# XChaCha()
#-------------------------------------------------------------------
class XChaCha():
#---------------------------------------------------------------
# __init__()
#---------------------------------------------------------------
def __init__(self, rounds = 8, verbose = 0):
self.state = [0] * 16
self.x = [0] * 16
self.rounds = rounds
self.verbose = verbose
#---------------------------------------------------------------
# set_key_iv()
#
# Set key and iv. Basically reinitialize the cipher.
# This also resets the block counter.
#---------------------------------------------------------------
def set_key_iv(self, key, iv):
keyword0 = self._b2w(key[0:4])
keyword1 = self._b2w(key[4:8])
keyword2 = self._b2w(key[8:12])
keyword3 = self._b2w(key[12:16])
if len(key) == 16:
self.state[0] = TAU[0]
self.state[1] = TAU[1]
self.state[2] = TAU[2]
self.state[3] = TAU[3]
self.state[4] = keyword0
self.state[5] = keyword1
self.state[6] = keyword2
self.state[7] = keyword3
self.state[8] = keyword0
self.state[9] = keyword1
self.state[10] = keyword2
self.state[11] = keyword3
elif len(key) == 32:
keyword4 = self._b2w(key[16:20])
keyword5 = self._b2w(key[20:24])
keyword6 = self._b2w(key[24:28])
keyword7 = self._b2w(key[28:32])
self.state[0] = SIGMA[0]
self.state[1] = SIGMA[1]
self.state[2] = SIGMA[2]
self.state[3] = SIGMA[3]
self.state[4] = keyword0
self.state[5] = keyword1
self.state[6] = keyword2
self.state[7] = keyword3
self.state[8] = keyword4
self.state[9] = keyword5
self.state[10] = keyword6
self.state[11] = keyword7
else:
print("Key length of %d bits, is not supported." % (len(key) * 8))
# Common state init for both key lengths.
self.block_counter = [0, 0]
self.state[12] = self.block_counter[0]
self.state[13] = self.block_counter[1]
self.state[14] = self._b2w(iv[0:4])
self.state[15] = self._b2w(iv[4:8])
if self.verbose:
print("State after init:")
self._print_state()
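    # Hypothetical usage sketch (not part of the original model):
    #   cipher = XChaCha(rounds = 8)
    #   cipher.set_key_iv([0] * 32, [0] * 8)      # 256-bit key, 64-bit IV
    #   keystream_block = cipher.next([0] * 64)   # XOR with zeros -> keystream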
#---------------------------------------------------------------
# next()
#
    # Encrypt/decrypt the next block. This also updates the
# internal state and increases the block counter.
#---------------------------------------------------------------
def next(self, data_in):
# Copy the current internal state to the temporary state x.
self.x = self.state[:]
if self.verbose:
print("State before round processing.")
self._print_state()
if self.verbose:
print("X before round processing:")
self._print_x()
# Update the internal state by performing
# (rounds / 2) double rounds.
for i in range(int(self.rounds / 2)):
if (self.verbose > 1):
print("Doubleround 0x%02x:" % i)
self._doubleround()
if (self.verbose > 1):
print("")
if self.verbose:
print("X after round processing:")
self._print_x()
# Update the internal state by adding the elements
# of the temporary state to the internal state.
self.state = [((self.state[i] + self.x[i]) & 0xffffffff) for i in range(16)]
if self.verbose:
print("State after round processing.")
self._print_state()
bytestate = []
for i in self.state:
bytestate += self._w2b(i)
# Create the data out words.
data_out = [data_in[i] ^ bytestate[i] for i in range(64)]
# Update the block counter.
self._inc_counter()
return data_out
#---------------------------------------------------------------
# _doubleround()
#
    # Perform the two complete rounds that comprise the
# double round.
#---------------------------------------------------------------
def _doubleround(self):
if (self.verbose > 0):
print("Start of double round processing.")
self._quarterround(0, 4, 8, 12)
if (self.verbose > 1):
print("X after QR 0")
self._print_x()
self._quarterround(1, 5, 9, 13)
if (self.verbose > 1):
print("X after QR 1")
self._print_x()
self._quarterround(2, 6, 10, 14)
if (self.verbose > 1):
print("X after QR 2")
self._print_x()
self._quarterround(3, 7, 11, 15)
if (self.verbose > 1):
print("X after QR 3")
self._print_x()
self._quarterround(0, 5, 10, 15)
if (self.verbose > 1):
print("X after QR 4")
self._print_x()
self._quarterround(1, 6, 11, 12)
if (self.verbose > 1):
print("X after QR 5")
self._print_x()
self._quarterround(2, 7, 8, 13)
if (self.verbose > 1):
print("X after QR 6")
self._print_x()
self._quarterround(3, 4, 9, 14)
if (self.verbose > 1):
print("X after QR 7")
self._print_x()
if (self.verbose > 0):
print("End of double round processing.")
#---------------------------------------------------------------
# _quarterround()
#
# Updates four elements in the state vector x given by
# their indices.
#---------------------------------------------------------------
def _quarterround(self, ai, bi, ci, di):
        # Extract the four elements from x at the given indices.
a, b, c, d = self.x[ai], self.x[bi], self.x[ci], self.x[di]
if (self.verbose > 1):
print("Indata to quarterround:")
print("X state indices:", ai, bi, ci, di)
print("a = 0x%08x, b = 0x%08x, c = 0x%08x, d = 0x%08x" %\
(a, b, c, d))
print("")
a0 = (a + b) & 0xffffffff
d0 = d ^ a0
d1 = ((d0 << 16) + (d0 >> 16)) & 0xffffffff
c0 = (c + d1) & 0xffffffff
b0 = b ^ c0
b1 = ((b0 << 12) + (b0 >> 20)) & 0xffffffff
a1 = (a0 + b1) & 0xffffffff
d2 = d1 ^ a1
d3 = ((d2 << 8) + (d2 >> 24)) & 0xffffffff
c1 = (c0 + d3) & 0xffffffff
b2 = b1 ^ c1
b3 = ((b2 << 7) + (b2 >> 25)) & 0xffffffff
if (self.verbose > 2):
print("Intermediate values:")
print("a0 = 0x%08x, a1 = 0x%08x" % (a0, a1))
print("b0 = 0x%08x, b1 = 0x%08x, b2 = 0x%08x, b3 = 0x%08x" %\
(b0, b1, b2, b3))
print("c0 = 0x%08x, c1 = 0x%08x" % (c0, c1))
print("d0 = 0x%08x, d1 = 0x%08x, d2 = 0x%08x, d3 = 0x%08x" %\
(d0, d1, d2, d3))
print("")
a_prim = a1
b_prim = b3
c_prim = c1
d_prim = d3
if (self.verbose > 1):
print("Outdata from quarterround:")
print("a_prim = 0x%08x, b_prim = 0x%08x, c_prim = 0x%08x, d_prim = 0x%08x" %\
(a_prim, b_prim, c_prim, d_prim))
print("")
        # Write the four updated elements back into x at the given indices.
self.x[ai], self.x[bi] = a_prim, b_prim
self.x[ci], self.x[di] = c_prim, d_prim
#---------------------------------------------------------------
# _inc_counter()
#
# Increase the 64 bit block counter.
#---------------------------------------------------------------
    def _inc_counter(self):
        # Increment the low 32-bit word; on wrap-around carry into the high word.
        self.block_counter[0] = (self.block_counter[0] + 1) & 0xffffffff
        if self.block_counter[0] == 0:
            self.block_counter[1] = (self.block_counter[1] + 1) & 0xffffffff
#---------------------------------------------------------------
# _b2w()
#
# Given a list of four bytes returns the little endian
# 32 bit word representation of the bytes.
#---------------------------------------------------------------
def _b2w(self, bytes):
return (bytes[0] + (bytes[1] << 8)
+ (bytes[2] << 16) + (bytes[3] << 24)) & 0xffffffff
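    # Example: _b2w([0x01, 0x02, 0x03, 0x04]) returns 0x04030201.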
#---------------------------------------------------------------
# _w2b()
#
    # Given a 32-bit word returns a list of four bytes that is
    # the little endian byte representation of the word.
#---------------------------------------------------------------
def _w2b(self, word):
return [(word & 0x000000ff), ((word & 0x0000ff00) >> 8),
((word & 0x00ff0000) >> 16), ((word & 0xff000000) >> 24)]
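    # Example: _w2b(0x04030201) returns [0x01, 0x02, 0x03, 0x04].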
#---------------------------------------------------------------
# _print_state()
#
# Print the internal state.
#---------------------------------------------------------------
def _print_state(self):
print(" 0: 0x%08x, 1: 0x%08x, 2: 0x%08x, 3: 0x%08x" %\
(self.state[0], self.state[1], self.state[2], self.state[3]))
print(" 4: 0x%08x, 5: 0x%08x, 6: 0x%08x, 7: 0x%08x" %\
(self.state[4], self.state[5], self.state[6], self.state[7]))
print(" 8: 0x%08x, 9: 0x%08x, 10: 0x%08x, 11: 0x%08x" %\
(self.state[8], self.state[9], self.state[10], self.state[11]))
| |
self.de_acti3(self.de_conv3(self.de_padd3(self.de_upbi3(d3))))
d5 = self.de_conv4(self.de_padd4(d4))
d5_1 = self.de_acti4_1(self.de_conv4_1(self.de_padd4_1(d4)))
lr_x = lr2
lr_x2 = lr_x * self.down(msk) + self.down(rimg) * (1.0 - self.down(msk))
compltd_img = d5
compltd_img = compltd_img * msk + rimg * (1.0 - msk)
lr_compltd_img = self.down(compltd_img)
lr_res = lr_x2 - lr_compltd_img
hr_res = self.up(lr_res)
out = compltd_img + hr_res * d5_1
return compltd_img, out, lr_x
# return compltd_img, reconst_img, lr_x
class BlendGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=3, norm_layer=nn.InstanceNorm2d,
pad_type='reflect', activation=nn.ELU()):
assert (n_blocks >= 0)
super(BlendGenerator, self).__init__()
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d
# Image encode
self.en_padd1 = self.pad(3)
self.en_conv1 = nn.Conv2d(input_nc, ngf, kernel_size=7, stride=1, padding=0)
self.en_norm1 = norm_layer(ngf)
self.en_acti1 = activation
self.en_padd2 = self.pad(1)
self.en_conv2 = nn.Conv2d(ngf, ngf * 2, kernel_size=3, stride=2, padding=0)
self.en_norm2 = norm_layer(ngf * 2)
self.en_acti2 = activation
self.en_padd3 = self.pad(1)
self.en_conv3 = nn.Conv2d(ngf * 2, ngf * 4, kernel_size=3, stride=2, padding=0)
self.en_norm3 = norm_layer(ngf * 4)
self.en_acti3 = activation
self.en_padd4 = self.pad(1)
self.en_conv4 = nn.Conv2d(ngf * 4, ngf * 8, kernel_size=3, stride=2, padding=0)
self.en_norm4 = norm_layer(ngf * 8)
self.en_acti4 = activation
# middle resnetblocks
self.res_blk1 = ResnetBlock(ngf * 8, kernel_size=3, stride=1, padding=1, pad_type='reflect', norm='instance')
self.res_blk2 = ResnetBlock(ngf * 8, kernel_size=3, stride=1, padding=1, pad_type='reflect', norm='instance')
self.res_blk3 = ResnetBlock(ngf * 8, kernel_size=3, stride=1, padding=1, pad_type='reflect', norm='instance')
# image decoder
self.de_conv1 = nn.ConvTranspose2d(ngf * 8, ngf * 4, kernel_size=3, stride=2, padding=1, output_padding=1)
self.de_norm1 = norm_layer(ngf * 4)
self.de_acti1 = activation
self.de_conv2 = nn.ConvTranspose2d(ngf * 4, ngf * 2, kernel_size=3, stride=2, padding=1, output_padding=1)
self.de_norm2 = norm_layer(ngf * 2)
self.de_acti2 = activation
self.de_conv3 = nn.ConvTranspose2d(ngf * 2, ngf, kernel_size=3, stride=2, padding=1, output_padding=1)
self.de_norm3 = norm_layer(ngf)
self.de_acti3 = activation
self.de_padd4 = self.pad(3)
self.de_conv4 = nn.Conv2d(ngf, output_nc, kernel_size=7, stride=1, padding=0)
self.de_acti4 = nn.Sigmoid()
def forward(self, completed_img, msked_img):
x = torch.cat((completed_img, msked_img), dim=1)
e1 = self.en_acti1(self.en_norm1(self.en_conv1(self.en_padd1(x)))) # 512x512x64
e2 = self.en_acti2(self.en_norm2(self.en_conv2(self.en_padd2(e1)))) # 256x256x128
e3 = self.en_acti3(self.en_norm3(self.en_conv3(self.en_padd3(e2)))) # 128x128x256
e4 = self.en_acti4(self.en_norm4(self.en_conv4(self.en_padd4(e3)))) # 64x64x512
middle1 = self.res_blk1(e4)
middle2 = self.res_blk2(middle1)
middle3 = self.res_blk3(middle2)
d1 = self.de_acti1(self.de_norm1(self.de_conv1(middle3))) # 128x128x256
d2 = self.de_acti2(self.de_norm2(self.de_conv2(d1))) # 256x256x128
d3 = self.de_acti3(self.de_norm3(self.de_conv3(d2))) # 512x512x64
d4 = self.de_acti4(self.de_conv4(self.de_padd4(d3))) # 512x512x1
return completed_img * d4 + msked_img * (1.0 - d4), d4
############################################################
### Losses
############################################################
class TVLoss(nn.Module):
def forward(self, x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
        count_h = self._tensor_size(x[:, :, 1:, :])
        count_w = self._tensor_size(x[:, :, :, 1:])
h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()
w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()
return 2 * (h_tv / count_h + w_tv / count_w) / batch_size
def _tensor_size(self, t):
return t.size()[1] * t.size()[2] * t.size()[3]
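# Hypothetical usage sketch (not part of the original training code):
#   tv = TVLoss()
#   smoothness_penalty = tv(generated_image)   # penalizes high-frequency noise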
class MyWcploss(nn.Module):
def __init__(self):
super(MyWcploss, self).__init__()
self.epsilon = 1e-10
def forward(self, pred, gt):
# sigmoid_pred = torch.sigmoid(pred)
count_pos = torch.sum(gt) * 1.0 + self.epsilon
count_neg = torch.sum(1. - gt) * 1.0
beta = count_neg / count_pos
beta_back = count_pos / (count_pos + count_neg)
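        # beta up-weights the (typically sparse) positive pixels via
        # BCEWithLogitsLoss(pos_weight=beta), while beta_back rescales the
        # loss by the positive fraction so imbalanced masks stay comparable.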
bce1 = nn.BCEWithLogitsLoss(pos_weight=beta)
loss = beta_back * bce1(pred, gt)
return loss
# Lap_criterion = LapLoss(max_levels=5)
class LapLoss(nn.Module):
def __init__(self, max_levels=5, k_size=5, sigma=2.0):
super(LapLoss, self).__init__()
self.max_levels = max_levels
self.k_size = k_size
self.sigma = sigma
self._gauss_kernel = None
self.L1_loss = nn.L1Loss()
def forward(self, input, target):
if self._gauss_kernel is None or self._gauss_kernel.shape[1] != input.shape[1]:
self._gauss_kernel = build_gauss_kernel(size=self.k_size, sigma=self.sigma,
n_channels=input.shape[1], cuda=input.is_cuda)
pyr_input = laplacian_pyramid(input, self._gauss_kernel, self.max_levels)
pyr_target = laplacian_pyramid(target, self._gauss_kernel, self.max_levels)
return sum(self.L1_loss(a, b) for a, b in zip(pyr_input, pyr_target))
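# build_gauss_kernel() and laplacian_pyramid() are assumed to be helper
# functions defined elsewhere in this module; LapLoss compares input and
# target level by level in their Laplacian pyramids under an L1 penalty.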
class LapMap(nn.Module):
def __init__(self, max_levels=5, k_size=5, sigma=2.0):
super(LapMap, self).__init__()
self.max_levels = max_levels
self.k_size = k_size
self.sigma = sigma
self._gauss_kernel = None
def forward(self, input):
if self._gauss_kernel is None or self._gauss_kernel.shape[1] != input.shape[1]:
self._gauss_kernel = build_gauss_kernel(size=self.k_size, sigma=self.sigma,
n_channels=input.shape[1], cuda=input.is_cuda)
pyr_input = laplacian_pyramid(input, self._gauss_kernel, self.max_levels)
return pyr_input
class VGGLoss(nn.Module):
# vgg19 perceptual loss
def __init__(self, gpu_ids):
super(VGGLoss, self).__init__()
self.vgg = Vgg19().cuda()
self.criterion = nn.L1Loss()
self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).cuda()
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).cuda()
self.register_buffer('mean', mean)
self.register_buffer('std', std)
def forward(self, x, y):
x = (x - self.mean) / self.std
y = (y - self.mean) / self.std
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
loss = 0
for i in range(len(x_vgg)):
loss += self.weights[i] * \
self.criterion(x_vgg[i], y_vgg[i].detach())
return loss
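# The weights above scale the successive VGG19 feature maps, giving the later
# (deeper) activations the largest contribution; Vgg19 is assumed to return a
# list of intermediate activations and is defined later in this file.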
class DHingeLoss(nn.Module):
# hinge loss for discriminator
def forward(self, x, target_is_real):
# d_loss = 0
# for input_i in x:
# pred = input_i[-1]
# one_tensor = torch.FloatTensor(pred.size()).fill_(1)
# one_tensor = Variable(one_tensor, requires_grad=False)
# if target_is_real:
# # d_loss_real
# d_loss += torch.nn.ReLU()(one_tensor - pred).mean()
# else:
# # d_loss_fake
# d_loss += torch.nn.ReLU()(one_tensor - pred).mean()
# return d_loss
zero_tensor = torch.FloatTensor(1).fill_(0)
zero_tensor.requires_grad_(False)
zero_tensor = zero_tensor.expand_as(x)
if target_is_real:
minval = torch.min(x - 1, zero_tensor)
loss = -torch.mean(minval)
        else:
            minval = torch.min(-x - 1, zero_tensor)
            loss = -torch.mean(minval)
        return loss
class GHingeLoss(nn.Module):
# hinge loss for generator
# g_loss_fake
def forward(self, x):
# g_loss = 0
# for input_i in x:
# pred = input_i[-1]
# one_tensor = torch.FloatTensor(pred.size()).fill_(1)
# one_tensor = Variable(one_tensor, requires_grad=False)
# g_loss += -torch.mean(x)
# return g_loss
return -x.mean()
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0, tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCEWithLogitsLoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
if target_is_real:
create_label = ((self.real_label_var is None) or (self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or (self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
if isinstance(input[0], list):
loss = 0
for input_i in input:
pred = input_i[-1]
target_tensor = self.get_target_tensor(pred, target_is_real)
loss += self.loss(pred, target_tensor)
return loss
else:
target_tensor = self.get_target_tensor(input[-1], target_is_real)
return self.loss(input[-1], target_tensor)
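# Hypothetical usage sketch (discriminator netD and the input pairs are illustrative):
#   criterionGAN = GANLoss(use_lsgan=True)
#   loss_D_real = criterionGAN(netD(real_pair), True)
#   loss_D_fake = criterionGAN(netD(fake_pair), False)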
# Define the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.InstanceNorm2d, use_sigmoid=False,
getIntermFeat=False):
super(NLayerDiscriminator, self).__init__()
self.getIntermFeat = getIntermFeat
self.n_layers = n_layers
kw = 4
padw = int(np.ceil((kw - 1.0) / 2))
sequence = [
[SpectralNorm(nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw)), nn.LeakyReLU(0.2, True)]]
nf = ndf
for n in range(1, n_layers):
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
SpectralNorm(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw)),
# nn.LeakyReLU(0.2, True)
# norm_layer(nf),
nn.LeakyReLU(0.2, True)
]]
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
SpectralNorm(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw)),
# norm_layer(nf),
nn.LeakyReLU(0.2, True)
]]
sequence += [[SpectralNorm(nn.Conv2d(nf, nf, kernel_size=kw, stride=1, padding=padw))]]
# sequence += [[SpectralNorm(nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw))]]
# sequence += [[MultiDilationResnetBlock_v2(nf, kernel_size=3, stride=1, padding=1)]]
if use_sigmoid:
sequence += [[nn.Sigmoid()]]
if getIntermFeat:
for n in range(len(sequence)):
setattr(self, 'model' + str(n), nn.Sequential(*sequence[n]))
else:
sequence_stream = []
for n in range(len(sequence)):
sequence_stream += sequence[n]
self.model = nn.Sequential(*sequence_stream)
def forward(self, input):
if self.getIntermFeat:
res = [input]
for n in range(self.n_layers + 2):
model = getattr(self, 'model' + str(n))
res.append(model(res[-1]))
return res[1:]
else:
return self.model(input)
# Define the Multiscale Discriminator.
class MultiscaleDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, num_D=3,
getIntermFeat=False):
super(MultiscaleDiscriminator, self).__init__()
self.num_D = num_D
self.n_layers = n_layers
self.getIntermFeat = getIntermFeat
for i in range(num_D):
netD = NLayerDiscriminator(input_nc, ndf, n_layers, norm_layer, use_sigmoid, getIntermFeat)
if getIntermFeat:
for j in range(n_layers + 2):
setattr(self, 'scale' + str(i) + '_layer' + str(j), getattr(netD, 'model' + str(j)))
else:
setattr(self, 'layer' + str(i), netD.model)
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
def singleD_forward(self, model, input):
if self.getIntermFeat:
result = [input]
for i in range(len(model)):
result.append(model[i](result[-1]))
return result[1:]
else:
return [model(input)]
def forward(self, input):
num_D = self.num_D
result = []
input_downsampled = input
for i in range(num_D):
if self.getIntermFeat:
model = [getattr(self, 'scale' + str(num_D - 1 - i) + '_layer' + str(j)) for j in
range(self.n_layers + 2)]
else:
model = getattr(self, 'layer' + str(num_D - 1 - i))
result.append(self.singleD_forward(model, input_downsampled))
if i != (num_D - 1):
input_downsampled = self.downsample(input_downsampled)
return result
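# Each of the num_D discriminators sees a progressively average-pooled (2x
# downsampled) copy of the input, which is what makes the critic multi-scale.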
### Define Vgg19 for | |
path == fileobj.name or fd == fileobj.fileno():
break
else:
self.fail("no file found; files=%s" % repr(p.open_files()))
self.assertEqual(path, fileobj.name)
if WINDOWS:
self.assertEqual(fd, -1)
else:
self.assertEqual(fd, fileobj.fileno())
# test positions
ntuple = p.open_files()[0]
self.assertEqual(ntuple[0], ntuple.path)
self.assertEqual(ntuple[1], ntuple.fd)
# test file is gone
self.assertTrue(fileobj.name not in p.open_files())
def compare_proc_sys_cons(self, pid, proc_cons):
from psutil._common import pconn
sys_cons = []
for c in psutil.net_connections(kind='all'):
if c.pid == pid:
sys_cons.append(pconn(*c[:-1]))
if BSD:
# on BSD all fds are set to -1
proc_cons = [pconn(*[-1] + list(x[1:])) for x in proc_cons]
self.assertEqual(sorted(proc_cons), sorted(sys_cons))
@skip_on_access_denied(only_if=OSX)
def test_connections(self):
def check_conn(proc, conn, family, type, laddr, raddr, status, kinds):
all_kinds = ("all", "inet", "inet4", "inet6", "tcp", "tcp4",
"tcp6", "udp", "udp4", "udp6")
check_connection_ntuple(conn)
self.assertEqual(conn.family, family)
self.assertEqual(conn.type, type)
self.assertEqual(conn.laddr, laddr)
self.assertEqual(conn.raddr, raddr)
self.assertEqual(conn.status, status)
for kind in all_kinds:
cons = proc.connections(kind=kind)
if kind in kinds:
self.assertNotEqual(cons, [])
else:
self.assertEqual(cons, [])
# compare against system-wide connections
# XXX Solaris can't retrieve system-wide UNIX
# sockets.
if not SUNOS:
self.compare_proc_sys_cons(proc.pid, [conn])
tcp_template = textwrap.dedent("""
import socket, time
s = socket.socket($family, socket.SOCK_STREAM)
s.bind(('$addr', 0))
s.listen(1)
with open('$testfn', 'w') as f:
f.write(str(s.getsockname()[:2]))
time.sleep(60)
""")
udp_template = textwrap.dedent("""
import socket, time
s = socket.socket($family, socket.SOCK_DGRAM)
s.bind(('$addr', 0))
with open('$testfn', 'w') as f:
f.write(str(s.getsockname()[:2]))
time.sleep(60)
""")
from string import Template
testfile = os.path.basename(TESTFN)
tcp4_template = Template(tcp_template).substitute(
family=int(AF_INET), addr="127.0.0.1", testfn=testfile)
udp4_template = Template(udp_template).substitute(
family=int(AF_INET), addr="127.0.0.1", testfn=testfile)
tcp6_template = Template(tcp_template).substitute(
family=int(AF_INET6), addr="::1", testfn=testfile)
udp6_template = Template(udp_template).substitute(
family=int(AF_INET6), addr="::1", testfn=testfile)
# launch various subprocess instantiating a socket of various
# families and types to enrich psutil results
tcp4_proc = pyrun(tcp4_template)
tcp4_addr = eval(wait_for_file(testfile))
udp4_proc = pyrun(udp4_template)
udp4_addr = eval(wait_for_file(testfile))
if supports_ipv6():
tcp6_proc = pyrun(tcp6_template)
tcp6_addr = eval(wait_for_file(testfile))
udp6_proc = pyrun(udp6_template)
udp6_addr = eval(wait_for_file(testfile))
else:
tcp6_proc = None
udp6_proc = None
tcp6_addr = None
udp6_addr = None
for p in psutil.Process().children():
cons = p.connections()
self.assertEqual(len(cons), 1)
for conn in cons:
# TCP v4
if p.pid == tcp4_proc.pid:
check_conn(p, conn, AF_INET, SOCK_STREAM, tcp4_addr, (),
psutil.CONN_LISTEN,
("all", "inet", "inet4", "tcp", "tcp4"))
# UDP v4
elif p.pid == udp4_proc.pid:
check_conn(p, conn, AF_INET, SOCK_DGRAM, udp4_addr, (),
psutil.CONN_NONE,
("all", "inet", "inet4", "udp", "udp4"))
# TCP v6
elif p.pid == getattr(tcp6_proc, "pid", None):
check_conn(p, conn, AF_INET6, SOCK_STREAM, tcp6_addr, (),
psutil.CONN_LISTEN,
("all", "inet", "inet6", "tcp", "tcp6"))
# UDP v6
elif p.pid == getattr(udp6_proc, "pid", None):
check_conn(p, conn, AF_INET6, SOCK_DGRAM, udp6_addr, (),
psutil.CONN_NONE,
("all", "inet", "inet6", "udp", "udp6"))
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'),
'AF_UNIX is not supported')
@skip_on_access_denied(only_if=OSX)
def test_connections_unix(self):
def check(type):
safe_remove(TESTFN)
sock = socket.socket(AF_UNIX, type)
with contextlib.closing(sock):
sock.bind(TESTFN)
cons = psutil.Process().connections(kind='unix')
conn = cons[0]
check_connection_ntuple(conn)
if conn.fd != -1: # != sunos and windows
self.assertEqual(conn.fd, sock.fileno())
self.assertEqual(conn.family, AF_UNIX)
self.assertEqual(conn.type, type)
self.assertEqual(conn.laddr, TESTFN)
if not SUNOS:
# XXX Solaris can't retrieve system-wide UNIX
# sockets.
self.compare_proc_sys_cons(os.getpid(), cons)
check(SOCK_STREAM)
check(SOCK_DGRAM)
@unittest.skipUnless(hasattr(socket, "fromfd"),
                         'socket.fromfd() is not available')
@unittest.skipIf(WINDOWS or SUNOS,
'connection fd not available on this platform')
def test_connection_fromfd(self):
with contextlib.closing(socket.socket()) as sock:
sock.bind(('localhost', 0))
sock.listen(1)
p = psutil.Process()
for conn in p.connections():
if conn.fd == sock.fileno():
break
else:
self.fail("couldn't find socket fd")
dupsock = socket.fromfd(conn.fd, conn.family, conn.type)
with contextlib.closing(dupsock):
self.assertEqual(dupsock.getsockname(), conn.laddr)
self.assertNotEqual(sock.fileno(), dupsock.fileno())
def test_connection_constants(self):
ints = []
strs = []
for name in dir(psutil):
if name.startswith('CONN_'):
num = getattr(psutil, name)
str_ = str(num)
assert str_.isupper(), str_
assert str_ not in strs, str_
assert num not in ints, num
ints.append(num)
strs.append(str_)
if SUNOS:
psutil.CONN_IDLE
psutil.CONN_BOUND
if WINDOWS:
psutil.CONN_DELETE_TCB
@unittest.skipUnless(POSIX, 'posix only')
def test_num_fds(self):
p = psutil.Process()
start = p.num_fds()
file = open(TESTFN, 'w')
self.addCleanup(file.close)
self.assertEqual(p.num_fds(), start + 1)
sock = socket.socket()
self.addCleanup(sock.close)
self.assertEqual(p.num_fds(), start + 2)
file.close()
sock.close()
self.assertEqual(p.num_fds(), start)
@skip_on_not_implemented(only_if=LINUX)
def test_num_ctx_switches(self):
p = psutil.Process()
before = sum(p.num_ctx_switches())
for x in range(500000):
after = sum(p.num_ctx_switches())
if after > before:
return
self.fail("num ctx switches still the same after 50.000 iterations")
def test_parent_ppid(self):
this_parent = os.getpid()
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
self.assertEqual(p.ppid(), this_parent)
self.assertEqual(p.parent().pid, this_parent)
# no other process is supposed to have us as parent
for p in psutil.process_iter():
if p.pid == sproc.pid:
continue
self.assertTrue(p.ppid() != this_parent)
def test_children(self):
p = psutil.Process()
self.assertEqual(p.children(), [])
self.assertEqual(p.children(recursive=True), [])
sproc = get_test_subprocess()
children1 = p.children()
children2 = p.children(recursive=True)
for children in (children1, children2):
self.assertEqual(len(children), 1)
self.assertEqual(children[0].pid, sproc.pid)
self.assertEqual(children[0].ppid(), os.getpid())
def test_children_recursive(self):
# here we create a subprocess which creates another one as in:
# A (parent) -> B (child) -> C (grandchild)
s = "import subprocess, os, sys, time;"
s += "PYTHON = os.path.realpath(sys.executable);"
s += "cmd = [PYTHON, '-c', 'import time; time.sleep(60);'];"
s += "subprocess.Popen(cmd);"
s += "time.sleep(60);"
get_test_subprocess(cmd=[PYTHON, "-c", s])
p = psutil.Process()
self.assertEqual(len(p.children(recursive=False)), 1)
# give the grandchild some time to start
stop_at = time.time() + 1.5
while time.time() < stop_at:
children = p.children(recursive=True)
if len(children) > 1:
break
self.assertEqual(len(children), 2)
self.assertEqual(children[0].ppid(), os.getpid())
self.assertEqual(children[1].ppid(), children[0].pid)
def test_children_duplicates(self):
# find the process which has the highest number of children
table = collections.defaultdict(int)
for p in psutil.process_iter():
try:
table[p.ppid()] += 1
except psutil.Error:
pass
# this is the one, now let's make sure there are no duplicates
pid = sorted(table.items(), key=lambda x: x[1])[-1][0]
p = psutil.Process(pid)
try:
c = p.children(recursive=True)
except psutil.AccessDenied: # windows
pass
else:
self.assertEqual(len(c), len(set(c)))
def test_suspend_resume(self):
sproc = get_test_subprocess(wait=True)
p = psutil.Process(sproc.pid)
p.suspend()
for x in range(100):
if p.status() == psutil.STATUS_STOPPED:
break
time.sleep(0.01)
p.resume()
self.assertNotEqual(p.status(), psutil.STATUS_STOPPED)
def test_invalid_pid(self):
self.assertRaises(TypeError, psutil.Process, "1")
self.assertRaises(ValueError, psutil.Process, -1)
def test_as_dict(self):
p = psutil.Process()
d = p.as_dict(attrs=['exe', 'name'])
self.assertEqual(sorted(d.keys()), ['exe', 'name'])
p = psutil.Process(min(psutil.pids()))
d = p.as_dict(attrs=['connections'], ad_value='foo')
if not isinstance(d['connections'], list):
self.assertEqual(d['connections'], 'foo')
def test_halfway_terminated_process(self):
# Test that NoSuchProcess exception gets raised in case the
# process dies after we create the Process object.
# Example:
# >>> proc = Process(1234)
# >>> time.sleep(2) # time-consuming task, process dies in meantime
# >>> proc.name()
# Refers to Issue #15
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
p.kill()
p.wait()
excluded_names = ['pid', 'is_running', 'wait', 'create_time']
if LINUX and not RLIMIT_SUPPORT:
excluded_names.append('rlimit')
for name in dir(p):
if (name.startswith('_') or
name in excluded_names):
continue
try:
meth = getattr(p, name)
# get/set methods
if name == 'nice':
if POSIX:
meth(1)
else:
meth(psutil.NORMAL_PRIORITY_CLASS)
elif name == 'ionice':
meth()
meth(2)
elif name == 'rlimit':
meth(psutil.RLIMIT_NOFILE)
meth(psutil.RLIMIT_NOFILE, (5, 5))
elif name == 'cpu_affinity':
meth()
meth([0])
elif name == 'send_signal':
meth(signal.SIGTERM)
else:
meth()
except psutil.ZombieProcess:
self.fail("ZombieProcess for %r was not supposed to happen" %
name)
except psutil.NoSuchProcess:
pass
except NotImplementedError:
pass
else:
self.fail("NoSuchProcess exception not raised for %r" % name)
self.assertFalse(p.is_running())
@unittest.skipUnless(POSIX, 'posix only')
def test_zombie_process(self):
def succeed_or_zombie_p_exc(fun, *args, **kwargs):
try:
fun(*args, **kwargs)
except (psutil.ZombieProcess, psutil.AccessDenied):
pass
# Note: in this test we'll be creating two sub processes.
# Both of them are supposed to be freed / killed by
# reap_children() as they are attributable to 'us'
# (os.getpid()) via children(recursive=True).
src = textwrap.dedent("""\
import os, sys, time, socket, contextlib
child_pid = os.fork()
if child_pid > 0:
time.sleep(3000)
else:
# this is the zombie process
s = socket.socket(socket.AF_UNIX)
with contextlib.closing(s):
s.connect('%s')
if sys.version_info < (3, ):
pid = str(os.getpid())
else:
pid = bytes(str(os.getpid()), 'ascii')
s.sendall(pid)
""" % TESTFN)
with contextlib.closing(socket.socket(socket.AF_UNIX)) as sock:
try:
sock.settimeout(GLOBAL_TIMEOUT)
sock.bind(TESTFN)
sock.listen(1)
pyrun(src)
conn, _ = sock.accept()
select.select([conn.fileno()], [], [], GLOBAL_TIMEOUT)
zpid = int(conn.recv(1024))
zproc = psutil.Process(zpid)
call_until(lambda: zproc.status(),
"ret == psutil.STATUS_ZOMBIE")
# A zombie process should always be instantiable
zproc = psutil.Process(zpid)
                # ...and at least its status should always be queryable
self.assertEqual(zproc.status(), psutil.STATUS_ZOMBIE)
# ...and it should be considered 'running'
self.assertTrue(zproc.is_running())
# ...and as_dict() shouldn't crash
zproc.as_dict()
if hasattr(zproc, "rlimit"):
succeed_or_zombie_p_exc(zproc.rlimit, psutil.RLIMIT_NOFILE)
succeed_or_zombie_p_exc(zproc.rlimit, psutil.RLIMIT_NOFILE,
(5, 5))
# set methods
succeed_or_zombie_p_exc(zproc.parent)
if hasattr(zproc, 'cpu_affinity'):
succeed_or_zombie_p_exc(zproc.cpu_affinity, [0])
succeed_or_zombie_p_exc(zproc.nice, 0)
if hasattr(zproc, 'ionice'):
if LINUX:
succeed_or_zombie_p_exc(zproc.ionice, 2, 0)
else:
succeed_or_zombie_p_exc(zproc.ionice, 0) # Windows
if hasattr(zproc, 'rlimit'):
succeed_or_zombie_p_exc(zproc.rlimit,
psutil.RLIMIT_NOFILE, (5, 5))
succeed_or_zombie_p_exc(zproc.suspend)
succeed_or_zombie_p_exc(zproc.resume)
succeed_or_zombie_p_exc(zproc.terminate)
succeed_or_zombie_p_exc(zproc.kill)
# ...its parent should 'see' it
# edit: not true on BSD and OSX
# descendants = [x.pid for x in psutil.Process().children(
# recursive=True)]
# self.assertIn(zpid, descendants)
# XXX should we also assume ppid be usable? Note: this
# would be an important use case as the only way to get
# rid of a zombie is to | |
TX'},
'1512407':{'en': 'Austin, TX'},
'1512414':{'en': 'Austin, TX'},
'1512416':{'en': 'Austin, TX'},
'1512418':{'en': 'Austin, TX'},
'1512419':{'en': 'Austin, TX'},
'1512420':{'en': 'Austin, TX'},
'151244':{'en': 'Austin, TX'},
'1512446':{'en': 'Rockdale, TX'},
'151245':{'en': 'Austin, TX'},
'1512462':{'en': 'Austin, TX'},
'1512463':{'en': 'Austin, TX'},
'1512467':{'en': 'Austin, TX'},
'1512469':{'en': 'Austin, TX'},
'151247':{'en': 'Austin, TX'},
'1512480':{'en': 'Austin, TX'},
'1512481':{'en': 'Austin, TX'},
'1512482':{'en': 'Austin, TX'},
'1512485':{'en': 'Austin, TX'},
'1512491':{'en': 'Austin, TX'},
'1512495':{'en': 'Austin, TX'},
'1512499':{'en': 'Austin, TX'},
'1512502':{'en': 'Austin, TX'},
'1512505':{'en': 'Austin, TX'},
'1512506':{'en': 'Austin, TX'},
'1512509':{'en': 'Round Rock, TX'},
'1512515':{'en': 'Liberty Hill, TX'},
'1512524':{'en': 'Austin, TX'},
'1512527':{'en': 'Austin, TX'},
'1512533':{'en': 'Austin, TX'},
'1512542':{'en': 'Austin, TX'},
'1512551':{'en': 'Austin, TX'},
'1512556':{'en': 'Lampasas, TX'},
'1512581':{'en': 'Bastrop, TX'},
'1512610':{'en': 'Austin, TX'},
'1512614':{'en': 'Austin, TX'},
'1512615':{'en': 'Austin, TX'},
'1512617':{'en': 'Austin, TX'},
'1512628':{'en': 'Austin, TX'},
'1512637':{'en': 'Austin, TX'},
'1512651':{'en': 'Austin, TX'},
'1512671':{'en': 'Round Rock, TX'},
'1512707':{'en': 'Austin, TX'},
'1512708':{'en': 'Austin, TX'},
'1512715':{'en': 'Burnet, TX'},
'1512719':{'en': 'Austin, TX'},
'1512732':{'en': 'Austin, TX'},
'1512746':{'en': 'Jarrell, TX'},
'1512754':{'en': 'San Marcos, TX'},
'1512756':{'en': 'Burnet, TX'},
'1512759':{'en': 'Hutto, TX'},
'1512763':{'en': 'Georgetown, TX'},
'1512778':{'en': 'Liberty Hill, TX'},
'1512794':{'en': 'Austin, TX'},
'1512795':{'en': 'Austin, TX'},
'1512804':{'en': 'Austin, TX'},
'1512805':{'en': 'San Marcos, TX'},
'1512819':{'en': 'Georgetown, TX'},
'1512821':{'en': 'Austin, TX'},
'151283':{'en': 'Austin, TX'},
'1512846':{'en': 'Hutto, TX'},
'1512847':{'en': 'Wimberley, TX'},
'1512854':{'en': 'Austin, TX'},
'1512858':{'en': 'Dripping Springs, TX'},
'1512863':{'en': 'Georgetown, TX'},
'1512864':{'en': 'Georgetown, TX'},
'1512868':{'en': 'Georgetown, TX'},
'1512869':{'en': 'Georgetown, TX'},
'1512873':{'en': 'Austin, TX'},
'1512878':{'en': 'San Marcos, TX'},
'1512891':{'en': 'Austin, TX'},
'1512892':{'en': 'Austin, TX'},
'1512894':{'en': 'Dripping Springs, TX'},
'1512899':{'en': 'Austin, TX'},
'1512901':{'en': 'Austin, TX'},
'1512912':{'en': 'Austin, TX'},
'1512916':{'en': 'Austin, TX'},
'1512918':{'en': 'Austin, TX'},
'1512926':{'en': 'Austin, TX'},
'1512928':{'en': 'Austin, TX'},
'1512929':{'en': 'Austin, TX'},
'1512930':{'en': 'Georgetown, TX'},
'1512931':{'en': 'Georgetown, TX'},
'1512943':{'en': 'Georgetown, TX'},
'1512973':{'en': 'Austin, TX'},
'1512974':{'en': 'Austin, TX'},
'1513':{'en': 'Ohio'},
'1513202':{'en': 'Harrison, OH'},
'1513204':{'en': 'Mason, OH'},
'1513217':{'en': 'Middletown, OH'},
'1513221':{'en': 'Cincinnati, OH'},
'1513228':{'en': 'Lebanon, OH'},
'1513229':{'en': 'Mason, OH'},
'1513231':{'en': 'Cincinnati, OH'},
'1513232':{'en': 'Cincinnati, OH'},
'1513233':{'en': 'Cincinnati, OH'},
'1513234':{'en': 'Mason, OH'},
'1513241':{'en': 'Cincinnati, OH'},
'1513242':{'en': 'Cincinnati, OH'},
'1513244':{'en': 'Cincinnati, OH'},
'1513245':{'en': 'Cincinnati, OH'},
'1513247':{'en': 'Cincinnati, OH'},
'1513248':{'en': 'Milford, OH'},
'1513251':{'en': 'Cincinnati, OH'},
'1513271':{'en': 'Cincinnati, OH'},
'1513272':{'en': 'Cincinnati, OH'},
'1513281':{'en': 'Cincinnati, OH'},
'1513285':{'en': 'Hamilton, OH'},
'1513321':{'en': 'Cincinnati, OH'},
'1513333':{'en': 'Cincinnati, OH'},
'1513336':{'en': 'Mason, OH'},
'1513346':{'en': 'Cincinnati, OH'},
'1513347':{'en': 'Cincinnati, OH'},
'1513351':{'en': 'Cincinnati, OH'},
'1513352':{'en': 'Cincinnati, OH'},
'1513353':{'en': 'Cleves, OH'},
'1513354':{'en': 'Cincinnati, OH'},
'1513357':{'en': 'Cincinnati, OH'},
'1513360':{'en': 'Monroe, OH'},
'1513367':{'en': 'Harrison, OH'},
'1513376':{'en': 'Cincinnati, OH'},
'1513381':{'en': 'Cincinnati, OH'},
'1513385':{'en': 'Cincinnati, OH'},
'1513389':{'en': 'Cincinnati, OH'},
'1513398':{'en': 'Mason, OH'},
'1513407':{'en': 'Cincinnati, OH'},
'1513420':{'en': 'Middletown, OH'},
'1513421':{'en': 'Cincinnati, OH'},
'1513422':{'en': 'Middletown, OH'},
'1513423':{'en': 'Middletown, OH'},
'1513424':{'en': 'Middletown, OH'},
'1513425':{'en': 'Middletown, OH'},
'1513451':{'en': 'Cincinnati, OH'},
'1513459':{'en': 'Mason, OH'},
'1513469':{'en': 'Cincinnati, OH'},
'1513471':{'en': 'Cincinnati, OH'},
'1513474':{'en': 'Cincinnati, OH'},
'1513475':{'en': 'Cincinnati, OH'},
'1513481':{'en': 'Cincinnati, OH'},
'1513489':{'en': 'Cincinnati, OH'},
'1513521':{'en': 'Cincinnati, OH'},
'1513522':{'en': 'Cincinnati, OH'},
'1513523':{'en': 'Oxford, OH'},
'1513524':{'en': 'Oxford, OH'},
'1513528':{'en': 'Cincinnati, OH'},
'1513530':{'en': 'Cincinnati, OH'},
'1513531':{'en': 'Cincinnati, OH'},
'1513533':{'en': 'Cincinnati, OH'},
'1513539':{'en': 'Monroe, OH'},
'1513541':{'en': 'Cincinnati, OH'},
'1513542':{'en': 'Cincinnati, OH'},
'1513553':{'en': 'New Richmond, OH'},
'1513554':{'en': 'Cincinnati, OH'},
'1513557':{'en': 'Cincinnati, OH'},
'1513558':{'en': 'Cincinnati, OH'},
'1513559':{'en': 'Cincinnati, OH'},
'1513561':{'en': 'Cincinnati, OH'},
'1513563':{'en': 'Cincinnati, OH'},
'1513574':{'en': 'Cincinnati, OH'},
'1513575':{'en': 'Milford, OH'},
'1513576':{'en': 'Milford, OH'},
'1513579':{'en': 'Cincinnati, OH'},
'1513584':{'en': 'Cincinnati, OH'},
'1513585':{'en': 'Cincinnati, OH'},
'1513598':{'en': 'Cincinnati, OH'},
'1513621':{'en': 'Cincinnati, OH'},
'1513624':{'en': 'Cincinnati, OH'},
'1513631':{'en': 'Cincinnati, OH'},
'1513636':{'en': 'Cincinnati, OH'},
'1513641':{'en': 'Cincinnati, OH'},
'1513651':{'en': 'Cincinnati, OH'},
'1513661':{'en': 'Cincinnati, OH'},
'1513662':{'en': 'Cincinnati, OH'},
'1513671':{'en': 'Cincinnati, OH'},
'1513672':{'en': 'Cincinnati, OH'},
'1513674':{'en': 'Cincinnati, OH'},
'1513681':{'en': 'Cincinnati, OH'},
'1513683':{'en': 'Loveland, OH'},
'1513686':{'en': 'Cincinnati, OH'},
'1513721':{'en': 'Cincinnati, OH'},
'1513724':{'en': 'Williamsburg, OH'},
'1513727':{'en': 'Middletown, OH'},
'1513729':{'en': 'Cincinnati, OH'},
'1513731':{'en': 'Cincinnati, OH'},
'1513732':{'en': 'Batavia, OH'},
'1513733':{'en': 'Cincinnati, OH'},
'1513734':{'en': 'Bethel, OH'},
'1513735':{'en': 'Batavia, OH'},
'1513737':{'en': 'Hamilton, OH'},
'1513741':{'en': 'Cincinnati, OH'},
'1513742':{'en': 'Cincinnati, OH'},
'1513745':{'en': 'Cincinnati, OH'},
'1513751':{'en': 'Cincinnati, OH'},
'1513752':{'en': 'Cincinnati, OH'},
'1513754':{'en': 'Mason, OH'},
'1513755':{'en': 'West Chester, OH'},
'1513759':{'en': 'West Chester, OH'},
'1513761':{'en': 'Cincinnati, OH'},
'1513762':{'en': 'Cincinnati, OH'},
'1513769':{'en': 'Cincinnati, OH'},
'1513770':{'en': 'Mason, OH'},
'1513771':{'en': 'Cincinnati, OH'},
'1513772':{'en': 'Cincinnati, OH'},
'1513777':{'en': 'West Chester, OH'},
'1513779':{'en': 'West Chester, OH'},
'1513785':{'en': 'Hamilton, OH'},
'1513791':{'en': 'Cincinnati, OH'},
'1513792':{'en': 'Cincinnati, OH'},
'1513793':{'en': 'Cincinnati, OH'},
'1513794':{'en': 'Cincinnati, OH'},
'1513797':{'en': 'Amelia, OH'},
'1513821':{'en': 'Cincinnati, OH'},
'1513825':{'en': 'Cincinnati, OH'},
'1513829':{'en': 'Fairfield, OH'},
'1513831':{'en': 'Milford, OH'},
'1513834':{'en': 'Cincinnati, OH'},
'1513841':{'en': 'Cincinnati, OH'},
'1513844':{'en': 'Hamilton, OH'},
'1513851':{'en': 'Cincinnati, OH'},
'1513852':{'en': 'Cincinnati, OH'},
'1513858':{'en': 'Fairfield, OH'},
'1513861':{'en': 'Cincinnati, OH'},
'1513862':{'en': 'Cincinnati, OH'},
'1513863':{'en': 'Hamilton, OH'},
'1513867':{'en': 'Hamilton, OH'},
'1513868':{'en': 'Hamilton, OH'},
'1513871':{'en': 'Cincinnati, OH'},
'1513872':{'en': 'Cincinnati, OH'},
'1513875':{'en': 'Fayetteville, OH'},
'1513876':{'en': 'Felicity, OH'},
'1513887':{'en': 'Hamilton, OH'},
'1513891':{'en': 'Cincinnati, OH'},
'1513892':{'en': 'Hamilton, OH'},
'1513893':{'en': 'Hamilton, OH'},
'1513894':{'en': 'Hamilton, OH'},
'1513895':{'en': 'Hamilton, OH'},
'1513896':{'en': 'Hamilton, OH'},
'1513897':{'en': 'Waynesville, OH'},
'1513899':{'en': 'Morrow, OH'},
'1513921':{'en': 'Cincinnati, OH'},
'1513922':{'en': 'Cincinnati, OH'},
'1513923':{'en': 'Cincinnati, OH'},
'1513931':{'en': 'Cincinnati, OH'},
'1513932':{'en': 'Lebanon, OH'},
'1513933':{'en': 'Lebanon, OH'},
'1513934':{'en': 'Lebanon, OH'},
'1513939':{'en': 'Fairfield, OH'},
'1513941':{'en': 'Cincinnati, OH'},
'1513943':{'en': 'Cincinnati, OH'},
'1513947':{'en': 'Cincinnati, OH'},
'1513948':{'en': 'Cincinnati, OH'},
'1513961':{'en': 'Cincinnati, OH'},
'1513965':{'en': 'Milford, OH'},
'1513977':{'en': 'Cincinnati, OH'},
'1513984':{'en': 'Cincinnati, OH'},
'1513985':{'en': 'Cincinnati, OH'},
'1513988':{'en': 'Trenton, OH'},
'1514':{'en': 'Quebec'},
'1514223':{'en': 'Montreal, QC'},
'1514227':{'en': 'Montreal, QC'},
'151425':{'en': 'Montreal, QC'},
'151427':{'en': 'Montreal, QC'},
'151428':{'en': 'Montreal, QC'},
'1514303':{'en': 'Montreal, QC'},
'1514313':{'en': 'Montreal, QC'},
'1514315':{'en': 'Montreal, QC'},
'1514333':{'en': 'Saint-Laurent, QC'},
'1514335':{'en': 'Saint-Laurent, QC'},
'1514339':{'en': 'Saint-Laurent, QC'},
'1514340':{'en': 'Montreal, QC'},
'1514342':{'en': 'Montreal, QC'},
'1514345':{'en': 'Montreal, QC'},
'151436':{'en': 'Lasalle, QC'},
'1514369':{'en': 'Montreal, QC'},
'1514374':{'en': 'Montreal, QC'},
'1514376':{'en': 'Montreal, QC'},
'151438':{'en': 'Montreal, QC'},
'151439':{'en': 'Montreal, QC'},
'1514419':{'en': 'Montreal, QC'},
'1514422':{'en': 'Dorval, QC'},
'1514426':{'en': 'Pointe-Claire, QC'},
'1514428':{'en': 'Pointe-Claire, QC'},
'1514439':{'en': 'Montreal, QC'},
'1514461':{'en': 'Montreal, QC'},
'151448':{'en': 'Montreal, QC'},
'1514495':{'en': 'Montreal, QC'},
'1514498':{'en': 'Pointe-aux-Trembles, QC'},
'1514499':{'en': 'Montreal, QC'},
'1514504':{'en': 'Montreal, QC'},
'1514507':{'en': 'Montreal, QC'},
'1514508':{'en': 'Montreal, QC'},
'1514509':{'en': 'Montreal, QC'},
'1514510':{'en': 'Montreal, QC'},
'151452':{'en': 'Montreal, QC'},
'1514543':{'en': 'Montreal, QC'},
'1514544':{'en': 'Montreal, QC'},
'1514564':{'en': 'Montreal, QC'},
'1514593':{'en': 'Montreal, QC'},
'1514595':{'en': 'Lasalle, QC'},
'1514596':{'en': 'Montreal, QC'},
'1514598':{'en': 'Montreal, QC'},
'1514630':{'en': 'Pointe-Claire, QC'},
'1514631':{'en': 'Dorval, QC'},
'1514633':{'en': 'Dorval, QC'},
'1514634':{'en': 'Lachine, QC'},
'1514636':{'en': 'Dorval, QC'},
'1514637':{'en': 'Lachine, QC'},
'1514639':{'en': 'Lachine, QC'},
'1514642':{'en': 'Pointe-aux-Trembles, QC'},
'1514658':{'en': 'Montreal, QC'},
'1514670':{'en': 'Montreal, QC'},
'1514678':{'en': 'Montreal, QC'},
'1514694':{'en': 'Pointe-Claire, QC'},
'1514695':{'en': 'Pointe-Claire, QC'},
'1514697':{'en': 'Pointe-Claire, QC'},
'151472':{'en': 'Montreal, QC'},
'151473':{'en': 'Montreal, QC'},
'1514744':{'en': 'Saint-Laurent, QC'},
'1514747':{'en': 'Saint-Laurent, QC'},
'1514748':{'en': 'Saint-Laurent, QC'},
'1514750':{'en': 'Montreal, QC'},
'1514759':{'en': 'Montreal, QC'},
'1514761':{'en': 'Verdun, QC'},
'1514788':{'en': 'Montreal, QC'},
'1514789':{'en': 'Montreal, QC'},
'1514798':{'en': 'Montreal, QC'},
'1514807':{'en': 'Montreal, QC'},
'151484':{'en': 'Montreal, QC'},
'1514855':{'en': 'Saint-Laurent, QC'},
'1514858':{'en': 'Montreal, QC'},
'1514861':{'en': 'Montreal, QC'},
'1514866':{'en': 'Montreal, QC'},
'1514868':{'en': 'Montreal, QC'},
'151487':{'en': 'Montreal, QC'},
'1514899':{'en': 'Montreal, QC'},
'1514903':{'en': 'Montreal, QC'},
'1514904':{'en': 'Montreal, QC'},
'1514905':{'en': 'Montreal, QC'},
'1514906':{'en': 'Montreal, QC'},
'1514908':{'en': 'Montreal, QC'},
'151493':{'en': 'Montreal, QC'},
'1514940':{'en': 'Montreal, QC'},
'1514948':{'en': 'Montreal, QC'},
'1514954':{'en': 'Montreal, QC'},
'1514956':{'en': 'Saint-Laurent, QC'},
'1514982':{'en': 'Montreal, QC'},
'1514985':{'en': 'Montreal, QC'},
'1514987':{'en': 'Montreal, QC'},
'1514989':{'en': 'Montreal, QC'},
'1515':{'en': 'Iowa'},
'1515221':{'en': 'West Des Moines, IA'},
'1515222':{'en': 'West Des Moines, IA'},
'1515223':{'en': 'West Des Moines, IA'},
'1515224':{'en': 'West Des Moines, IA'},
'1515225':{'en': 'West Des Moines, IA'},
'1515232':{'en': 'Ames, IA'},
'1515233':{'en': 'Ames, IA'},
'1515237':{'en': 'Des Moines, IA'},
'1515239':{'en': 'Ames, IA'},
'151524':{'en': 'Des Moines, IA'},
'1515255':{'en': 'Des Moines, IA'},
'1515256':{'en': 'Des Moines, IA'},
'1515262':{'en': 'Des Moines, IA'},
'1515263':{'en': 'Des Moines, IA'},
'1515264':{'en': 'Des Moines, IA'},
'1515265':{'en': 'Des Moines, IA'},
'1515266':{'en': 'Des Moines, IA'},
'1515267':{'en': 'West Des Moines, IA'},
'1515271':{'en': 'Des Moines, IA'},
'1515274':{'en': 'Des Moines, IA'},
'1515275':{'en': 'Ogden, IA'},
'1515277':{'en': 'Des Moines, IA'},
'1515279':{'en': 'Des Moines, IA'},
'151528':{'en': 'Des Moines, IA'},
'1515289':{'en': 'Ankeny, IA'},
'1515292':{'en': 'Ames, IA'},
'1515294':{'en': 'Ames, IA'},
'1515295':{'en': 'Algona, IA'},
'1515327':{'en': 'West Des Moines, IA'},
'1515332':{'en': 'Humboldt, IA'},
'1515352':{'en': 'Gowrie, IA'},
'1515382':{'en': 'Nevada, IA'},
'1515386':{'en': 'Jefferson, IA'},
'1515432':{'en': 'Boone, IA'},
'1515433':{'en': 'Boone, IA'},
'1515440':{'en': 'West Des Moines, IA'},
'1515448':{'en': 'Eagle Grove, IA'},
'1515453':{'en': 'West Des Moines, IA'},
'1515457':{'en': 'West Des Moines, IA'},
'1515462':{'en': 'Winterset, IA'},
'1515465':{'en': 'Perry, IA'},
'1515523':{'en': 'Stuart, IA'},
'1515532':{'en': 'Clarion, IA'},
'1515573':{'en': 'Fort Dodge, IA'},
'1515574':{'en': 'Fort Dodge, IA'},
'1515576':{'en': 'Fort Dodge, IA'},
'1515597':{'en': 'Huxley, IA'},
'1515643':{'en': 'Des Moines, IA'},
'1515674':{'en': 'Colfax, IA'},
'1515699':{'en': 'Des Moines, IA'},
'1515733':{'en': 'Story City, IA'},
'1515795':{'en': 'Madrid, IA'},
'1515832':{'en': 'Webster City, IA'},
'1515885':{'en': 'Bancroft, IA'},
'1515887':{'en': 'West Bend, IA'},
'1515953':{'en': 'Des Moines, IA'},
'1515955':{'en': 'Fort Dodge, IA'},
'1515957':{'en': 'Altoona, IA'},
'1515961':{'en': 'Indianola, | |
'node': self._primary_key,
'description_new': self.description,
'description_original': original
},
auth=auth,
save=False,
)
if save:
self.save()
return None
def update_search(self):
from website import search
try:
search.search.update_node(self)
except search.exceptions.SearchUnavailableError as e:
logger.exception(e)
log_exception()
def remove_node(self, auth, date=None):
"""Marks a node as deleted.
TODO: Call a hook on addons
Adds a log to the parent node if applicable
:param auth: an instance of :class:`Auth`.
:param date: Date node was removed
:type date: `datetime.datetime` or `None`
"""
# TODO: rename "date" param - it's shadowing a global
if self.is_dashboard:
raise NodeStateError("Dashboards may not be deleted.")
if not self.can_edit(auth):
raise PermissionsError('{0!r} does not have permission to modify this {1}'.format(auth.user, self.category or 'node'))
        # If this is a folder, remove every folder it points to.
if self.is_folder:
for pointed in self.nodes_pointer:
if pointed.node.is_folder:
pointed.node.remove_node(auth=auth)
if [x for x in self.nodes_primary if not x.is_deleted]:
raise NodeStateError("Any child components must be deleted prior to deleting this project.")
# After delete callback
for addon in self.get_addons():
message = addon.after_delete(self, auth.user)
if message:
status.push_status_message(message)
log_date = date or datetime.datetime.utcnow()
# Add log to parent
if self.node__parent:
self.node__parent[0].add_log(
NodeLog.NODE_REMOVED,
params={
'project': self._primary_key,
},
auth=auth,
log_date=log_date,
save=True,
)
else:
self.add_log(
NodeLog.PROJECT_DELETED,
params={
'project': self._primary_key,
},
auth=auth,
log_date=log_date,
save=True,
)
self.is_deleted = True
self.deleted_date = date
self.save()
auth_signals.node_deleted.send(self)
return True
def fork_node(self, auth, title='Fork of '):
"""Recursively fork a node.
:param Auth auth: Consolidated authorization
:param str title: Optional text to prepend to forked title
:return: Forked node
"""
user = auth.user
# Non-contributors can't fork private nodes
if not (self.is_public or self.has_permission(user, 'read')):
raise PermissionsError('{0!r} does not have permission to fork node {1!r}'.format(user, self._id))
folder_old = os.path.join(settings.UPLOADS_PATH, self._primary_key)
when = datetime.datetime.utcnow()
original = self.load(self._primary_key)
# Note: Cloning a node copies its `files_current` and
# `wiki_pages_current` fields, but does not clone the underlying
# database objects to which these dictionaries refer. This means that
# the cloned node must pass itself to its file and wiki objects to
# build the correct URLs to that content.
forked = original.clone()
forked.logs = self.logs
forked.tags = self.tags
# Recursively fork child nodes
for node_contained in original.nodes:
forked_node = None
try: # Catch the potential PermissionsError above
forked_node = node_contained.fork_node(auth=auth, title='')
except PermissionsError:
pass # If this exception is thrown omit the node from the result set
if forked_node is not None:
forked.nodes.append(forked_node)
forked.title = title + forked.title
forked.is_fork = True
forked.is_registration = False
forked.forked_date = when
forked.forked_from = original
forked.creator = user
forked.piwik_site_id = None
# Forks default to private status
forked.is_public = False
# Clear permissions before adding users
forked.permissions = {}
forked.visible_contributor_ids = []
forked.add_contributor(contributor=user, log=False, save=False)
forked.add_log(
action=NodeLog.NODE_FORKED,
params={
'project': original.parent_id,
'node': original._primary_key,
'registration': forked._primary_key,
},
auth=auth,
log_date=when,
save=False,
)
forked.save()
# After fork callback
for addon in original.get_addons():
_, message = addon.after_fork(original, forked, user)
if message:
status.push_status_message(message)
# TODO: Remove after migration to OSF Storage
if settings.COPY_GIT_REPOS and os.path.exists(folder_old):
folder_new = os.path.join(settings.UPLOADS_PATH, forked._primary_key)
Repo(folder_old).clone(folder_new)
return forked
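    # Hypothetical usage (the Auth construction is illustrative):
    #   forked_project = project.fork_node(Auth(user=current_user))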
def register_node(self, schema, auth, template, data):
"""Make a frozen copy of a node.
:param schema: Schema object
:param auth: All the auth information including user, API key.
        :param template: Template name
        :param data: Form data
"""
# NOTE: Admins can register child nodes even if they don't have write access them
if not self.can_edit(auth=auth) and not self.is_admin_parent(user=auth.user):
raise PermissionsError(
'User {} does not have permission '
'to register this node'.format(auth.user._id)
)
if self.is_folder:
raise NodeStateError("Folders may not be registered")
folder_old = os.path.join(settings.UPLOADS_PATH, self._primary_key)
template = urllib.unquote_plus(template)
template = to_mongo(template)
when = datetime.datetime.utcnow()
original = self.load(self._primary_key)
# Note: Cloning a node copies its `files_current` and
# `wiki_pages_current` fields, but does not clone the underlying
# database objects to which these dictionaries refer. This means that
# the cloned node must pass itself to its file and wiki objects to
# build the correct URLs to that content.
registered = original.clone()
registered.is_registration = True
registered.registered_date = when
registered.registered_user = auth.user
registered.registered_schema = schema
registered.registered_from = original
if not registered.registered_meta:
registered.registered_meta = {}
registered.registered_meta[template] = data
registered.contributors = self.contributors
registered.forked_from = self.forked_from
registered.creator = self.creator
registered.logs = self.logs
registered.tags = self.tags
registered.piwik_site_id = None
registered.save()
# After register callback
for addon in original.get_addons():
_, message = addon.after_register(original, registered, auth.user)
if message:
status.push_status_message(message)
# TODO: Remove after migration to OSF Storage
if settings.COPY_GIT_REPOS and os.path.exists(folder_old):
folder_new = os.path.join(settings.UPLOADS_PATH, registered._primary_key)
Repo(folder_old).clone(folder_new)
registered.nodes = []
for node_contained in original.nodes:
registered_node = node_contained.register_node(
schema, auth, template, data
)
if registered_node is not None:
registered.nodes.append(registered_node)
original.add_log(
action=NodeLog.PROJECT_REGISTERED,
params={
'project': original.parent_id,
'node': original._primary_key,
'registration': registered._primary_key,
},
auth=auth,
log_date=when,
save=False,
)
original.save()
registered.save()
for node in registered.nodes:
node.update_search()
return registered
def remove_tag(self, tag, auth, save=True):
if tag in self.tags:
self.tags.remove(tag)
self.add_log(
action=NodeLog.TAG_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'tag': tag,
},
auth=auth,
save=False,
)
if save:
self.save()
def add_tag(self, tag, auth, save=True):
if tag not in self.tags:
new_tag = Tag.load(tag)
if not new_tag:
new_tag = Tag(_id=tag)
new_tag.save()
self.tags.append(new_tag)
self.add_log(
action=NodeLog.TAG_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'tag': tag,
},
auth=auth,
save=False,
)
if save:
self.save()
# TODO: Move to NodeFile
def read_file_object(self, file_object):
folder_name = os.path.join(settings.UPLOADS_PATH, self._primary_key)
repo = Repo(folder_name)
tree = repo.commit(file_object.git_commit).tree
mode, sha = tree_lookup_path(repo.get_object, tree, file_object.path)
return repo[sha].data, file_object.content_type
def get_file(self, path, version):
#folder_name = os.path.join(settings.UPLOADS_PATH, self._primary_key)
file_object = self.get_file_object(path, version)
return self.read_file_object(file_object)
def get_file_object(self, path, version=None):
"""Return the :class:`NodeFile` object at the given path.
:param str path: Path to the file.
:param int version: Version number, 0-indexed.
"""
# TODO: Fix circular imports
from website.addons.osffiles.model import NodeFile
from website.addons.osffiles.exceptions import (
InvalidVersionError,
VersionNotFoundError,
)
folder_name = os.path.join(settings.UPLOADS_PATH, self._primary_key)
err_msg = 'Upload directory is not a git repo'
assert os.path.exists(os.path.join(folder_name, ".git")), err_msg
try:
file_versions = self.files_versions[path.replace('.', '_')]
# Default to latest version
version = version if version is not None else len(file_versions) - 1
except (AttributeError, KeyError):
raise ValueError('Invalid path: {}'.format(path))
if version < 0:
raise InvalidVersionError('Version number must be >= 0.')
try:
file_id = file_versions[version]
except IndexError:
raise VersionNotFoundError('Invalid version number: {}'.format(version))
except TypeError:
raise InvalidVersionError('Invalid version type. Version number'
'must be an integer >= 0.')
return NodeFile.load(file_id)
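    # Example (hypothetical path): node.get_file_object('data.csv') returns the
    # latest NodeFile version, while node.get_file_object('data.csv', 0) returns
    # the first recorded version.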
def remove_file(self, auth, path):
'''Removes a file from the filesystem, NodeFile collection, and does a git delete ('git rm <file>')
        :param auth: All the auth information including user, API key.
        :param path: Path of the file to remove.
:raises: website.osffiles.exceptions.FileNotFoundError if file is not found.
'''
from website.addons.osffiles.model import NodeFile
from website.addons.osffiles.exceptions import FileNotFoundError
from website.addons.osffiles.utils import urlsafe_filename
file_name_key = urlsafe_filename(path)
repo_path = os.path.join(settings.UPLOADS_PATH, self._primary_key)
# TODO make sure it all works, otherwise rollback as needed
# Do a git delete, which also removes from working filesystem.
try:
subprocess.check_output(
['git', 'rm', path],
cwd=repo_path,
shell=False
)
repo = Repo(repo_path)
message = '{path} deleted'.format(path=path)
committer = self._get_committer(auth)
repo.do_commit(message, committer)
except subprocess.CalledProcessError as error:
if error.returncode == 128:
raise FileNotFoundError('File {0!r} was not found'.format(path))
raise
if file_name_key in self.files_current:
nf = NodeFile.load(self.files_current[file_name_key])
nf.is_deleted = True
nf.save()
self.files_current.pop(file_name_key, None)
if file_name_key in self.files_versions:
for i in self.files_versions[file_name_key]:
nf = NodeFile.load(i)
nf.is_deleted = True
nf.save()
self.files_versions.pop(file_name_key)
self.add_log(
action=NodeLog.FILE_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'path': path
},
auth=auth,
log_date=nf.date_modified,
save=False,
)
# Updates self.date_modified
self.save()
@staticmethod
def _get_committer(auth):
user = auth.user
api_key = auth.api_key
if api_key:
commit_key_msg = ':{}'.format(api_key.label)
if api_key.user:
commit_name = api_key.user.fullname
commit_id = api_key.user._primary_key
commit_category = 'user'
if api_key.node:
commit_name = api_key.node.title
commit_id = api_key.node._primary_key
commit_category = 'node'
elif user:
commit_key_msg = ''
commit_name = user.fullname
commit_id = user._primary_key
commit_category = 'user'
else:
raise Exception('Must provide either user or api_key.')
committer = u'{name}{key_msg} <{category}-{id}@osf.io>'.format(
name=commit_name,
key_msg=commit_key_msg,
category=commit_category,
id=commit_id,
)
committer = normalize_unicode(committer)
return committer
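# Illustrative output of the format string above (names, labels and ids are hypothetical):
#   u'Jane Doe <user-abc12@osf.io>'              -- authenticated user, no API key
#   u'My Project:deploy-key <node-xyz98@osf.io>' -- API key bound to a node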
def add_file(self, auth, file_name, content, size, content_type):
"""
Instantiates a new NodeFile object, and adds it to the current Node as
necessary.
"""
from website.addons.osffiles.model import NodeFile
from website.addons.osffiles.exceptions import FileNotModified
# TODO: Reading the whole file into memory is not scalable. Fix this.
# This node's folder
folder_name = os.path.join(settings.UPLOADS_PATH, self._primary_key)
# TODO: This should be part of the build phase, not here.
# verify the upload root exists
if not | |
<filename>dse_simulation/test/test_information_filter.py
#!/usr/bin/env python
from __future__ import print_function
import roslib
import os
import sys
import unittest
import rospy
import rostest
from optparse import OptionParser
import numpy as np
import datetime
import time
from dse_msgs.msg import PoseMarkers
from std_msgs.msg import Float64MultiArray
from std_msgs.msg import MultiArrayLayout
from std_msgs.msg import MultiArrayDimension
from dse_msgs.msg import InfFilterPartials
from dse_msgs.msg import InfFilterResults
from scipy.spatial.transform import Rotation as R
import copy
sys.path.append(os.path.join(sys.path[0], "../src"))
import dse_lib
import consensus_lib
PKG = 'dse_simulation'
roslib.load_manifest(PKG)
##############################################################################
##############################################################################
class TestInformationFilterCommon(unittest.TestCase):
##############################################################################
##############################################################################
# def set_up(self):
##############################################################################
def __init__(self, *args):
##############################################################################
# rospy.loginfo("-D- TestRangeFilter __init__")
# super(TestRangeFilterCommon, self).__init__(*args)
self.set_up()
super(TestInformationFilterCommon, self).__init__(*args)
##############################################################################
def set_up(self):
##############################################################################
rospy.init_node("test_observation_jacobian")
# self.coefficient = rospy.get_param("range_filter/coefficient", 266)
# self.exponent = rospy.get_param("range_filter/exponent", -1.31)
# self.rolling_pts = rospy.get_param("range_filter/rolling_pts", 4)
self.test_rate = rospy.get_param("~test_rate", 100)
self.results_sub = rospy.Subscriber("/tb3_0/dse/inf/results", InfFilterResults, self.estimator_results_callback)
self.inf_pub = rospy.Publisher("/tb3_0/dse/inf/partial", InfFilterPartials, queue_size=10)
# self.latest_filtered = 1e10
# self.latest_std = 2e10
self.dim_state = 6
self.dim_obs = 3
self.euler_order = 'zyx'
self.got_callback = False
##############################################################################
def send_poses(self, poses, rate):
##############################################################################
r = rospy.Rate(rate)
# rospy.loginfo("-D- sendmsgs: sending %s" % str(msgs))
for pose in poses:
rospy.loginfo("-D- publishing %d" % pose)
self.pose_pub.publish()
r.sleep()
# When the information filter sends back results, store them locally
def information_callback(self, data):
rospy.loginfo("-D- information_filter.py sent back data")
inf_id_list = data.ids
self.inf_Y_prior = dse_lib.multi_array_2d_output(data.inf_matrix_prior)
self.inf_y_prior = dse_lib.multi_array_2d_output(data.inf_vector_prior)
self.inf_I = dse_lib.multi_array_2d_output(data.obs_matrix)
self.inf_i = dse_lib.multi_array_2d_output(data.obs_vector)
# When the direct estimator or consensus returns the combined information variables
def estimator_results_callback(self, data):
rospy.loginfo("-D- information_filter.py sent back data")
self.inf_id_list = np.array(data.ids)
self.inf_Y = dse_lib.multi_array_2d_output(data.inf_matrix)
self.inf_y = dse_lib.multi_array_2d_output(data.inf_vector)
self.got_callback = True
##############################################################################
##############################################################################
class TestInformationFilterValid(TestInformationFilterCommon):
##############################################################################
##############################################################################
##############################################################################
def test_one_Equal_one(self):
##############################################################################
rospy.loginfo("-D- test_one_Equal_one")
self.assertEqual(1, 1, "1!=1")
def test_theta_2_rotm_zero(self):
##############################################################################
rospy.loginfo("-D- test_theta_2_rotm_0")
rotm = dse_lib.theta_2_rotm(0)
x_0 = np.transpose([1, 2])
x_rotm = rotm.dot(x_0)
x_true = x_0
self.assertEqual(True, np.allclose(x_true, x_rotm))
def test_theta_2_rotm_90(self):
##############################################################################
rospy.loginfo("-D- test_theta_2_rotm_0")
theta = 90
rotm = dse_lib.theta_2_rotm(theta * np.pi / 180.0)
x_0 = np.transpose([1, 2])
x_rotm = rotm.dot(x_0)
x_true = np.transpose([-2, 1])
self.assertEqual(True, np.allclose(x_true, x_rotm))
def test_theta_2_rotm_45(self):
##############################################################################
rospy.loginfo("-D- test_theta_2_rotm_0")
theta = 45
rotm = dse_lib.theta_2_rotm(theta * np.pi / 180.0)
x_0 = np.transpose([1, 1])
x_rotm = rotm.dot(x_0)
x_true = np.transpose([0, np.sqrt(2)])
self.assertEqual(True, np.allclose(x_true, x_rotm))
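# The three tests above pin down the convention assumed here for
# dse_lib.theta_2_rotm: it is taken to return the standard counter-clockwise
# 2D rotation matrix
#     R(theta) = [[cos(theta), -sin(theta)],
#                 [sin(theta),  cos(theta)]]
# e.g. R(90 deg) maps (1, 2) to (-2, 1), as asserted in test_theta_2_rotm_90.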
def test_to_frame_1(self):
##############################################################################
rospy.loginfo("-D- test_from_frame_1")
agent1_global = np.array([[0], [0], [np.pi]])
agent2_global = np.array([[1], [0], [0]])
agent2_in_frame_agent1_true = np.array([[-1], [0], [np.pi]])
agent1_in_frame_agent2_true = np.array([[-1], [0], [np.pi]])
agent2_in_frame_agent1_est = dse_lib.agent2_to_frame_agent1_3D(agent1_global, agent2_global)
agent1_in_frame_agent2_est = dse_lib.agent2_to_frame_agent1_3D(agent2_global, agent1_global)
if agent2_in_frame_agent1_est[2, 0] < 0:
agent2_in_frame_agent1_est[2, 0] += 2*np.pi
if agent1_in_frame_agent2_est[2, 0] < 0:
agent1_in_frame_agent2_est[2, 0] += 2*np.pi
self.assertEqual(True, np.allclose(agent2_in_frame_agent1_true, agent2_in_frame_agent1_est))
self.assertEqual(True, np.allclose(agent1_in_frame_agent2_true, agent1_in_frame_agent2_est))
def test_to_frame_2(self):
##############################################################################
rospy.loginfo("-D- test_from_frame_1")
agent1_global = np.array([[0], [0], [0]])
agent2_global = np.array([[-1], [1], [0]])
agent2_in_frame_agent1_true = np.array([[-1], [1], [0]])
agent1_in_frame_agent2_true = np.array([[1], [-1], [0]])
agent2_in_frame_agent1_est = dse_lib.agent2_to_frame_agent1_3D(agent1_global, agent2_global)
agent1_in_frame_agent2_est = dse_lib.agent2_to_frame_agent1_3D(agent2_global, agent1_global)
if agent2_in_frame_agent1_est[2, 0] < 0:
agent2_in_frame_agent1_est[2, 0] += 2*np.pi
if agent1_in_frame_agent2_est[2, 0] < 0:
agent1_in_frame_agent2_est[2, 0] += 2*np.pi
self.assertEqual(True, np.allclose(agent2_in_frame_agent1_true, agent2_in_frame_agent1_est))
self.assertEqual(True, np.allclose(agent1_in_frame_agent2_true, agent1_in_frame_agent2_est))
def test_to_frame_3(self):
##############################################################################
rospy.loginfo("-D- test_from_frame_1")
agent1_global = np.array([[0], [0], [np.pi]])
agent2_global = np.array([[1], [0], [np.pi/2]])
agent2_in_frame_agent1_true = np.array([[-1], [0], [3*np.pi/2]])
agent1_in_frame_agent2_true = np.array([[0], [1], [np.pi/2]])
agent2_in_frame_agent1_est = dse_lib.agent2_to_frame_agent1_3D(agent1_global, agent2_global)
agent1_in_frame_agent2_est = dse_lib.agent2_to_frame_agent1_3D(agent2_global, agent1_global)
if agent2_in_frame_agent1_est[2, 0] < 0:
agent2_in_frame_agent1_est[2, 0] += 2*np.pi
if agent1_in_frame_agent2_est[2, 0] < 0:
agent1_in_frame_agent2_est[2, 0] += 2*np.pi
self.assertEqual(True, np.allclose(agent2_in_frame_agent1_true, agent2_in_frame_agent1_est))
self.assertEqual(True, np.allclose(agent1_in_frame_agent2_true, agent1_in_frame_agent2_est))
def test_to_frame_4(self):
##############################################################################
rospy.loginfo("-D- test_from_frame_1")
agent1_global = np.array([[1], [1], [7/4.0*np.pi]])
agent2_global = np.array([[0.4], [-0.6], [5/4.0*np.pi]])
agent2_in_frame_agent1_true = np.array([[0.5*np.sqrt(2)], [-1.1*np.sqrt(2)], [3/2.0*np.pi]])
agent1_in_frame_agent2_true = np.array([[-1.1*np.sqrt(2)], [-0.5*np.sqrt(2)], [1/2.0*np.pi]])
agent2_in_frame_agent1_est = dse_lib.agent2_to_frame_agent1_3D(agent1_global, agent2_global)
agent1_in_frame_agent2_est = dse_lib.agent2_to_frame_agent1_3D(agent2_global, agent1_global)
if agent2_in_frame_agent1_est[2, 0] < 0:
agent2_in_frame_agent1_est[2, 0] += 2*np.pi
if agent1_in_frame_agent2_est[2, 0] < 0:
agent1_in_frame_agent2_est[2, 0] += 2*np.pi
self.assertEqual(True, np.allclose(agent2_in_frame_agent1_true, agent2_in_frame_agent1_est))
self.assertEqual(True, np.allclose(agent1_in_frame_agent2_true, agent1_in_frame_agent2_est))
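# Inferred from the four frame tests above (not taken from the dse_lib source):
# agent2_to_frame_agent1_3D appears to implement the usual SE(2) change of frame,
#     t_rel     = R(theta1)^T * (t2 - t1)   # position of agent 2 in agent 1's frame
#     theta_rel = theta2 - theta1           # heading, wrapped into [0, 2*pi) by the tests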
def test_from_frame_0(self):
##############################################################################
rospy.loginfo("-D- test_from_frame_1")
agent1_global = np.array([[1], [1], [7/4.0*np.pi]])
agent2_global = np.array([[0.4], [-0.6], [5/4.0*np.pi]])
agent2_in_frame_agent1_true = np.array([[0.5*np.sqrt(2)], [-1.1*np.sqrt(2)], [3/2.0*np.pi]])
agent1_in_frame_agent2_true = np.array([[-1.1*np.sqrt(2)], [-0.5*np.sqrt(2)], [1/2.0*np.pi]])
agent2_in_frame_agent1_est = dse_lib.agent2_to_frame_agent1_3D(agent1_global, agent2_global)
agent2_global_est = dse_lib.agent2_from_frame_agent1_3D(agent1_global, agent2_in_frame_agent1_est)
if agent2_global_est[2, 0] < 0:
agent2_global_est[2, 0] += 2*np.pi
self.assertEqual(True, np.allclose(agent2_global, agent2_global_est))
def test_from_frame_1(self):
##############################################################################
rospy.loginfo("-D- test_from_frame_1")
# 1 is fixed, 2 is this, 3 is object
agent1_global = np.array([[1], [1], [7 / 4.0 * np.pi]])
agent2_global = np.array([[0.4], [-0.6], [5 / 4.0 * np.pi]])
agent3_global = np.array([[1], [0], [np.pi/2]])
agent1_in_frame_agent2 = dse_lib.agent2_to_frame_agent1_3D(agent2_global, agent1_global)
agent1_in_frame_agent3 = dse_lib.agent2_to_frame_agent1_3D(agent3_global, agent1_global)
agent2_in_frame_agent1 = dse_lib.agent2_to_frame_agent1_3D(agent1_global, agent2_global)
agent2_in_frame_agent3 = dse_lib.agent2_to_frame_agent1_3D(agent3_global, agent2_global)
agent3_in_frame_agent1 = dse_lib.agent2_to_frame_agent1_3D(agent1_global, agent3_global)
agent3_in_frame_agent2 = dse_lib.agent2_to_frame_agent1_3D(agent2_global, agent3_global)
z_true = agent3_in_frame_agent2
x = agent3_in_frame_agent1
agent2_global_est = dse_lib.agent2_from_frame_agent1_3D(agent1_global, agent2_in_frame_agent1)
agent3_global_est = dse_lib.agent2_from_frame_agent1_3D(agent1_global, agent3_in_frame_agent1)
z_est = dse_lib.agent2_to_frame_agent1_3D(agent2_global_est, agent3_global_est)
z_est_2 = dse_lib.agent2_to_frame_agent1_3D(agent2_in_frame_agent1, agent3_in_frame_agent1)
self.assertEqual(True, np.allclose(z_true, z_est))
self.assertEqual(True, np.allclose(z_true, z_est_2))
def test_dual_relative_obs_jacobian_3D_0(self):
##############################################################################
rospy.loginfo("-D- test_from_frame_1")
# 1 is fixed, 2 is this, 3 is object
agent1_global = np.array([[1], [1], [7 / 4.0 * np.pi]])
agent2_global = np.array([[0.4], [-0.6], [5 / 4.0 * np.pi]])
agent3_global = np.array([[1.5], [0.321], [np.pi/2]])
agent1_in_frame_agent2 = dse_lib.agent2_to_frame_agent1_3D(agent2_global, agent1_global)
agent1_in_frame_agent3 = dse_lib.agent2_to_frame_agent1_3D(agent3_global, agent1_global)
agent2_in_frame_agent1 = dse_lib.agent2_to_frame_agent1_3D(agent1_global, agent2_global)
agent2_in_frame_agent3 = dse_lib.agent2_to_frame_agent1_3D(agent3_global, agent2_global)
agent3_in_frame_agent1 = dse_lib.agent2_to_frame_agent1_3D(agent1_global, agent3_global)
agent3_in_frame_agent2 = dse_lib.agent2_to_frame_agent1_3D(agent2_global, agent3_global)
z_true = agent3_in_frame_agent2
z_fun = dse_lib.agent2_to_frame_agent1_3D(agent2_global, agent3_global)
H = np.array(dse_lib.dual_relative_obs_jacobian_3D(agent2_global, agent3_global))
x = np.append(agent2_global, agent3_global)[:, None]
z_h = H.dot(x)
z_h = np.array([z_h[0][0][0], z_h[1][0][0], z_h[2][0]])[:, None]
self.assertEqual(True, np.allclose(z_true, z_fun))
self.assertEqual(True, np.allclose(z_true, z_h))
def test_jacobian_fixed_to_obs_3D_0(self):
##############################################################################
rospy.loginfo("-D- test_from_frame_1")
# 1 is fixed, 2 is this, 3 is object
agent1_global = np.array([[1], [1], [7 / 4.0 * np.pi]])
agent2_global = np.array([[0.4], [-0.6], [5 / 4.0 * np.pi]])
agent3_global = np.array([[1.5], [0.321], [30.1234*np.pi/2]])
agent1_in_frame_agent2 = dse_lib.agent2_to_frame_agent1_3D(agent2_global, agent1_global)
agent1_in_frame_agent3 = dse_lib.agent2_to_frame_agent1_3D(agent3_global, agent1_global)
agent2_in_frame_agent1 = dse_lib.agent2_to_frame_agent1_3D(agent1_global, agent2_global)
agent2_in_frame_agent3 = dse_lib.agent2_to_frame_agent1_3D(agent3_global, agent2_global)
agent3_in_frame_agent1 = dse_lib.agent2_to_frame_agent1_3D(agent1_global, agent3_global)
agent3_in_frame_agent2 = dse_lib.agent2_to_frame_agent1_3D(agent2_global, agent3_global)
z_true = agent3_in_frame_agent2
x = agent3_in_frame_agent1
z_fun = dse_lib.agent2_to_frame_agent1_3D(agent2_in_frame_agent1, agent3_in_frame_agent1)
H = np.array(dse_lib.jacobian_fixed_to_obs_3D(agent2_in_frame_agent1, agent3_in_frame_agent1))
x = np.append(agent2_in_frame_agent1, agent3_in_frame_agent1)[:, None]
z_h = H.dot(x)
z_h = np.array([z_h[0][0][0], z_h[1][0][0], z_h[2][0]])[:, None]
self.assertEqual(True, np.allclose(z_true, z_fun))
self.assertEqual(True, np.allclose(z_true, z_h))
def test_observation_jacobian_zeros(self):
##############################################################################
rospy.loginfo("-D- test_observation_jacobian_0")
agent1 = 0
agent2 = 1
x = np.zeros((12, 1))
H = np.zeros((3, 12))
H = dse_lib.h_camera_3D(H, x, 0, agent1, agent2, self.dim_state, self.dim_obs)
z_jac = H.dot(x)
agent1_row_min = self.dim_state * agent1
agent1_row_max = agent1_row_min + self.dim_obs
agent2_row_min = self.dim_state * agent2
agent2_row_max = agent2_row_min + self.dim_obs
x1 = x[agent1_row_min:agent1_row_max]
t1 = x1[0:2]
R1 = dse_lib.theta_2_rotm(x1[2, 0])
x2 = x[agent2_row_min:agent2_row_max]
t2 = x2[0:2]
R2 = dse_lib.theta_2_rotm(x2[2, 0])
zt = (np.transpose(R1).dot(t2) - np.transpose(R1).dot(t1))[:, 0]
zR = np.transpose(R1).dot(R2)
zr = [-np.arccos(zR[0, 0])]
z_true = np.concatenate((zt, zr))[:, None]
rospy.loginfo("-D- z_jac (%d, %d)" % (np.shape(z_jac)[0], np.shape(z_jac)[1]))
rospy.loginfo("-D- z_jac (%d, %d)" % (np.shape(z_true)[0], np.shape(z_true)[1]))
self.assertEqual(True, np.allclose(z_true, z_jac))
def test_observation_jacobian_translation(self):
##############################################################################
rospy.loginfo("-D- test_observation_jacobian_0")
agent1 = 0
agent2 = 1
x = np.transpose([1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])[:, None]
H = np.zeros((3, 12))
H = dse_lib.h_camera_3D(H, x, 0, agent1, agent2, self.dim_state, self.dim_obs)
z_jac = H.dot(x)
agent1_row_min = self.dim_state * agent1
agent1_row_max = agent1_row_min + self.dim_obs
agent2_row_min = self.dim_state * agent2
agent2_row_max = agent2_row_min + self.dim_obs
x1 = x[agent1_row_min:agent1_row_max]
t1 = x1[0:2]
R1 = dse_lib.theta_2_rotm(x1[2, 0])
x2 = x[agent2_row_min:agent2_row_max]
t2 = x2[0:2]
R2 = dse_lib.theta_2_rotm(x2[2, 0])
zt = (np.transpose(R1).dot(t2) - np.transpose(R1).dot(t1))[:, 0]
zR = np.transpose(R1).dot(R2)
zr = [-np.arccos(zR[0, 0])]
z_true = np.concatenate((zt, zr))[:, None]
rospy.loginfo("-D- z_jac (%d, %d)" % (np.shape(z_jac)[0], np.shape(z_jac)[1]))
rospy.loginfo("-D- z_jac (%d, %d)" % (np.shape(z_true)[0], np.shape(z_true)[1]))
self.assertEqual(True, np.allclose(z_true, z_jac))
def test_observation_jacobian_translation_rotation(self):
##############################################################################
rospy.loginfo("-D- test_observation_jacobian_0")
agent1 = 0
agent2 = 1
x = np.transpose([1, 2, np.pi/2, 0, 0, 0, 0, 0, -np.pi/2, 0, 0, 0])[:, None]
H = np.zeros((3, 12))
H = dse_lib.h_camera_3D(H, x, 0, agent1, agent2, self.dim_state, self.dim_obs)
z_jac = H.dot(x)
agent1_row_min = self.dim_state * agent1
agent1_row_max = agent1_row_min + self.dim_obs
agent2_row_min = self.dim_state * agent2
agent2_row_max = agent2_row_min + self.dim_obs
x1 = x[agent1_row_min:agent1_row_max]
t1 = x1[0:2]
R1 = dse_lib.theta_2_rotm(x1[2, 0])
x2 = x[agent2_row_min:agent2_row_max]
t2 = x2[0:2]
R2 = dse_lib.theta_2_rotm(x2[2, 0])
zt = (np.transpose(R1).dot(t2) - np.transpose(R1).dot(t1))[:, 0]
zR = np.transpose(R1).dot(R2)
zr = [-np.arccos(zR[0, 0])]
z_true = np.concatenate((zt, zr))[:, None]
self.assertEqual(True, np.allclose(z_true, z_jac))
def test_extend_arrays_no_extension(self):
##############################################################################
rospy.loginfo("-D- test_extend_arrays_0")
dim_state = 12
id_list = np.arange(5)
observed_ids = id_list
n_ids = len(id_list)
Y_11 = np.eye((dim_state * n_ids))
y_11 = np.ones((dim_state * n_ids, 1))
x_11 = np.linalg.inv(Y_11).dot(y_11)
P_11 = np.linalg.inv(Y_11)
id_list_2, Y_11_2, y_11_2, P_11_2, x_11_2 = dse_lib.extend_arrays(observed_ids, id_list, Y_11, y_11, dim_state)
self.assertEqual(True, np.allclose(P_11, P_11_2))
self.assertEqual(True, np.allclose(x_11, x_11_2))
self.assertEqual(True, np.allclose(Y_11, Y_11_2))
self.assertEqual(True, np.allclose(y_11, y_11_2))
self.assertEqual(True, np.allclose(id_list, id_list_2))
def test_extend_arrays_add_1(self):
##############################################################################
rospy.loginfo("-D- test_extend_arrays_0")
dim_state = 12
id_list = np.arange(5)
observed_ids = np.arange(6)
n_ids = len(id_list)
Y_11 = np.eye((dim_state * n_ids))
y_11 = np.ones((dim_state * n_ids, 1))
x_11 = np.linalg.inv(Y_11).dot(y_11)
P_11 = np.linalg.inv(Y_11)
id_list_2, Y_11_2, y_11_2, P_11_2, x_11_2 = dse_lib.extend_arrays(observed_ids, id_list, Y_11, y_11, dim_state)
other functions to work with the new field.
global u, v, arrows, stacks, polar_tracker, dyn_coord
# take inputs and globally update them
update_variables()
# set radial tracker
polar_tracker = False
# take all these values, and the input from the field component boxes, to set up the field:
u, v = eq_to_comps(string_x, string_y, xg, yg)
# plot depending on chosen type of vector
if tensor.get() == 0:
arrows = False
stacks = True
elif tensor.get() == 1:
arrows = True
stacks = False
elif tensor.get() == 2:
arrows = True
stacks = True
# clear the current axis
main_axis.clear()
# deal with grids if user is in the LI tab
if tab_text == 'Line Integrals':
global LI_total, LI_coord, shape_area
# first, initialise variables again
LI_coord = []
LI_total = 0
flux = 0
shape_area = 0
ratio1 = 0
ratio2 = 0
# update the labels
LI_total_label.configure(text=LI_total)
flux_label.configure(text=flux)
shape_area_label.configure(text=shape_area)
ratio1_label.configure(text=ratio1)
ratio2_label.configure(text=ratio2)
# plot the grid
poly_grid_submit()
if tab_text == 'Dynamics':
if test_for_clearing_dyn == 1:
for a in range(len(dyn_coord)):
try:
exec('global ' + 'xy' + str(a) + '\n' + 'del ' + 'xy' + str(a))
except NameError:
pass
# then clear coordinates
dyn_coord = []
# create a figure and display it
stack_plot(xg, yg, main_axis, u, v, s_max, L, pt_den, fract, arrows, stacks, orientation, scale, w_head, h_head, 0, logartmic_scale_bool=logartmic_scale_bool)
canvas.draw()
# recolour pt_den to white, if it was red from polar plots
pt_den_entry.configure(bg='white')
# colour the x and y boxes green to show that these plot
x_comp_entry.configure(bg='#C0F6BB')
y_comp_entry.configure(bg='#C0F6BB')
# define a function that will respond to field selection in the drop-down menu
def field_selection_response(event):
global u, v, fract, calculus_form_tracker, polar_tracker, arrows, stacks
# clear the x and y component boxes
x_comp_entry.delete(0, 'end')
y_comp_entry.delete(0, 'end')
# get the index at which this entry is
selected_index = field_name_list.index(str(field_select_drop.get()))
# using that index, get the x and y components from their lists
# and insert these into x and y comp. entry boxes
x_comp_selected = field_x_list[selected_index]
y_comp_selected = field_y_list[selected_index]
x_comp_entry.insert(0, x_comp_selected)
y_comp_entry.insert(0, y_comp_selected)
# colour code to be able to distinguish what is being plotted
x_comp_entry.configure(bg='#C0F6BB')
y_comp_entry.configure(bg='#C0F6BB')
# now call the plot function to finalise all these onto the plot
# depending on the tab, use the correct 1-form plotting function
if tab_text == 'Ext. Alegebra':
# this always plots a 1-form for these responses
form_1_stacks_response()
else:
# check if the selected field is strictly a 1-form
# if so, change representation.
if selected_index == 7 or selected_index == 8 or selected_index == 9 or selected_index == 10:
# select stacks to be plotted
tensor.set(0)
# call a function to deal with this change too:
vect_type_response(tensor.get())
# respond to this by removing options unavailable to 1-forms in
# the main tab:
if click_opt_int != 0 and click_opt_int != 1:
click_option.set(0)
click_option_handler(click_option.get())
click_option_Deriv_btn.configure(state=tk.DISABLED)
click_option_Div_btn.configure(state=tk.DISABLED)
click_option_Curl_btn.configure(state=tk.DISABLED)
component_x_entry_label.configure(text='dx component')
component_y_entry_label.configure(text='dy component')
field_select_drop_label.configure(text='Select Pre-Defined 1-Form:')
# then, with all these set, call the plot function.
PLOT_response()
''' CUSTOMISATIONS '''
# define a function to respond to submitting arrowhead changes in the new window
def custom_submission():
# first, take from entry boxes, wanted parameters and make them global:
global w_head, h_head, fract, scale
w_head = float(w_entry.get())
h_head = float(h_entry.get())
fract = float(fract_entry.get())
scale = float(arr_scale_entry.get())
# DO not actually replot, just save these as globals
# then close the window
arrowH_opt_window.destroy()
# recolour pt_den to white, if it was red from polar plots
pt_den_entry.configure(bg='white')
# define a response function to open a new window when arrowh_btn is pressed:
def custom_btn_reponse():
global w_entry, h_entry, fract_entry, arr_scale_entry, arrowH_opt_window
# open a titled new window
arrowH_opt_window = tk.Toplevel()
arrowH_opt_window.title('optimisation settings')
# define and label and first entry, for width
tk.Label(arrowH_opt_window, text='arrowhead base width as sheet width fraction:').grid(row=0, column=0)
w_entry = tk.Entry(arrowH_opt_window, width=30, borderwidth=1)
w_entry.insert(0, w_head)
w_entry.grid(row=1, column=0)
# define and label second entry, for height
tk.Label(arrowH_opt_window, text='arrowhead perp. height as sheet length fraction:').grid(row=2, column=0)
h_entry = tk.Entry(arrowH_opt_window, width=30, borderwidth=1)
h_entry.insert(0, h_head)
h_entry.grid(row=3, column=0)
# define an entry for fract update, to change the size of each stack as a frac of graph size L
tk.Label(arrowH_opt_window, text='fraction of graph to be set as the stack size:').grid(row=4, column=0)
fract_entry = tk.Entry(arrowH_opt_window, width=30, borderwidth=1)
fract_entry.insert(0, fract)
fract_entry.grid(row=5, column=0)
# define an entry for the arrow scale, to linearly scale the arrow sizes
tk.Label(arrowH_opt_window, text='arrow size linear scaling:').grid(row=6, column=0)
arr_scale_entry = tk.Entry(arrowH_opt_window, width=30, borderwidth=1)
arr_scale_entry.insert(0, scale)
arr_scale_entry.grid(row=7, column=0)
# define a button to submit those changes:
submit_arr_btn = tk.Button(arrowH_opt_window, text='SAVE ALL', padx=20, pady=10, command=custom_submission)
submit_arr_btn.grid(row=8, column=0, pady=10)
# define a response function for the autoscale toggle button
def scale_toggle_response():
global ascale
if ascale.get() == 0:
# the button is off and has been clicked, therefore change the
# variable to on and the image to on
ascale.set(1)
ascale_toggle.configure(image=toggle_image_on)
ascale_toggle_LI.configure(image=toggle_image_on)
# for it to update, reclick whatever radiobutton is selected
# or, if stacks only is chosen, change it to both, to show some change
vect_type_response(tensor.get())
else:
# the button is on and has been clicked
# set it to off and change image
ascale.set(0)
ascale_toggle.configure(image=toggle_image_off)
ascale_toggle_LI.configure(image=toggle_image_off)
# for it to update, reclick whatever radiobutton is selected
# or, if stacks only is chosen, change it to both, to show some change
vect_type_response(tensor.get())
# define a function to respond to toggle for log scaling
def log_scale_toggle_response():
global logartmic_scale_bool
if logartmic_scale_tk.get() == 0:
# the button is off and has been clicked, therefore change the
# variable to on and the image to on
logartmic_scale_tk.set(1)
logartmic_scale_bool = 1
logartmic_scale_toggle.configure(image=toggle_image_on)
else:
# the button is on and has been clicked
# set it to off and change image
logartmic_scale_tk.set(0)
logartmic_scale_bool = 0
logartmic_scale_toggle.configure(image=toggle_image_off)
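# The two toggle handlers above repeat the same on/off pattern. The sketch below
# shows one way that pattern could be factored out; it is illustrative only, is
# never called in this file, and its argument names (tk_var, widgets, image_on,
# image_off, on_change) are hypothetical rather than existing globals.
def make_toggle_response(tk_var, widgets, image_on, image_off, on_change=None):
    """Return a callback that flips tk_var between 0 and 1 and updates widget images."""
    def respond():
        new_state = 0 if tk_var.get() == 1 else 1
        tk_var.set(new_state)
        image = image_on if new_state == 1 else image_off
        for widget in widgets:
            widget.configure(image=image)
        if on_change is not None:
            on_change(new_state)
    return respond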
''' POLAR PLOTS '''
# define a function to respond to plotting a polar grid
# takes the same field, but plots it on a polar grid
def Polar_grid_plot_response(tensor):
global xg, yg, u, v, s_max, pt_den_entry, polar_tracker
# set the polar tracker
polar_tracker = True
# set the number of sheets to use from input box
s_max = int(s_max_entry.get())
# the polar grid comes from globals already defined;
# to change it, change it in the polar field window.
# Its size, however, should be based on L,
# so use L to redefine the grid here.
L = float(L_entry.get())
# using these redefine the new polar grids
r = np.linspace(r_min, L, r_den)
theta = np.linspace(360/(theta_den-1), 360, theta_den) * np.pi/180
# mesh into a grid
rg, thetag = np.meshgrid(r, theta)
# convert grid to cartesian
xg = rg*np.cos(thetag)
yg = rg*np.sin(thetag)
# reevaluate the given fields with these new grids:
string_x = str(x_comp_entry.get())
string_y = str(y_comp_entry.get())
u, v = eq_to_comps(string_x, string_y, xg, yg)
# clear the plot that is already there:
main_axis.clear()
# deal with grids if user is in the LI tab
if tab_text == 'Line Integrals':
global LI_total, LI_coord, shape_area
# first, initialise variables again
LI_coord = []
LI_total = 0
flux = 0
shape_area = 0
ratio1 = 0
ratio2 = 0
# update the labels
LI_total_label.configure(text=LI_total)
flux_label.configure(text=flux)
shape_area_label.configure(text=shape_area)
ratio1_label.configure(text=ratio1)
ratio2_label.configure(text=ratio2)
# plot the grid
poly_grid_submit()
# use the selected tensor to determine what to plot:
# 0 is just stacks, 1 is for only arrows and 2 is for both
if tensor == 0:
arrows = False
stacks = True
elif tensor == 1:
arrows = True
stacks = False
elif tensor == 2:
arrows = True
stacks = True
# using those, create the plot and display it
stack_plot(xg, yg, main_axis, u, v, s_max, L, pt_den, fract, arrows, stacks, orientation, scale, w_head, h_head, 0, logartmic_scale_bool=logartmic_scale_bool)
canvas.draw()
# colour pt_den red to show that it is not appropriate to use it now;
# the number of points along r and theta must instead be defined in the additional window
pt_den_entry.configure(bg='red')
# colour the x and y boxes green to show that these plot
x_comp_entry.configure(bg='#C0F6BB')
y_comp_entry.configure(bg='#C0F6BB')
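# For reference, the polar-grid construction used in Polar_grid_plot_response can
# be reproduced in isolation as below. This helper is a sketch and is not called
# anywhere; its default arguments are hypothetical stand-ins for r_min, L, r_den
# and theta_den defined elsewhere in this file.
def example_polar_grid(r_min=0.2, r_max=5.0, r_den=10, theta_den=25):
    """Return Cartesian meshgrids (xg, yg) sampled on a polar grid."""
    import numpy as np
    r = np.linspace(r_min, r_max, r_den)
    theta = np.linspace(360 / (theta_den - 1), 360, theta_den) * np.pi / 180
    rg, thetag = np.meshgrid(r, theta)
    xg = rg * np.cos(thetag)
    yg = rg * np.sin(thetag)
    return xg, yg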
# define a response to the SAVE button in the polar grid customisation window
def save_polar_grid():
global r_min, | |
``ResourcePool``. When methods return a value of this class as a
return value, the attribute will be an identifier for the resource
type: ``ResourcePool``.
If this attribute is None, the system will use the resource pool of the
source virtual machine. If this results in a conflict due to other
placement parameters, the virtual machine clone operation will
fail.
:type host: :class:`str` or ``None``
:param host: Host onto which the cloned virtual machine should be placed.
If ``host`` and ``resourcePool`` are both specified,
``resourcePool`` must belong to ``host``.
If ``host`` and ``cluster`` are both specified, ``host`` must be a
member of ``cluster``.. This attribute was added in vSphere API
7.0.0.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``HostSystem``. When methods return a value of this class as a
return value, the attribute will be an identifier for the resource
type: ``HostSystem``.
If this attribute is unset: if ``resourcePool`` is also unset, the
cloned virtual machine will use the host of the source virtual
machine; if ``resourcePool`` is set and the target is a standalone
host, that host is used; if ``resourcePool`` is set and the target
is a DRS cluster, a host will be picked by DRS; if ``resourcePool``
is set and the target is a cluster without DRS, InvalidArgument
will be thrown.
:type cluster: :class:`str` or ``None``
:param cluster: Cluster into which the cloned virtual machine should be placed.
If ``cluster`` and ``resourcePool`` are both specified,
``resourcePool`` must belong to ``cluster``.
If ``cluster`` and ``host`` are both specified, ``host`` must be a
member of ``cluster``.. This attribute was added in vSphere API
7.0.0.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``ClusterComputeResource``. When methods return a value of this
class as a return value, the attribute will be an identifier for
the resource type: ``ClusterComputeResource``.
If ``resourcePool`` or ``host`` is specified, it is recommended
that this attribute be None.
:type datastore: :class:`str` or ``None``
:param datastore: Datastore on which the cloned virtual machine's configuration state
should be stored. This datastore will also be used for any virtual
disks that are created as part of the virtual machine clone
operation unless individually overridden. This attribute was added
in vSphere API 7.0.0.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``Datastore``. When methods return a value of this class as a
return value, the attribute will be an identifier for the resource
type: ``Datastore``.
If this attribute is None, the system will use the datastore of the
source virtual machine.
"""
self.folder = folder
self.resource_pool = resource_pool
self.host = host
self.cluster = cluster
self.datastore = datastore
VapiStruct.__init__(self)
ClonePlacementSpec._set_binding_type(type.StructType(
'com.vmware.vcenter.VM.clone_placement_spec', {
'folder': type.OptionalType(type.IdType()),
'resource_pool': type.OptionalType(type.IdType()),
'host': type.OptionalType(type.IdType()),
'cluster': type.OptionalType(type.IdType()),
'datastore': type.OptionalType(type.IdType()),
},
ClonePlacementSpec,
False,
None))
class CloneSpec(VapiStruct):
"""
Document-based clone spec. This class was added in vSphere API 7.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
source=None,
name=None,
placement=None,
disks_to_remove=None,
disks_to_update=None,
power_on=None,
guest_customization_spec=None,
):
"""
:type source: :class:`str`
:param source: Virtual machine to clone from. This attribute was added in vSphere
API 7.0.0.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``VirtualMachine``. When methods return a value of this class as a
return value, the attribute will be an identifier for the resource
type: ``VirtualMachine``.
:type name: :class:`str`
:param name: Virtual machine name. This attribute was added in vSphere API
7.0.0.
:type placement: :class:`VM.ClonePlacementSpec` or ``None``
:param placement: Virtual machine placement information. This attribute was added in
vSphere API 7.0.0.
If this attribute is None, the system will use the values from the
source virtual machine. If specified, each field will be used for
placement. If the fields result in disjoint placement the operation
will fail. If the fields along with the placement values of the
source virtual machine result in disjoint placement the operation
will fail.
:type disks_to_remove: :class:`set` of :class:`str` or ``None``
:param disks_to_remove: Set of Disks to Remove. This attribute was added in vSphere API
7.0.0.
When clients pass a value of this class as a parameter, the
attribute must contain identifiers for the resource type:
``com.vmware.vcenter.vm.hardware.Disk``. When methods return a
value of this class as a return value, the attribute will contain
identifiers for the resource type:
``com.vmware.vcenter.vm.hardware.Disk``.
If None, all disks will be copied. If the same identifier is in
:attr:`VM.CloneSpec.disks_to_update` InvalidArgument fault will be
returned.
:type disks_to_update: (:class:`dict` of :class:`str` and :class:`VM.DiskCloneSpec`) or ``None``
:param disks_to_update: Map of Disks to Update. This attribute was added in vSphere API
7.0.0.
When clients pass a value of this class as a parameter, the key in
the attribute :class:`dict` must be an identifier for the resource
type: ``com.vmware.vcenter.vm.hardware.Disk``. When methods return
a value of this class as a return value, the key in the attribute
:class:`dict` will be an identifier for the resource type:
``com.vmware.vcenter.vm.hardware.Disk``.
If None, all disks will be copied to the datastore specified in the
:attr:`VM.ClonePlacementSpec.datastore` attribute of
:attr:`VM.CloneSpec.placement`. If the same identifier is in
:attr:`VM.CloneSpec.disks_to_remove` InvalidArgument fault will be
thrown.
:type power_on: :class:`bool` or ``None``
:param power_on: Attempt to perform a :attr:`VM.CloneSpec.power_on` after clone.
This attribute was added in vSphere API 7.0.0.
If None, the virtual machine will not be powered on.
:type guest_customization_spec: :class:`VM.GuestCustomizationSpec` or ``None``
:param guest_customization_spec: Guest customization spec to apply to the virtual machine after the
virtual machine is deployed. This attribute was added in vSphere
API 7.0.0.
If None, the guest operating system is not customized after clone.
"""
self.source = source
self.name = name
self.placement = placement
self.disks_to_remove = disks_to_remove
self.disks_to_update = disks_to_update
self.power_on = power_on
self.guest_customization_spec = guest_customization_spec
VapiStruct.__init__(self)
CloneSpec._set_binding_type(type.StructType(
'com.vmware.vcenter.VM.clone_spec', {
'source': type.IdType(resource_types='VirtualMachine'),
'name': type.StringType(),
'placement': type.OptionalType(type.ReferenceType(__name__, 'VM.ClonePlacementSpec')),
'disks_to_remove': type.OptionalType(type.SetType(type.IdType())),
'disks_to_update': type.OptionalType(type.MapType(type.IdType(), type.ReferenceType(__name__, 'VM.DiskCloneSpec'))),
'power_on': type.OptionalType(type.BooleanType()),
'guest_customization_spec': type.OptionalType(type.ReferenceType(__name__, 'VM.GuestCustomizationSpec')),
},
CloneSpec,
False,
None))
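# Illustrative construction of a clone spec (not part of the generated bindings;
# identifiers such as 'vm-123' and 'datastore-10' are hypothetical):
#
#     spec = VM.CloneSpec(
#         source='vm-123',
#         name='cloned-vm',
#         placement=VM.ClonePlacementSpec(datastore='datastore-10'),
#         power_on=True,
#     )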
class DiskRelocateSpec(VapiStruct):
"""
Document-based disk relocate spec. This class was added in vSphere API
7.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
datastore=None,
):
"""
:type datastore: :class:`str` or ``None``
:param datastore: Destination datastore to relocate disk. This attribute was added in
vSphere API 7.0.0.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``Datastore``. When methods return a value of this class as a
return value, the attribute will be an identifier for the resource
type: ``Datastore``.
This attribute is currently required. In the future, if this
attribute is unset, the disk will use the datastore specified in
:attr:`VM.RelocatePlacementSpec.datastore` attribute of
:attr:`VM.RelocateSpec.placement`.
"""
self.datastore = datastore
VapiStruct.__init__(self)
DiskRelocateSpec._set_binding_type(type.StructType(
'com.vmware.vcenter.VM.disk_relocate_spec', {
'datastore': type.OptionalType(type.IdType()),
},
DiskRelocateSpec,
False,
None))
class RelocatePlacementSpec(VapiStruct):
"""
The ``VM.RelocatePlacementSpec`` class contains information used to change
the placement of an existing virtual machine within the vCenter inventory.
This class was added in vSphere API 7.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
folder=None,
resource_pool=None,
host=None,
cluster=None,
datastore=None,
):
"""
:type folder: :class:`str` or ``None``
:param folder: Virtual machine folder into which the virtual machine should be
placed. This attribute was added in vSphere API 7.0.0.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type: ``Folder``.
When methods return a value of this class as a return value, the
attribute will be an identifier for the resource type: ``Folder``.
If this attribute is None, the virtual machine will stay in the
current folder.
:type resource_pool: :class:`str` or ``None``
:param resource_pool: Resource pool into | |
p2sec2))
ax.set_title(f"{abf.abfID} Pulse 1 [{timeNote}]")
addComments(abf)
ax.set_ylabel(abf.sweepLabelY)
ax.set_xlabel(abf.sweepLabelX)
plotFigSave(abf, tag=f"generic-paired-pulses", labelAxes=False)
# RATIO
plotFigNew(abf)
ax = plt.gcf().add_subplot(1, 1, 1) # pulse2/pulse1 ratio
ratioAvg = sweepAvgs2/sweepAvgs1
# how should this be measured? (one standard option is sketched in paired_pulse_ratio_error below)
ratioErr = np.sqrt(np.power(sweepErr1, 2)+np.power(sweepErr2, 2))
ratioErr = sweepErr2*np.nan
ax.errorbar(sweepTimes, ratioAvg, ratioErr, ms=20,
marker='.', ls='-', capsize=5, color='r')
ax.set_title(f"{abf.abfID} Paired Pulse Ratio [p2/p1]")
addComments(abf)
ax.set_ylabel(abf.sweepLabelY)
ax.set_xlabel(abf.sweepLabelX)
plotFigSave(abf, tag=f"generic-paired-pulse-ratio", labelAxes=False)
return
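# One conventional answer to the "how should this be measured?" question in
# generic_paired_pulse above is standard error propagation for a ratio R = A2/A1:
#     sigma_R = |R| * sqrt((sigma_1 / A1)**2 + (sigma_2 / A2)**2)
# The helper below sketches that formula; it is not called by the original code.
def paired_pulse_ratio_error(avg1, err1, avg2, err2):
    """Propagated standard error of the element-wise ratio avg2/avg1."""
    import numpy as np
    avg1 = np.asarray(avg1, dtype=float)
    avg2 = np.asarray(avg2, dtype=float)
    ratio = avg2 / avg1
    rel_err = np.sqrt((np.asarray(err1) / avg1) ** 2 + (np.asarray(err2) / avg2) ** 2)
    return np.abs(ratio) * rel_err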
def generic_memtest_ramp(abf, msg=False):
"""analyzes the ramp part of a sweep to calculate Cm"""
log.debug("generic plot: Cm ramp")
assert(isinstance(abf,pyabf.ABF))
plotFigNew(abf)
# plot the memtest
ax1 = plt.gcf().add_subplot(121)
pyabf.plot.sweeps(abf, axis=ax1)
ax1.set_title("All Sweeps (overlay)")
if msg:
bbox = dict(facecolor='white', edgecolor='black',
boxstyle='round,pad=.4')
ax1.text(0.96, 0.96, msg, verticalalignment='top',
horizontalalignment='right', fontsize=12, bbox=bbox,
transform=plt.gca().transAxes, family='monospace')
# plot the ramp
ax2 = plt.gcf().add_subplot(222)
ax2.set_title("Cm Ramp (phase)")
for sweepNumber in abf.sweepList:
abf.setSweep(sweepNumber)
cmInfo = pyabf.memtest._cm_ramp_points_and_voltages(abf)
if not cmInfo:
continue
rampPoints, rampVoltages = cmInfo
rampData = abf.sweepY[rampPoints[0]:rampPoints[2]]
color = plt.get_cmap("winter")(sweepNumber/abf.sweepCount)
trace1 = rampData[:int(len(rampData)/2)][::-1]
trace2 = rampData[int(len(rampData)/2):]
ax2.plot(trace1, color=color, alpha=.2)
ax2.plot(trace2, color=color, alpha=.2)
ax2.set_ylabel("current (pA)")
ax2.set_xlabel("data point (index)")
# plot the cms
cms = pyabf.memtest.cm_ramp_valuesBySweep(abf)
cmAvg = np.mean(cms)
cmErr = np.std(cms)
ax4 = plt.gcf().add_subplot(224)
ax4.set_title("Cm = %.02f +/- %.02f pF" % (cmAvg, cmErr))
ax4.set_ylabel("capacitance (pA)")
ax4.set_xlabel("sweep number")
ax4.plot(cms, '.', ms=10, alpha=.8)
ax4.axhline(cmAvg, color='r', ls='--', lw=2, alpha=.5)
plotFigSave(abf, tag="memtest", labelAxes=False)
def generic_ap_freqPerSweep(abf):
"""
Create a plot showing the AP frequency by sweep.
"""
log.debug("generic plot: AP Frequency Per Sweep")
assert isinstance(abf, pyabf.ABF)
apsPerSweep = [0]*abf.sweepCount
sweepTimesSec = np.arange(abf.sweepCount)*abf.sweepLengthSec
for sweep in abf.sweepList:
abf.setSweep(sweep)
sweepApPoints = pyabf.ap.ap_points_currentSweep(abf)
apsPerSweep[sweep] = len(sweepApPoints)
plotFigNew(abf)
plt.grid(alpha=.5,ls='--')
plt.plot(sweepTimesSec, apsPerSweep, '.-', ms=10)
plt.ylabel("Sweep AP Count")
plt.xlabel("Experiment Time (seconds)")
addComments(abf)
plotFigSave(abf, tag="apFreqBySweep", labelAxes=False)
def generic_trace_before_after_drug(abf, minAfterDrug = 2, minBeforeDrug = .5, isolateEpoch=3):
"""create a plot showing the average of n sweeps before and after the first drug."""
assert isinstance(abf, pyabf.ABF)
for drugNumber in range(len(abf.tagComments)):
# determine ideal drug times for before/after drug applied
baselineSweepTimeMin = abf.tagTimesMin[drugNumber] - minBeforeDrug
baselineSweep = int(baselineSweepTimeMin*60/abf.sweepLengthSec)
baselineSweep = max(0, baselineSweep)
drugSweepTimeMin = abf.tagTimesMin[drugNumber] + minAfterDrug
drugSweep = int(drugSweepTimeMin*60/abf.sweepLengthSec)
drugSweep = min(drugSweep, abf.sweepCount-1)
# isolate just the part of the trace we are interested in
if (isolateEpoch):
i1 = pyabf.stimulus.epochPoints(abf)[isolateEpoch]
i2 = pyabf.stimulus.epochPoints(abf)[isolateEpoch+1]
else:
i1=0
i2=abf.sweepPointCount
# load ramp data from ideal times
pyabf.filter.gaussian(abf, 3)
abf.setSweep(baselineSweep)
rampBaseline = abf.sweepY[i1:i2]
abf.setSweep(drugSweep)
rampDrug = abf.sweepY[i1:i2]
rampDiff = rampDrug - rampBaseline
# create the plot
plotFigNew(abf)
ax1 = plt.gcf().add_subplot(211)
ax2 = plt.gcf().add_subplot(212)
ax1.set_title("Representative traces around drug %d (%s)"%(drugNumber, abf.tagComments[drugNumber]))
ax1.plot(abf.sweepX[i1:i2], rampBaseline, label="-%.02f min"%minBeforeDrug, lw=2, alpha=.7)
ax1.plot(abf.sweepX[i1:i2], rampDrug, label="+%.02f min"%minAfterDrug, lw=2, alpha=.7)
ax1.legend()
pyabf.filter.gaussian(abf, 3) # apply lowpass filter
ax2.set_title("Ramp Difference")
ax2.plot(abf.sweepX[i1:i2], rampDiff, lw=2, alpha=.7, color='C3')
ax2.axhline(0,color='k',ls='--')
ax2.legend()
plotFigSave(abf, tag="ramp-drug%02d"%drugNumber)
return
# Code defines which routines or generic graphs to use for each protocol
def unknown(abf):
"""unknown protocol."""
log.debug("running method for unknown protocol")
assert isinstance(abf, pyabf.ABF)
totalLengthSec = abf.sweepCount*abf.sweepLengthSec
if abf.sweepLengthSec < 10 and totalLengthSec < 60*2:
generic_overlay(abf, unknown=True)
else:
generic_continuous(abf, unknown=True)
generic_average_over_time(abf)
def protocol_0111(abf):
"""0111 continuous ramp.pro"""
assert isinstance(abf, pyabf.ABF)
msToPlot = 20
ptToPlot = msToPlot*abf.dataPointsPerMs
abf.setSweep(0)
segY = abf.sweepY[0:ptToPlot]
timeAPsec = 0
# isolate the 1st AP we find
for sweep in abf.sweepList:
abf.setSweep(sweep)
apPoints = pyabf.ap.ap_points_currentSweep(abf)
# ignore APs close to the start of the sweep
apPoints = [x for x in apPoints if x > ptToPlot]
if len(apPoints):
pt1 = int(apPoints[0]-ptToPlot/2)
segY = abf.sweepY[pt1:pt1+ptToPlot]
timeAPsec = apPoints[0]/abf.dataRate+sweep*abf.sweepLengthSec
break
# prepare the first derivative and X units
segYd = np.diff(segY)
segYd = np.append(segYd, segYd[-1])
segYd = segYd * abf.dataRate / 1000
segX = np.arange(len(segYd))-len(segYd)/2
segX = segX/abf.dataRate*1000
plotFigNew(abf)
# plot the first AP (mV)
ax1 = plt.gcf().add_subplot(2, 2, 1)
pyabf.plot.sweeps(abf, continuous=True, axis=ax1,
linewidth=1, color='C0', alpha=1)
zoomSec = .25
ax1.set_title("First AP: Voltage")
ax1.axis([timeAPsec-zoomSec, timeAPsec+zoomSec, None, None])
# plot the first AP (V/sec)
ax2 = plt.gcf().add_subplot(2, 2, 2)
ax2.set_title("First AP: Velocity")
ax2.set_ylabel("Velocity (mV/ms)")
ax2.set_xlabel("time (ms)")
ax2.axhline(-100, color='k', ls=':', lw=2, alpha=.2)
ax2.plot(segX, segYd, color='r')
ax2.margins(0, .05)
# plot the whole ABF
ax3 = plt.gcf().add_subplot(2, 2, 3)
pyabf.plot.sweeps(abf, continuous=True, axis=ax3,
linewidth=1, color='C0', alpha=1)
zoomSec = .25
ax3.set_title("Full Signal")
ax3.margins(0, .05)
# plot the first AP (V/sec)
ax4 = plt.gcf().add_subplot(2, 2, 4)
ax4.set_title("First AP: Phase Plot")
ax4.set_xlabel("Membrane Potential (mV)")
ax4.set_ylabel("Velocity (mV/ms)")
ax4.plot(segY, segYd, '.-', color='C1')
ax4.margins(.1, .1)
ax4.axis([ax1.axis()[2], ax1.axis()[3], ax2.axis()[2], ax2.axis()[3]])
plotFigSave(abf, tag=f"rampAP", labelAxes=False)
def protocol_0101(abf):
"""0112 0101 tau -10pA"""
assert isinstance(abf, pyabf.ABF)
generic_overlay_average(abf, baselineSec1=0, baselineSec2=0.1)
return
def protocol_0102(abf):
"""0102 IC sine sweep.pro"""
assert isinstance(abf, pyabf.ABF)
generic_overlay(abf)
return
def protocol_0112(abf):
"""0112 steps dual -50 to 150 step 10.pro"""
assert isinstance(abf, pyabf.ABF)
generic_ap_steps(abf)
protocol_0111(abf)
return
def protocol_0113(abf):
"""0113 steps dual -100 to 300 step 25.pro"""
assert isinstance(abf, pyabf.ABF)
generic_ap_steps(abf)
protocol_0111(abf)
return
def protocol_0114(abf):
"""0114 steps dual -100 to 2000 step 100.pro"""
assert isinstance(abf, pyabf.ABF)
generic_ap_steps(abf)
protocol_0111(abf)
return
def protocol_0121(abf):
"""0121 IC sine sweep 0 +- 20 pA.pro"""
assert isinstance(abf, pyabf.ABF)
generic_overlay(abf)
return
def protocol_0122(abf):
"""0122 steps single -50 to 150 step 10.pro"""
assert isinstance(abf, pyabf.ABF)
generic_ap_steps(abf)
return
def protocol_0201(abf):
"""0201 memtest.pro"""
assert isinstance(abf, pyabf.ABF)
msg = pyabf.memtest.step_summary(abf)
if 2 in abf._epochPerDacSection.nEpochType:
# there is a ramp and a step
generic_memtest_ramp(abf, msg)
else:
# there is no ramp
plotFigNew(abf)
ax1 = plt.gcf().add_subplot(111)
pyabf.plot.sweeps(abf, axis=ax1)
ax1.set_title("MemTest (without ramp)")
bbox = dict(facecolor='white', edgecolor='black',
boxstyle='round,pad=.4')
ax1.text(0.96, 0.96, msg, verticalalignment='top',
horizontalalignment='right',
transform=plt.gca().transAxes, fontsize=16,
bbox=bbox, family='monospace')
plotFigSave(abf, tag="memtest")
return
def protocol_0202(abf):
"""0202 IV dual"""
assert isinstance(abf, pyabf.ABF)
generic_iv(abf, .8, 1, 10, -110)
return
def protocol_0203(abf):
"""0203 IV fast.pro"""
assert isinstance(abf, pyabf.ABF)
generic_iv(abf, .8, 1, 5, -110)
return
def protocol_0204(abf):
"""0204 Cm ramp.pro"""
assert isinstance(abf, pyabf.ABF)
generic_memtest_ramp(abf)
return
def protocol_0221(abf):
"""0221 VC sine sweep 70 +- 5 mV.pro"""
assert isinstance(abf, pyabf.ABF)
generic_overlay(abf)
return
def protocol_0222(abf):
"""0222 VC sine sweep 70 +- 5 mV.pro"""
assert isinstance(abf, pyabf.ABF)
generic_overlay(abf)
return
def protocol_0301(abf):
"""0301 ic gap free.pro"""
assert isinstance(abf, pyabf.ABF)
generic_continuous(abf)
return
def protocol_0302(abf):
"""0302 IC 10s IC ramp drug.pro"""
assert isinstance(abf, pyabf.ABF)
generic_ap_freqPerSweep(abf)
generic_trace_before_after_drug(abf, isolateEpoch=None)
return
def protocol_0303(abf):
"""0303 IC 10s opto.pro"""
assert isinstance(abf, pyabf.ABF)
plotFigNew(abf)
shadeDigitalOutput(abf, 4, color='g')
verticalOffset = 0
for sweep in abf.sweepList:
abf.setSweep(sweep)
if abf.sweepUnitsY == "mV":
traceColor = 'b'
else:
traceColor = 'r'
plt.plot(abf.sweepX, abf.sweepY + verticalOffset*sweep, color=traceColor, lw=.5, alpha=.5)
plt.margins(0,.1)
plt.title(f"OVerlay of {abf.sweepCount} sweeps")
plotFigSave(abf, tag="opto-stacked", labelAxes=True)
return
def protocol_0312(abf):
"""0312 ic cosine 10s.pro"""
assert isinstance(abf, pyabf.ABF)
generic_continuous(abf)
generic_ap_freqPerSweep(abf)
generic_trace_before_after_drug(abf, isolateEpoch=None)
return
def protocol_0401(abf):
"""0401 VC 2s MT-70.pro"""
assert isinstance(abf, pyabf.ABF)
generic_continuous(abf)
generic_average_over_time(abf, timeSec1=1)
generic_memtest_over_time(abf)
return
def protocol_0402(abf):
"""0402 VC 2s MT-50.pro"""
assert isinstance(abf, pyabf.ABF)
generic_continuous(abf)
generic_average_over_time(abf, timeSec1=1)
generic_memtest_over_time(abf)
return
def protocol_0403(abf):
"""0402 VC 2s MT-70.pro"""
assert isinstance(abf, pyabf.ABF)
generic_continuous(abf)
generic_average_over_time(abf, timeSec1=1)
generic_memtest_over_time(abf)
return
def protocol_0404(abf):
"""0404 VC 2s MT2-70 ramp -110-50.pro"""
assert isinstance(abf, pyabf.ABF)
generic_average_over_time(abf, timeSec1=1.5)
generic_trace_before_after_drug(abf)
generic_memtest_over_time(abf)
return
def protocol_0405(abf):
"""0404 VC 2s MT2-70 ramp -110-50.pro"""
assert isinstance(abf, pyabf.ABF)
generic_first_sweep(abf)
generic_continuous(abf)
generic_average_over_time(abf, timeSec1=1)
generic_memtest_over_time(abf)
return
def protocol_0406(abf):
"""0406 VC 10s MT-50.pro"""
assert isinstance(abf, pyabf.ABF)
generic_continuous(abf)
generic_memtest_over_time(abf)
return
def protocol_0408(abf):
"""0408 VC 10s two step.pro"""
assert isinstance(abf, pyabf.ABF)
generic_continuous(abf)
generic_memtest_over_time(abf)
return
def protocol_0409(abf):
"""0406 VC 10s MT-50.pro"""
assert isinstance(abf, pyabf.ABF)
generic_continuous(abf)
generic_average_over_time(abf, 0, .4)
generic_memtest_over_time(abf)
return
def protocol_0501(abf):
"""0501 opto -50.pro"""
assert isinstance(abf, pyabf.ABF)
timeSec1, timeSec2 = 1.10, 1.30
p1, p2 = int(timeSec1*abf.dataRate), int(timeSec2*abf.dataRate)
# plot every sweep and the average of all sweeps
plotFigNew(abf)
shadeDigitalOutput(abf, 4)
for sweep in abf.sweepList:
abf.setSweep(sweep)
abf.sweepY[:p1] = np.nan
abf.sweepY[p2:] = np.nan
plt.plot(abf.sweepX, abf.sweepY, alpha=.2, color='.5')
avg = pyabf.sweep.averageTrace(abf, timeSec1=timeSec1, timeSec2=timeSec2)
abf.sweepY *= np.nan
abf.sweepY[p1:p2] = avg
plt.plot(abf.sweepX, abf.sweepY)
plotFigSave(abf, tag="opto-avg", labelAxes=True)
# make stacked graph
plotFigNew(abf)
shadeDigitalOutput(abf, 4)
vertOffset = False
for sweep in abf.sweepList:
abf.setSweep(sweep)
if not vertOffset:
vertOffset = np.max(abf.sweepY[p1:p2]) - np.min(abf.sweepY[p1:p2])
vertOffset *= 1.2
plt.plot(abf.sweepX[p1:p2], abf.sweepY[p1:p2] +
vertOffset*sweep, color='b', alpha=.7)
plotFigSave(abf, tag="opto-stacked", labelAxes=True)
return
def protocol_0502(abf):
"""0502 opto 0.pro"""
assert isinstance(abf, pyabf.ABF)
plotFigNew(abf)
shadeDigitalOutput(abf, 4, color='g')
verticalOffset = 0
for sweep in abf.sweepList:
abf.setSweep(sweep)
if abf.sweepUnitsY == "mV":
traceColor = 'b'
else:
traceColor = 'r'
plt.plot(abf.sweepX, abf.sweepY + verticalOffset*sweep, color=traceColor, lw=.5, alpha=.5)
plt.margins(0,.1)
plt.title(f"OVerlay of {abf.sweepCount} sweeps")
plotFigSave(abf, tag="opto-stacked", labelAxes=True)
return
def protocol_0912(abf):
"""0912 VC 20s stim PPR 40ms.pro"""
assert isinstance(abf, pyabf.ABF)
p1sec = 2.31703
p2sec = p1sec + .05
pulseWidth = .04
generic_continuous(abf)
generic_average_over_time(abf, timeSec1=5)
generic_first_sweep(abf, 2, 3)
generic_paired_pulse(abf, p1sec, p1sec+pulseWidth,
p2sec, p2sec+pulseWidth)
generic_memtest_over_time(abf)
def protocol_0xxx(abf):
"""Protocols are tagged with this during development."""
assert isinstance(abf, pyabf.ABF)
if abf.protocol in ["0xxx VC 10s MT-50 stim", "0xxx VC 10s MT-70 stim"]:
protocol_0912(abf)
else:
unknown(abf)
### These protocols were made for Kyle and Haley's ABF1 aging project data
def protocol_KK01(abf):
"""Kyle's old experiments: memtest-like | |
"""
Evaluation function of an individual
"""
from __future__ import division
import os
import pandas as pd
import numpy as np
from cea.optimization.master import generation
from cea.optimization.master import summarize_network
from cea.optimization.constants import *
from cea.optimization.master import cost_model
from cea.optimization.slave import cooling_main
from cea.optimization.slave import heating_main
from cea.optimization import supportFn
from cea.technologies import substation
import check
from cea.optimization import slave_data
from cea.optimization.slave import electricity_main
from cea.optimization.slave.seasonal_storage import storage_main
from cea.optimization.slave import natural_gas_main
import summarize_individual
# +++++++++++++++++++++++++++++++++++++
# Main objective function evaluation
# ++++++++++++++++++++++++++++++++++++++
def evaluation_main(individual, building_names, locator, solar_features, network_features, gv, config, prices, lca,
ind_num, gen):
"""
This function evaluates an individual
:param individual: list with values of the individual
:param building_names: list with names of buildings
:param locator: locator class
:param solar_features: solar features call to class
:param network_features: network features call to class
:param gv: global variables class
:param optimization_constants: class containing constants used in optimization
:param config: configuration file
:param prices: class of prices used in optimization
:type individual: list
:type building_names: list
:type locator: string
:type solar_features: class
:type network_features: class
:type gv: class
:type config: class
:type prices: class
:return: Resulting values of the objective functions: costs, CO2 emissions, primary energy
:rtype: tuple
"""
# Check the consistency of the individual or create a new one
individual = check_invalid(individual, len(building_names), config)
# Initialize objective functions costs, CO2 and primary energy
costs_USD = 0
GHG_tonCO2 = 0
PEN_MJoil = 0
Q_heating_uncovered_design_W = 0
Q_heating_uncovered_annual_W = 0
# Create the string representation of the individual
DHN_barcode, DCN_barcode, DHN_configuration, DCN_configuration = supportFn.individual_to_barcode(individual, building_names)
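# The DHN/DCN barcodes are strings of '0'/'1' flags, one character per building,
# marking whether that building is connected to the heating or cooling network.
# The hex(int(barcode, 2)) calls below just turn the bit-string into a compact
# file-name suffix, e.g. (hypothetical) "1010" -> '0xa'.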
if DHN_barcode.count("1") == gv.num_tot_buildings:
network_file_name_heating = "Network_summary_result_all.csv"
Q_DHNf_W = pd.read_csv(locator.get_optimization_network_all_results_summary('all'), usecols=["Q_DHNf_W"]).values
Q_heating_max_W = Q_DHNf_W.max()
elif DHN_barcode.count("1") == 0:
network_file_name_heating = "Network_summary_result_all.csv"
Q_heating_max_W = 0
else:
network_file_name_heating = "Network_summary_result_" + hex(int(str(DHN_barcode), 2)) + ".csv"
if not os.path.exists(locator.get_optimization_network_results_summary(DHN_barcode)):
total_demand = supportFn.createTotalNtwCsv(DHN_barcode, locator)
building_names = total_demand.Name.values
# Run the substation and distribution routines
substation.substation_main(locator, total_demand, building_names, DHN_configuration, DCN_configuration,
Flag=True)
summarize_network.network_main(locator, total_demand, building_names, config, gv, DHN_barcode)
Q_DHNf_W = pd.read_csv(locator.get_optimization_network_results_summary(DHN_barcode), usecols=["Q_DHNf_W"]).values
Q_heating_max_W = Q_DHNf_W.max()
if DCN_barcode.count("1") == gv.num_tot_buildings:
network_file_name_cooling = "Network_summary_result_all.csv"
if individual[N_HEAT * 2] == 1: # if heat recovery is ON, then only need to satisfy cooling load of space cooling and refrigeration
Q_DCNf_W = pd.read_csv(locator.get_optimization_network_all_results_summary('all'), usecols=["Q_DCNf_space_cooling_and_refrigeration_W"]).values
else:
Q_DCNf_W = pd.read_csv(locator.get_optimization_network_all_results_summary('all'), usecols=["Q_DCNf_space_cooling_data_center_and_refrigeration_W"]).values
Q_cooling_max_W = Q_DCNf_W.max()
elif DCN_barcode.count("1") == 0:
network_file_name_cooling = "Network_summary_result_all.csv"
Q_cooling_max_W = 0
else:
network_file_name_cooling = "Network_summary_result_" + hex(int(str(DCN_barcode), 2)) + ".csv"
if not os.path.exists(locator.get_optimization_network_results_summary(DCN_barcode)):
total_demand = supportFn.createTotalNtwCsv(DCN_barcode, locator)
building_names = total_demand.Name.values
# Run the substation and distribution routines
substation.substation_main(locator, total_demand, building_names, DHN_configuration, DCN_configuration,
Flag=True)
summarize_network.network_main(locator, total_demand, building_names, config, gv, DCN_barcode)
if individual[N_HEAT * 2] == 1: # if heat recovery is ON, then only need to satisfy cooling load of space cooling and refrigeration
Q_DCNf_W = pd.read_csv(locator.get_optimization_network_results_summary(DCN_barcode), usecols=["Q_DCNf_space_cooling_and_refrigeration_W"]).values
else:
Q_DCNf_W = pd.read_csv(locator.get_optimization_network_results_summary(DCN_barcode), usecols=["Q_DCNf_space_cooling_data_center_and_refrigeration_W"]).values
Q_cooling_max_W = Q_DCNf_W.max()
Q_heating_nom_W = Q_heating_max_W * (1 + Q_MARGIN_FOR_NETWORK)
Q_cooling_nom_W = Q_cooling_max_W * (1 + Q_MARGIN_FOR_NETWORK)
# Modify the individual with the extra GHP constraint
try:
check.GHPCheck(individual, locator, Q_heating_nom_W, gv)
except Exception:
print "No GHP constraint check possible \n"
# Export to context
master_to_slave_vars = calc_master_to_slave_variables(individual, Q_heating_max_W, Q_cooling_max_W, building_names, ind_num, gen)
master_to_slave_vars.network_data_file_heating = network_file_name_heating
master_to_slave_vars.network_data_file_cooling = network_file_name_cooling
master_to_slave_vars.total_buildings = len(building_names)
master_to_slave_vars.DHN_barcode = DHN_barcode
master_to_slave_vars.DCN_barcode = DCN_barcode
if master_to_slave_vars.number_of_buildings_connected_heating > 1:
if DHN_barcode.count("0") == 0:
master_to_slave_vars.fNameTotalCSV = locator.get_total_demand()
else:
master_to_slave_vars.fNameTotalCSV = os.path.join(locator.get_optimization_network_totals_folder(),
"Total_%(DHN_barcode)s.csv" % locals())
else:
master_to_slave_vars.fNameTotalCSV = locator.get_optimization_substations_total_file(DHN_barcode)
if master_to_slave_vars.number_of_buildings_connected_cooling > 1:
if DCN_barcode.count("0") == 0:
master_to_slave_vars.fNameTotalCSV = locator.get_total_demand()
else:
master_to_slave_vars.fNameTotalCSV = os.path.join(locator.get_optimization_network_totals_folder(),
"Total_%(DCN_barcode)s.csv" % locals())
else:
master_to_slave_vars.fNameTotalCSV = locator.get_optimization_substations_total_file(DCN_barcode)
# Thermal Storage Calculations; Run storage optimization
costs_storage_USD, GHG_storage_tonCO2, PEN_storage_MJoil = storage_main.storage_optimization(locator, master_to_slave_vars, lca, prices, config)
costs_USD += costs_storage_USD
GHG_tonCO2 += GHG_storage_tonCO2
PEN_MJoil += PEN_storage_MJoil
# District Heating Calculations
if config.district_heating_network:
if DHN_barcode.count("1") > 0:
(PEN_heating_MJoil, GHG_heating_tonCO2, costs_heating_USD, Q_heating_uncovered_design_W,
Q_heating_uncovered_annual_W) = heating_main.heating_calculations_of_DH_buildings(locator,
master_to_slave_vars, gv,
config, prices, lca)
else:
GHG_heating_tonCO2 = 0
costs_heating_USD = 0
PEN_heating_MJoil = 0
else:
GHG_heating_tonCO2 = 0
costs_heating_USD = 0
PEN_heating_MJoil = 0
costs_USD += costs_heating_USD
GHG_tonCO2 += GHG_heating_tonCO2
PEN_MJoil += PEN_heating_MJoil
# District Cooling Calculations
if gv.ZernezFlag == 1:
costs_cooling_USD, GHG_cooling_tonCO2, PEN_cooling_MJoil = 0, 0, 0
elif config.district_cooling_network:
reduced_timesteps_flag = False
(costs_cooling_USD, GHG_cooling_tonCO2, PEN_cooling_MJoil) = cooling_main.cooling_calculations_of_DC_buildings(locator, master_to_slave_vars, network_features, prices, lca, config, reduced_timesteps_flag)
else:
costs_cooling_USD, GHG_cooling_tonCO2, PEN_cooling_MJoil = 0, 0, 0
costs_USD += costs_cooling_USD
GHG_tonCO2 += GHG_cooling_tonCO2
PEN_MJoil += PEN_cooling_MJoil
# District Electricity Calculations
(costs_electricity_USD, GHG_electricity_tonCO2, PEN_electricity_MJoil) = electricity_main.electricity_calculations_of_all_buildings(DHN_barcode, DCN_barcode, locator, master_to_slave_vars, network_features, gv, prices, lca, config)
costs_USD += costs_electricity_USD
GHG_tonCO2 += GHG_electricity_tonCO2
PEN_MJoil += PEN_electricity_MJoil
# Natural Gas Import Calculations. Prices, GHG and PEN are already included in the various sections.
# This is to save the files for further processing and plots
natural_gas_main.natural_gas_imports(master_to_slave_vars, locator, config)
# Capex Calculations
print "Add extra costs"
(costs_additional_USD, GHG_additional_tonCO2, PEN_additional_MJoil) = cost_model.addCosts(building_names, locator, master_to_slave_vars, Q_heating_uncovered_design_W,
Q_heating_uncovered_annual_W, solar_features, network_features, gv, config, prices, lca)
costs_USD += costs_additional_USD
GHG_tonCO2 += GHG_additional_tonCO2
PEN_MJoil += PEN_additional_MJoil
summarize_individual.summarize_individual_main(master_to_slave_vars, building_names, individual, solar_features, locator, config)
# Converting costs into float64 to avoid longer values
costs_USD = np.float64(costs_USD)
GHG_tonCO2 = np.float64(GHG_tonCO2)
PEN_MJoil = np.float64(PEN_MJoil)
print ('Total costs = ' + str(costs_USD))
print ('Total CO2 = ' + str(GHG_tonCO2))
print ('Total prim = ' + str(PEN_MJoil))
# Saving capacity details of the individual
return costs_USD, GHG_tonCO2, PEN_MJoil, master_to_slave_vars, individual
# +++++++++++++++++++++++++++++++++++
# Boundary conditions
# +++++++++++++++++++++++++++++++++++
def check_invalid(individual, nBuildings, config):
"""
This function rejects individuals out of the bounds of the problem
It can also generate a new individual, to replace the rejected individual
:param individual: individual sent for checking
:param nBuildings: number of buildings
:param config: configuration object
:type individual: list
:type nBuildings: int
:type config: class
:return: new individual if necessary
:rtype: list
"""
valid = True
for i in range(N_HEAT):
if individual[2 * i] > 0 and individual[2 * i + 1] < 0.01:
oldValue = individual[2 * i + 1]
shareGain = oldValue - 0.01
individual[2 * i + 1] = 0.01
for rank in range(N_HEAT):
if individual[2 * rank] > 0 and i != rank:
individual[2 * rank + 1] += individual[2 * rank + 1] / (1 - oldValue) * shareGain
elif individual[2*i] == 0:
individual[2*i + 1] = 0
frank = N_HEAT * 2 + N_HR
for i in range(N_SOLAR):
if individual[frank + 2 * i + 1] < 0:
individual[frank + 2 * i + 1] = 0
sharePlants = 0
for i in range(N_HEAT):
sharePlants += individual[2 * i + 1]
if abs(sharePlants - 1) > 1E-3:
valid = False
shareSolar = 0
nSol = 0
for i in range(N_SOLAR):
nSol += individual[frank + 2 * i]
shareSolar += individual[frank + 2 * i + 1]
if nSol > 0 and abs(shareSolar - 1) > 1E-3:
valid = False
if config.district_cooling_network: # This is a temporary fix, need to change it in an elaborate method
for i in range(N_SOLAR - 1):
solar = i + 1
individual[2 * N_HEAT + N_HR + 2*solar] = 0
individual[2 * N_HEAT + N_HR + 2 * solar + 1] = 0
heating_part = 2 * N_HEAT + N_HR + 2 * N_SOLAR + INDICES_CORRESPONDING_TO_DHN
for i in range(N_COOL):
if individual[heating_part + 2 * i] > 0 and individual[heating_part + 2 * i + 1] < 0.01:
oldValue = individual[heating_part + 2 * i + 1]
shareGain = oldValue - 0.01
individual[heating_part + 2 * i + 1] = 0.01
for rank in range(N_COOL):
if individual[heating_part + 2 * rank] > 0 and i != rank:
individual[heating_part + 2 * rank + 1] += individual[heating_part + 2 * rank + 1] / (1 - oldValue) * shareGain
elif individual[heating_part + 2*i] == 0:
individual[heating_part + 2 * i + 1] = 0
sharePlants = 0
for i in range(N_COOL):
sharePlants += individual[heating_part + 2 * i + 1]
if abs(sharePlants - 1) > 1E-3:
valid = False
if not valid:
newInd = generation.generate_main(nBuildings, config)
L = (N_HEAT + N_SOLAR) * 2 + N_HR
for i in range(L):
individual[i] = newInd[i]
return individual
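# Editorial worked example (assuming three active heating plants with shares
# [0.005, 0.595, 0.400]): plant 0 is active but below the 0.01 floor, so its
# share is raised to 0.01 and shareGain = 0.005 - 0.01 = -0.005. Each other
# active plant gives up a slice proportional to share / (1 - 0.005):
#   plant 1 -> 0.595 - 0.595 * 0.005 / 0.995 ~= 0.5920
#   plant 2 -> 0.400 - 0.400 * 0.005 / 0.995 ~= 0.3980
# so the shares still sum to 1 and the abs(sharePlants - 1) check passes.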
def calc_master_to_slave_variables(individual, Q_heating_max_W, Q_cooling_max_W, building_names, ind_num, gen):
"""
This function reads the list encoding a configuration and builds the
corresponding master-to-slave variables for the slave routines to use.
:param individual: list with the individual
:param Q_heating_max_W: peak heating | |
"""Module for launching MAPDL locally or connecting to a remote instance with gRPC."""
import platform
from glob import glob
import re
import warnings
import os
import appdirs
import tempfile
import socket
import time
import subprocess
from ansys.mapdl import core as pymapdl
from ansys.mapdl.core.misc import is_float, random_string, create_temp_dir, threaded
from ansys.mapdl.core.errors import (
LockFileException,
VersionError,
)
from ansys.mapdl.core.mapdl_grpc import MapdlGrpc
from ansys.mapdl.core.licensing import LicenseChecker, ALLOWABLE_LICENSES
from ansys.mapdl.core.mapdl import _MapdlCore
from ansys.mapdl.core import LOG
# settings directory
SETTINGS_DIR = appdirs.user_data_dir("ansys_mapdl_core")
if not os.path.isdir(SETTINGS_DIR):
try:
os.makedirs(SETTINGS_DIR)
except Exception:
warnings.warn(
"Unable to create settings directory.\n"
+ "Will be unable to cache MAPDL executable location"
)
CONFIG_FILE = os.path.join(SETTINGS_DIR, "config.txt")
ALLOWABLE_MODES = ["corba", "console", "grpc"]
LOCALHOST = "127.0.0.1"
MAPDL_DEFAULT_PORT = 50052
INTEL_MSG = """Due to incompatibilities between 'DMP', Windows and VPN connections,
the flag '-mpi INTELMPI' is overwritten by '-mpi msmpi'.
If you still want to use 'INTEL', set:
launch_mapdl(..., force_intel=True, additional_switches='-mpi INTELMPI')
Be aware of possible errors or unexpected behavior with this configuration.
"""
def _is_ubuntu():
"""Determine if running as Ubuntu
It's a bit complicated because sometimes the distribution is
Ubuntu, but the kernel has been recompiled and no longer has the
word "ubuntu" in it.
"""
# must be running linux for this to be True
if os.name != "posix":
return False
# gcc is installed by default
proc = subprocess.Popen("gcc --version", shell=True, stdout=subprocess.PIPE)
if 'ubuntu' in proc.stdout.read().decode().lower():
return True
# try lsb_release as this is more reliable
try:
import lsb_release
if lsb_release.get_distro_information()["ID"].lower() == "ubuntu":
return True
except ImportError:
# finally, check platform
return "ubuntu" in platform.platform().lower()
def _version_from_path(path):
"""Extract ansys version from a path. Generally, the version of
ANSYS is contained in the path:
C:/Program Files/ANSYS Inc/v202/ansys/bin/win64/ANSYS202.exe
/usr/ansys_inc/v211/ansys/bin/mapdl
Note that the version is inferred purely from the path, so the
executable must live under a versioned directory (e.g. ``v211``).
Parameters
----------
path : str
Path to the MAPDL executable
Returns
-------
int
Integer version number (e.g. 211).
"""
# expect v<ver>/ansys
# replace \\ with / to account for possible windows path
matches = re.findall(r"v(\d\d\d).ansys", path.replace("\\", "/"), re.IGNORECASE)
if not matches:
raise RuntimeError(f"Unable to extract Ansys version from {path}")
return int(matches[-1])
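# Illustrative behaviour (editorial, not a doctest in the original source):
# _version_from_path("C:/Program Files/ANSYS Inc/v202/ansys/bin/win64/ANSYS202.exe") -> 202
# _version_from_path("/usr/ansys_inc/v211/ansys/bin/mapdl") -> 211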
def close_all_local_instances(port_range=None):
"""Close all MAPDL instances within a port_range.
This function can be used when cleaning up from a failed pool or
batch run.
Parameters
----------
port_range : list, optional
Defaults to ``range(50000, 50200)``. Expand this range if
there are many potential instances of MAPDL in gRPC mode.
Examples
--------
Close all instances in the port range 50000 to 50199.
>>> import ansys.mapdl.core as pymapdl
>>> pymapdl.close_all_local_instances()
"""
if port_range is None:
port_range = range(50000, 50200)
@threaded
def close_mapdl(port, name='Closing mapdl thread.'):
try:
mapdl = MapdlGrpc(port=port, set_no_abort=False)
mapdl.exit()
except OSError:
pass
ports = check_ports(port_range)
for port, state in ports.items():
if state:
close_mapdl(port)
def check_ports(port_range, ip="localhost"):
"""Check the state of ports in a port range"""
ports = {}
for port in port_range:
ports[port] = port_in_use(port, ip)
return ports
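# Example of the returned mapping (editorial): with the default MAPDL port
# range this might look like {50052: True, 50053: False, ...}, where True
# means something is already bound to that port on the given host.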
def port_in_use(port, host=LOCALHOST):
"""Returns True when a port is in use at the given host.
Must actually "bind" the address. Just checking if we can create
a socket is insufficient as it's possible to run into permission
errors like:
- An attempt was made to access a socket in a way forbidden by its
access permissions.
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
try:
sock.bind((host, port))
return False
except Exception:
return True
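# A minimal sketch (editorial addition, not part of the original module) of how
# ``port_in_use`` can be combined with ``MAPDL_DEFAULT_PORT`` to pick the first
# free port, mirroring the behaviour described in ``launch_grpc``. The helper
# name ``_find_free_port`` is hypothetical.
def _find_free_port(start=MAPDL_DEFAULT_PORT, host=LOCALHOST):
    """Return the first port at or above ``start`` that is not already in use."""
    port = start
    while port_in_use(port, host):
        port += 1
    return port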
def create_ip_file(ip, path):
"""Create 'mylocal.ip' file required for ansys to change the IP of the gRPC server."""
file_name = os.path.join(path, "mylocal.ip")
with open(file_name, "w") as f:
f.write(ip)
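# Example (editorial): create_ip_file("192.168.1.10", "/tmp/mapdl_run") writes
# "/tmp/mapdl_run/mylocal.ip" containing the address, which MAPDL reads in
# order to change the IP that the gRPC server is exposed on.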
def launch_grpc(
exec_file="",
jobname="file",
nproc=2,
ram=None,
run_location=None,
port=MAPDL_DEFAULT_PORT,
ip=LOCALHOST,
additional_switches="",
override=True,
timeout=20,
verbose=False,
) -> tuple:
"""Start MAPDL locally in gRPC mode.
Parameters
----------
exec_file : str, optional
The location of the MAPDL executable. Will use the cached
location when left at the default (an empty string).
jobname : str, optional
MAPDL jobname. Defaults to ``'file'``.
nproc : int, optional
Number of processors. Defaults to 2.
ram : float, optional
Fixed amount of memory to request for MAPDL. If ``None``,
then MAPDL will use as much as available on the host machine.
run_location : str, optional
MAPDL working directory. Defaults to a temporary working
directory.
port : int
Port to launch MAPDL gRPC on. Final port will be the first
port available after (or including) this port.
additional_switches : str, optional
Additional switches for MAPDL, for example ``"-p aa_r"``, the
academic research license, would be added with:
- ``additional_switches="-p aa_r"``
Avoid adding switches like ``"-i"`` ``"-o"`` or ``"-b"`` as
these are already included to start up the MAPDL server. See
the notes section for additional details.
ip : str, optional
    IP address for the gRPC server to bind to. Defaults to localhost.
timeout : float, optional
    Maximum time, in seconds, to wait for MAPDL to start. Defaults to 20.
override : bool, optional
Attempts to delete the lock file at the run_location.
Useful when a prior MAPDL session has exited prematurely and
the lock file has not been deleted.
verbose : bool, optional
Print all output when launching and running MAPDL. Not
recommended unless debugging the MAPDL start. Default
``False``.
Returns
-------
port : int
Returns the port number that the gRPC instance started on.
Notes
-----
These are the MAPDL switch options as of 2020R2 applicable for
running MAPDL as a service via gRPC. Excluded switches such as
``"-j"`` are either not applicable or are set via keyword arguments.
-acc <device> : Enables the use of GPU hardware. See GPU
Accelerator Capability in the Parallel Processing Guide for more
information.
-amfg : Enables the additive manufacturing capability. Requires
an additive manufacturing license. For general information about
this feature, see AM Process Simulation in ANSYS Workbench.
-ansexe <executable> : Activates a custom mechanical APDL executable.
In the ANSYS Workbench environment, activates a custom
Mechanical APDL executable.
-custom <executable> : Calls a custom Mechanical APDL executable
See Running Your Custom Executable in the Programmer's Reference
for more information.
-db value : Initial memory allocation
Defines the portion of workspace (memory) to be used as the
initial allocation for the database. The default is 1024
MB. Specify a negative number to force a fixed size throughout
the run; useful on small memory systems.
-dis : Enables Distributed ANSYS
See the Parallel Processing Guide for more information.
-dvt : Enables ANSYS DesignXplorer advanced task (add-on).
Requires DesignXplorer.
-l <language> : Specifies a language file to use other than English
This option is valid only if you have a translated message file
in an appropriately named subdirectory in
``/ansys_inc/v201/ansys/docu`` or
``Program Files\\ANSYS\\Inc\\V201\\ANSYS\\docu``
-m <workspace> : Specifies the total size of the workspace
Workspace (memory) in megabytes used for the initial
allocation. If you omit the ``-m`` option, the default is 2 GB
(2048 MB). Specify a negative number to force a fixed size
throughout the run.
-machines <IP> : Specifies the distributed machines
Machines on which to run a Distributed ANSYS analysis. See
Starting Distributed ANSYS in the Parallel Processing Guide for
more information.
-mpi <value> : Specifies the type of MPI to use.
See the Parallel Processing Guide for more information.
-mpifile <appfile> : Specifies an existing MPI file
Specifies an existing MPI file (appfile) to be used in a
Distributed ANSYS run. See Using MPI Files in the Parallel
Processing Guide for more information.
-na <value>: Specifies the number of GPU accelerator devices
Number of GPU devices per machine or compute node when running
with the GPU accelerator feature. See GPU Accelerator Capability
in the Parallel Processing Guide for more information.
-name <value> : Defines Mechanical APDL parameters
Set mechanical APDL parameters at program start-up. The parameter
name must be at least two characters long. For details about
parameters, see the ANSYS Parametric Design Language Guide.
-p <productname> : ANSYS session product
Defines the ANSYS session product that will run during the
session. For more detailed information about the ``-p`` option,
see Selecting an ANSYS Product via the Command Line.
-ppf <license feature name> : HPC license
Specifies which HPC license to use during a parallel processing
run. See HPC Licensing in the Parallel Processing Guide for more
information.
-smp : Enables shared-memory parallelism.
See the Parallel Processing Guide for more information.
Examples
--------
Launch MAPDL using the default | |
<reponame>jonathanj/mantissa<filename>xmantissa/test/test_interstore.py
"""
Tests for the inter-store messaging module, L{xmantissa.interstore}.
This module contains tests for persistent messaging between different accounts.
"""
import gc
from datetime import timedelta
from zope.interface import implements
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from twisted.protocols.amp import Box, Command, Integer, String
from epsilon.extime import Time
from axiom.iaxiom import IScheduler
from axiom.store import Store
from axiom.errors import UnsatisfiedRequirement
from axiom.item import Item, POWERUP_BEFORE
from axiom.attributes import text, bytes, integer, boolean, inmemory
from axiom.userbase import LoginSystem, LoginMethod, LoginAccount
from axiom.dependency import installOn
from axiom.scheduler import TimedEvent
from xmantissa.interstore import (
# Public Names
MessageQueue, AMPMessenger, LocalMessageRouter, Value,
AMPReceiver, commandMethod, answerMethod, errorMethod,
SenderArgument, TargetArgument,
# Constants
AMP_MESSAGE_TYPE, AMP_ANSWER_TYPE, DELIVERY_ERROR,
# Error Types
ERROR_REMOTE_EXCEPTION, ERROR_NO_SHARE, ERROR_NO_USER, ERROR_BAD_SENDER,
# Private Names
_RETRANSMIT_DELAY, _QueuedMessage, _AlreadyAnswered, _FailedAnswer,
_AMPExposer, _AMPErrorExposer)
from xmantissa.sharing import getEveryoneRole, Identifier
from xmantissa.error import (
MessageTransportError, BadSender, UnknownMessageType, RevertAndRespond,
MalformedMessage)
from xmantissa.ixmantissa import IMessageReceiver, IMessageRouter
class SampleException(Exception):
"""
Something didn't happen because of a problem.
"""
class StubReceiver(Item):
"""
This is a message receiver that will store a message sent to it for
inspection by tests.
"""
implements(IMessageReceiver)
messageType = text(
doc="""
The message type which C{messageReceived} should put into its return
value.
""")
messageData = bytes(
doc="""
The message data which C{messageReceived} should put into its return
value.
""")
inconsistent = boolean(
doc="""
This value is set to True during the execution of C{messageReceived},
but False afterwards. If everything is properly transactional it
should never be observably false by other code.
""")
buggy = boolean(allowNone=False, default=False,
doc="""
C{messageReceived} should raise a L{SampleException}.
""")
badReturn = boolean(allowNone=False, default=False,
doc="""
C{messageReceived} should return L{None}.
""")
receivedCount = integer(default=0,
doc="""
This is a counter of the number of messages
received by C{messageReceived}.
""")
reciprocate = boolean(allowNone=False, default=False,
doc="""
C{messageReceived} should respond to its C{sender}
parameter with a symmetric message in addition to
answering.
""")
revertType = text(allowNone=True,
doc="""
If set, this specifies the type of the
L{RevertAndRespond} exception that C{messageReceived}
should raise.
""")
revertData = bytes(allowNone=True,
doc="""
If C{revertType} is set, this specifies the data of the
L{RevertAndRespond} exception that C{messageReceived}
should raise.
""")
def messageQueue(self):
"""
This is a temporary workaround; see ticket #2640 for details on the way
this method should be implemented in the future.
"""
return self.store.findUnique(MessageQueue)
def messageReceived(self, value, sender, receiver):
"""
A message was received. Increase the message counter and store its
contents.
"""
self.receivedCount += 1
self.messageType = value.type
self.messageData = value.data
self.inconsistent = True
if self.buggy:
raise SampleException("Sample Message")
if self.revertType is not None:
raise RevertAndRespond(Value(self.revertType,
self.revertData))
self.inconsistent = False
if self.badReturn:
return None
if self.reciprocate:
self.messageQueue().queueMessage(
receiver, sender, Value(value.type + u'.response',
value.data + ' response'))
return Value(u"custom.message.type", "canned response")
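# Editorial note: messageReceived is expected to return a Value; the buggy,
# revertType and badReturn attributes above let the tests exercise the error
# paths where the receiver raises, reverts with RevertAndRespond, or returns
# None instead of a Value.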
class StubSlowRouter(Item):
"""
Like L{LocalMessageRouter}, but don't actually deliver the messages until
the test forces them to be delivered.
By way of several parameters to `flushMessages`, this stub implementation
allows for all of the arbitrary ways in which a potential networked
implementation is allowed to behave - dropping messages, repeating
messages, and even failing in buggy ways.
Note: this must be kept in memory for the duration of any test using it.
@ivar messages: a list of (sender, target, value, messageID) tuples
received by routeMessage.
@ivar acks: a list of (deferred, (sender, target, value, messageID))
tuples, representing an answer received by routeAnswer and the deferred
that was returned to indicate its delivery.
"""
dummy = integer(
doc="""
No state on this item is persistent; this is just to satisfy Axiom's schema
requirement.
""")
messages = inmemory()
acks = inmemory()
def localRouter(self):
"""
Return a L{LocalMessageRouter} for this slow router's store.
"""
return LocalMessageRouter(self.store.findUnique(LoginSystem))
def activate(self):
"""
Initialize temporary list to queue messages.
"""
self.messages = []
self.acks = []
def routeMessage(self, sender, target, value, messageID):
"""
Stub implementation of L{IMessageRouter.routeMessage} that just appends
to a list in memory, and later delegates from that list to the local
router.
"""
self.messages.append((sender, target, value, messageID))
def routeAnswer(self, originalSender, originalTarget, value, messageID):
"""
Stub implementation of L{IMessageRouter.routeAnswer} that just
appends to a list in memory.
"""
D = Deferred()
self.acks.append((D, (originalSender, originalTarget, value,
messageID)))
return D
def flushMessages(self, dropAcks=False,
dropAckErrorType=MessageTransportError,
stallAcks=False,
repeatAcks=False):
"""
Delegate all messages queued in memory with routeMessage to the
specified local router.
@param dropAcks: a boolean, indicating whether to drop the answers
queued by routeAnswer.
@param dropAckErrorType: an exception type, indicating what exception
to errback the Deferreds returned by routeAnswer with.
@param stallAcks: a boolean, indicating whether to keep, but not act,
on the answers queued by routeAnswer.
@param repeatAcks: a boolean, indicating whether to repeat all of the
acks the next time flushMessages is called.
"""
m = self.messages[:]
self.messages = []
for message in m:
self.localRouter().routeMessage(*message)
if dropAcks:
for D, ack in self.acks:
D.errback(dropAckErrorType())
self.acks = []
if not stallAcks:
for D, ack in self.acks:
self.localRouter().routeAnswer(*ack).chainDeferred(D)
if repeatAcks:
# the Deferreds are used up, so we need a fresh batch for the
# next run-through (although these will be ignored)
self.acks = [(Deferred(), ack) for (D, ack) in self.acks]
else:
self.acks = []
def spuriousDeliveries(self):
"""
Simulate a faulty transport, and deliver all the currently pending
messages without paying attention to their results.
"""
for message in self.messages:
self.localRouter().routeMessage(*message)
class StubDeliveryConsequence(Item):
"""
This implements a delivery consequence.
@ivar responses: a tuple of (answer-type, answer-data, message-type,
message-data, sender, target), listing all the answers received by
answerReceived.
@ivar bucket: a list which will have this L{StubDeliveryConsequence}
appended to it when a successful message is processed.
"""
responses = inmemory()
bucket = inmemory()
invocations = integer(
"""
Counter, keeping track of how many times this consequence has been
invoked.
""",
default=0, allowNone=False)
succeeded = boolean(
"""
Did the action succeed? None if it hasn't completed, True if yes,
False if no.
""")
inconsistent = boolean(
"""
This should never be set to True. It's set to None by default, False
when the callback fully succeeds.
""")
buggy = boolean(
"""
Set this to cause 'success' to raise an exception.
""",
default=False,
allowNone=False)
def activate(self):
"""
Initialize the list of received responses.
"""
self.responses = []
self.bucket = []
def success(self):
"""
A response was received to the message. This will be executed in a
transaction. Raise an exception if this consequence is buggy.
"""
self.bucket.append(self)
self.inconsistent = True
self.invocations += 1
self.succeeded = True
if self.buggy:
raise SampleException()
self.inconsistent = False
def failure(self):
"""
The message could not be delivered for some reason. This will be
executed in a transaction. Raise an exception if this consequence is
buggy.
"""
self.invocations += 1
self.succeeded = False
def answerReceived(self, answerValue, originalValue,
originalSender, originalTarget):
"""
An answer was received.
"""
if answerValue.type == DELIVERY_ERROR:
self.failure()
else:
self.success()
# It's important that this happen after the "application" logic so that
# the tests will not see this set if an exception has been raised.
self.responses.append((answerValue.type, answerValue.data,
originalValue.type, originalValue.data,
originalSender, originalTarget))
class TimeFactory(object):
"""
Make a fake time factory.
"""
def __init__(self):
"""
Create a time factory with some default values.
"""
self.currentSeconds = 0.0
def advance(self):
"""
Advance the current time by one second.
"""
self.currentSeconds += 1.0
def next(self):
"""
Produce the next time in the sequence, then advance.
"""
self.advance()
return Time.fromPOSIXTimestamp(self.currentSeconds)
def peek(self):
"""
Return the value that will come from the next call to 'next'.
"""
return Time.fromPOSIXTimestamp(self.currentSeconds + 1)
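# Illustrative behaviour (editorial): starting from currentSeconds == 0.0,
# peek() reports Time.fromPOSIXTimestamp(1.0) without advancing; next() then
# returns that same 1.0 timestamp and advances, so a subsequent next() yields
# 2.0.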
class SingleSiteMessagingTests(TestCase):
"""
These are tests for messaging within a single configured site store.
"""
def setUp(self):
"""
Create a site store with two users that can send messages to each
other.
"""
self.siteStore = Store()
self.time = TimeFactory()
self.loginSystem = LoginSystem(store=self.siteStore)
installOn(self.loginSystem, self.siteStore)
self.aliceAccount = self.loginSystem.addAccount(
u"alice", u"example.com", u"asdf", internal=True)
self.bobAccount = self.loginSystem.addAccount(
u"bob", u"example.com", u"asdf", internal=True)
self.aliceStore, self.aliceQueue = self.accountify(
self.aliceAccount.avatars.open())
self.bobStore, self.bobQueue = self.accountify(
self.bobAccount.avatars.open())
# I need to make a target object with a message receiver installed on
# it. Then I need to share that object.
self.receiver = StubReceiver(store=self.bobStore)
getEveryoneRole(self.bobStore).shareItem(self.receiver, u"suitcase")
self.retransmitDelta = timedelta(seconds=_RETRANSMIT_DELAY)
def accountify(self, userStore):
"""
Add a MessageQueue to the given user | |
# -*- coding: utf-8 -*-
from unittest import TestCase
from nose.tools import ok_, eq_
from datetime import date
from daynextprev import (
prev_month,
next_month,
is_less_ym,
months,
months_backward,
is_leapyear,
days_of_month,
next_day,
prev_day,
days,
days_backward,
this_week,
next_week,
prev_week,
W_MONDAY,
W_TUESDAY,
W_WEDNESDAY,
W_THURSDAY,
W_FRIDAY,
W_SATURDAY,
W_SUNDAY
)
class DayNextPrevTestCase(TestCase):
def test_constants(self):
eq_(0, W_MONDAY)
eq_(1, W_TUESDAY)
eq_(2, W_WEDNESDAY)
eq_(3, W_THURSDAY)
eq_(4, W_FRIDAY)
eq_(5, W_SATURDAY)
eq_(6, W_SUNDAY)
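# Editorial note: these constants follow the same convention as Python's
# datetime.date.weekday(), where Monday == 0 and Sunday == 6.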
def test_prev_month(self):
eq_((2017, 12), prev_month(2018, 1))
eq_((2018, 1), prev_month(2018, 2))
eq_((2018, 2), prev_month(2018, 3))
eq_((2018, 3), prev_month(2018, 4))
eq_((2018, 4), prev_month(2018, 5))
eq_((2018, 5), prev_month(2018, 6))
eq_((2018, 6), prev_month(2018, 7))
eq_((2018, 7), prev_month(2018, 8))
eq_((2018, 8), prev_month(2018, 9))
eq_((2018, 9), prev_month(2018, 10))
eq_((2018, 10), prev_month(2018, 11))
eq_((2018, 11), prev_month(2018, 12))
def test_next_month(self):
eq_((2018, 2), next_month(2018, 1))
eq_((2018, 3), next_month(2018, 2))
eq_((2018, 4), next_month(2018, 3))
eq_((2018, 5), next_month(2018, 4))
eq_((2018, 6), next_month(2018, 5))
eq_((2018, 7), next_month(2018, 6))
eq_((2018, 8), next_month(2018, 7))
eq_((2018, 9), next_month(2018, 8))
eq_((2018, 10), next_month(2018, 9))
eq_((2018, 11), next_month(2018, 10))
eq_((2018, 12), next_month(2018, 11))
eq_((2019, 1), next_month(2018, 12))
def test_is_less_ym(self):
# ym1 < ym2
ok_(is_less_ym((2018, 1), (2018, 2)))
ok_(is_less_ym((2015, 10), (2017, 3)))
# ym1 == ym2
ok_(not is_less_ym((2018, 1), (2018, 1)))
ok_(not is_less_ym((2000, 3), (2000, 3)))
# ym1 > ym2
ok_(not is_less_ym((2018, 2), (2018, 1)))
ok_(not is_less_ym((2015, 10), (2014, 12)))
def test_months(self):
months1 = months((2018, 1), (2018, 5), include_end=True)
months1_1 = []
for ym in months1:
months1_1.append(ym)
eq_(
[
(2018, 1),
(2018, 2),
(2018, 3),
(2018, 4),
(2018, 5)
],
months1_1
)
months1_2 = []
for ym in months1:
months1_2.append(ym)
eq_(
[
(2018, 1),
(2018, 2),
(2018, 3),
(2018, 4),
(2018, 5)
],
months1_2
)
months2 = list(months((2018, 1), (2018, 5), include_end=False))
eq_(
[
(2018, 1),
(2018, 2),
(2018, 3),
(2018, 4)
],
months2
)
months3 = list(months((2018, 2), (2018, 6)))
eq_(
[
(2018, 2),
(2018, 3),
(2018, 4),
(2018, 5),
(2018, 6)
],
months3
)
def test_months_backward(self):
months1 = months_backward((2018, 2), (2017, 11), include_end=True)
months1_1 = []
for ym in months1:
months1_1.append(ym)
eq_(
[
(2018, 2),
(2018, 1),
(2017, 12),
(2017, 11)
],
months1_1
)
months1_2 = list(months1)
eq_(
[
(2018, 2),
(2018, 1),
(2017, 12),
(2017, 11)
],
months1_2
)
months2 = list(months_backward((2018, 2), (2017, 11), include_end=False))
eq_(
[
(2018, 2),
(2018, 1),
(2017, 12)
],
months2
)
months3 = list(months_backward((2018, 3), (2018, 1)))
eq_(
[
(2018, 3),
(2018, 2),
(2018, 1)
],
months3
)
def test_is_leapyear(self):
# leap year (y % 400 == 0)
ok_(is_leapyear(1600))
ok_(is_leapyear(2000))
ok_(is_leapyear(2400))
# NOT leap year (y % 100 == 0)
ok_(not is_leapyear(1700))
ok_(not is_leapyear(1800))
ok_(not is_leapyear(1900))
ok_(not is_leapyear(2100))
ok_(not is_leapyear(2200))
ok_(not is_leapyear(2300))
# leap year (y % 4 == 0)
ok_(is_leapyear(1704))
ok_(is_leapyear(1784))
ok_(is_leapyear(1820))
ok_(is_leapyear(1896))
ok_(is_leapyear(1912))
ok_(is_leapyear(1924))
ok_(is_leapyear(2004))
ok_(is_leapyear(2056))
# NOT leap year
ok_(not is_leapyear(1711))
ok_(not is_leapyear(1757))
ok_(not is_leapyear(1791))
ok_(not is_leapyear(1805))
ok_(not is_leapyear(1822))
ok_(not is_leapyear(1861))
ok_(not is_leapyear(1918))
ok_(not is_leapyear(1942))
ok_(not is_leapyear(1997))
ok_(not is_leapyear(2035))
ok_(not is_leapyear(2078))
ok_(not is_leapyear(2095))
ok_(not is_leapyear(2101))
ok_(not is_leapyear(2149))
ok_(not is_leapyear(2189))
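# The cases above exercise the Gregorian leap-year rule: a year is a leap
# year when it is divisible by 4, except century years, which are leap
# years only when also divisible by 400.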
def test_days_of_month(self):
eq_(29, days_of_month(2016, 2))
eq_(28, days_of_month(2015, 2))
eq_(29, days_of_month(2000, 2))
eq_(28, days_of_month(1900, 2))
eq_(31, days_of_month(2017, 1))
eq_(28, days_of_month(2017, 2))
eq_(31, days_of_month(2017, 3))
eq_(30, days_of_month(2017, 4))
eq_(31, days_of_month(2017, 5))
eq_(30, days_of_month(2017, 6))
eq_(31, days_of_month(2017, 7))
eq_(31, days_of_month(2017, 8))
eq_(30, days_of_month(2017, 9))
eq_(31, days_of_month(2017, 10))
eq_(30, days_of_month(2017, 11))
eq_(31, days_of_month(2017, 12))
def test_next_day(self):
eq_((2016, 2, 28), next_day(2016, 2, 27))
eq_(date(2016, 2, 28), next_day(date(2016, 2, 27)))
eq_((2016, 2, 29), next_day(2016, 2, 28))
eq_(date(2016, 2, 29), next_day(date(2016, 2, 28)))
eq_((2016, 3, 1), next_day(2016, 2, 29))
eq_(date(2016, 3, 1), next_day(date(2016, 2, 29)))
eq_((2015, 2, 28), next_day(2015, 2, 27))
eq_(date(2015, 2, 28), next_day(date(2015, 2, 27)))
eq_((2015, 3, 1), next_day(2015, 2, 28))
eq_(date(2015, 3, 1), next_day(date(2015, 2, 28)))
eq_((2000, 2, 28), next_day(2000, 2, 27))
eq_(date(2000, 2, 28), next_day(date(2000, 2, 27)))
eq_((2000, 2, 29), next_day(2000, 2, 28))
eq_(date(2000, 2, 29), next_day(date(2000, 2, 28)))
eq_((2000, 3, 1), next_day(2000, 2, 29))
eq_(date(2000, 3, 1), next_day(date(2000, 2, 29)))
eq_((1900, 2, 28), next_day(1900, 2, 27))
eq_(date(1900, 2, 28), next_day(date(1900, 2, 27)))
eq_((1900, 3, 1), next_day(1900, 2, 28))
eq_(date(1900, 3, 1), next_day(date(1900, 2, 28)))
eq_((2017, 1, 1), next_day(2016, 12, 31))
eq_(date(2017, 1, 1), next_day(date(2016, 12, 31)))
eq_((2017, 2, 1), next_day(2017, 1, 31))
eq_(date(2017, 2, 1), next_day(date(2017, 1, 31)))
eq_((2017, 3, 1), next_day(2017, 2, 28))
eq_(date(2017, 3, 1), next_day(date(2017, 2, 28)))
eq_((2017, 4, 1), next_day(2017, 3, 31))
eq_(date(2017, 4, 1), next_day(date(2017, 3, 31)))
eq_((2017, 5, 1), next_day(2017, 4, 30))
eq_(date(2017, 5, 1), next_day(date(2017, 4, 30)))
eq_((2017, 6, 1), next_day(2017, 5, 31))
eq_(date(2017, 6, 1), next_day(date(2017, 5, 31)))
eq_((2017, 7, 1), next_day(2017, 6, 30))
eq_(date(2017, 7, 1), next_day(date(2017, 6, 30)))
eq_((2017, 7, 31), next_day(2017, 7, 30))
eq_(date(2017, 7, 31), next_day(date(2017, 7, 30)))
eq_((2017, 8, 1), next_day(2017, 7, 31))
eq_(date(2017, 8, 1), next_day(date(2017, 7, 31)))
eq_((2017, 9, 1), next_day(2017, 8, 31))
eq_(date(2017, 9, 1), next_day(date(2017, 8, 31)))
eq_((2017, 10, 1), next_day(2017, 9, 30))
eq_(date(2017, 10, 1), next_day(date(2017, 9, 30)))
eq_((2017, 11, 1), next_day(2017, 10, 31))
eq_(date(2017, 11, 1), next_day(date(2017, 10, 31)))
eq_((2017, 12, 1), next_day(2017, 11, 30))
eq_(date(2017, 12, 1), next_day(date(2017, 11, 30)))
eq_((2017, 12, 31), next_day(2017, 12, 30))
eq_(date(2017, 12, 31), next_day(date(2017, 12, 30)))
eq_((2018, 1, 1), next_day(2017, 12, 31))
eq_(date(2018, 1, 1), next_day(date(2017, 12, 31)))
eq_((2017, 5, 13), next_day(2017, 5, 12))
eq_(date(2017, 5, 13), next_day(date(2017, 5, 12)))
eq_((2017, 5, 14), next_day(2017, 5, 13))
eq_(date(2017, 5, 14), next_day(date(2017, 5, 13)))
eq_((2017, 5, 15), next_day(2017, 5, 14))
eq_(date(2017, 5, 15), next_day(date(2017, 5, 14)))
eq_((2017, 5, 16), next_day(2017, 5, 15))
eq_(date(2017, 5, 16), next_day(date(2017, 5, 15)))
def test_prev_day(self):
eq_((2016, 2, 27), prev_day(2016, 2, 28))
eq_(date(2016, 2, 27), prev_day(date(2016, 2, 28)))
eq_((2016, 2, 28), prev_day(2016, 2, 29))
eq_(date(2016, 2, 28), prev_day(date(2016, 2, 29)))
eq_((2016, 2, 29), prev_day(2016, 3, 1))
eq_(date(2016, 2, 29), prev_day(date(2016, 3, 1)))
eq_((2015, 2, 27), prev_day(2015, 2, 28))
eq_(date(2015, 2, 27), prev_day(date(2015, 2, 28)))
eq_((2015, 2, 28), prev_day(2015, 3, 1))
eq_(date(2015, 2, 28), prev_day(date(2015, 3, 1)))
eq_((2000, 2, 27), prev_day(2000, 2, 28))
eq_(date(2000, 2, 27), prev_day(date(2000, 2, 28)))
eq_((2000, 2, 28), prev_day(2000, 2, 29))
eq_(date(2000, 2, 28), prev_day(date(2000, 2, 29)))
eq_((2000, 2, 29), prev_day(2000, 3, 1))
eq_(date(2000, 2, 29), prev_day(date(2000, 3, 1)))
eq_((1900, 2, 27), prev_day(1900, 2, 28))
eq_(date(1900, 2, 27), prev_day(date(1900, 2, 28)))
eq_((1900, 2, 28), prev_day(1900, 3, 1))
eq_(date(1900, 2, 28), prev_day(date(1900, 3, 1)))
eq_((2016, 12, 31), prev_day(2017, 1, 1))
eq_(date(2016, 12, 31), prev_day(date(2017, 1, 1)))
eq_((2017, 1, 31), prev_day(2017, 2, 1))
eq_(date(2017, 1, 31), prev_day(date(2017, 2, 1)))
eq_((2017, 2, 28), prev_day(2017, 3, 1))
eq_(date(2017, 2, 28), prev_day(date(2017, 3, 1)))
eq_((2017, 3, 31), prev_day(2017, 4, 1))
eq_(date(2017, 3, 31), prev_day(date(2017, 4, 1)))
eq_((2017, 4, 30), prev_day(2017, 5, 1))
eq_(date(2017, 4, 30), prev_day(date(2017, 5, 1)))
eq_((2017, 5, 31), prev_day(2017, 6, 1))
eq_(date(2017, 5, 31), prev_day(date(2017, 6, 1)))
eq_((2017, 6, 30), prev_day(2017, 7, 1))
eq_(date(2017, 6, 30), prev_day(date(2017, 7, 1)))
eq_((2017, 7, 30), prev_day(2017, 7, 31))
eq_(date(2017, 7, 30), prev_day(date(2017, 7, 31)))
eq_((2017, 7, 31), prev_day(2017, 8, 1))
eq_(date(2017, 7, 31), prev_day(date(2017, 8, 1)))
eq_((2017, 8, 31), prev_day(2017, 9, 1))
eq_(date(2017, 8, 31), prev_day(date(2017, 9, 1)))
eq_((2017, 9, 30), prev_day(2017, 10, 1))
eq_(date(2017, 9, 30), prev_day(date(2017, 10, 1)))
eq_((2017, 10, 31), prev_day(2017, 11, 1))
eq_(date(2017, 10, 31), prev_day(date(2017, 11, 1)))
eq_((2017, 11, 30), prev_day(2017, 12, 1))
eq_(date(2017, 11, 30), prev_day(date(2017, 12, 1)))
eq_((2017, 12, 30), prev_day(2017, 12, 31))
eq_(date(2017, 12, 30), prev_day(date(2017, 12, 31)))
eq_((2017, 12, 31), prev_day(2018, 1, 1))
eq_(date(2017, 12, 31), prev_day(date(2018, 1, 1)))
eq_((2017, 5, 12), prev_day(2017, 5, 13))
eq_(date(2017, 5, 12), prev_day(date(2017, 5, 13)))
eq_((2017, 5, 13), prev_day(2017, 5, 14))
eq_(date(2017, 5, 13), prev_day(date(2017, 5, 14)))
eq_((2017, 5, 14), prev_day(2017, 5, 15))
eq_(date(2017, 5, 14), prev_day(date(2017, 5, 15)))
eq_((2017, 5, 15), prev_day(2017, 5, 16))
eq_(date(2017, 5, 15), prev_day(date(2017, 5, 16)))
def test_days(self):
days1 = days((2018, 1, 30), (2018, 2, 3), include_end=True)
days1_1 = []
for d in days1:
days1_1.append(d)
eq_(
[
(2018, 1, 30),
(2018, 1, 31),
(2018, 2, 1),
(2018, 2, 2),
(2018, 2, 3)
],
days1_1
)
days1_2 = list(days1)
eq_(
[
(2018, 1, 30),
(2018, 1, 31),
(2018, 2, 1),
(2018, 2, 2),
(2018, 2, 3)
],
days1_2
)
days2 = days((2018, 1, 30), (2018, 2, 3))
| |
"""
Unit tests for meta/networks/splitting/multi_splitting_base.py.
"""
import math
import random
from itertools import product
from typing import Dict, Any, List
import numpy as np
from scipy import stats
import torch
import torch.nn.functional as F
from gym.spaces import Box
from meta.networks.utils import init_base
from meta.networks.splitting import BaseMultiTaskSplittingNetwork
from meta.utils.estimate import alpha_to_threshold
from tests.helpers import DEFAULT_SETTINGS, get_obs_batch
from tests.networks.splitting import BASE_SETTINGS
from tests.networks.splitting.templates import (
TOL,
gradients_template,
backward_template,
grad_diffs_template,
grad_stats_template,
score_template,
)
def test_forward_shared() -> None:
"""
Test forward() when all regions of the splitting network are fully shared. The
function computed by the network should be f(x) = 3 * tanh(2 * tanh(x + 1) + 2) + 3.
"""
# Set up case.
dim = BASE_SETTINGS["obs_dim"] + BASE_SETTINGS["num_tasks"]
observation_subspace = Box(
low=-np.inf, high=np.inf, shape=(BASE_SETTINGS["obs_dim"],)
)
observation_subspace.seed(DEFAULT_SETTINGS["seed"])
hidden_size = dim
# Construct network.
network = BaseMultiTaskSplittingNetwork(
input_size=dim,
output_size=dim,
num_tasks=BASE_SETTINGS["num_tasks"],
num_layers=BASE_SETTINGS["num_layers"],
hidden_size=hidden_size,
device=BASE_SETTINGS["device"],
)
# Set network weights.
state_dict = network.state_dict()
for i in range(BASE_SETTINGS["num_layers"]):
weight_name = "regions.%d.0.0.weight" % i
bias_name = "regions.%d.0.0.bias" % i
state_dict[weight_name] = torch.Tensor((i + 1) * np.identity(dim))
state_dict[bias_name] = torch.Tensor((i + 1) * np.ones(dim))
network.load_state_dict(state_dict)
# Construct batch of observations concatenated with one-hot task vectors.
obs, task_indices = get_obs_batch(
batch_size=BASE_SETTINGS["num_processes"],
obs_space=observation_subspace,
num_tasks=BASE_SETTINGS["num_tasks"],
)
# Get output of network.
output = network(obs, task_indices)
# Computed expected output of network.
expected_output = 3 * torch.tanh(2 * torch.tanh(obs + 1) + 2) + 3
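# (Editorial) This follows from the weights set above: layer i computes
# x -> (i + 1) * x + (i + 1), tanh activations follow the first two layers,
# and the final layer is linear, giving 3 * tanh(2 * tanh(x + 1) + 2) + 3.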
# Test output of network.
assert torch.allclose(output, expected_output)
def test_forward_single() -> None:
"""
Test forward() when all regions of the splitting network are fully shared except
one. The function computed by the network should be f(x) = 3 * tanh(2 * tanh(x + 1)
+ 2) + 3 for tasks 0 and 1 and f(x) = 3 * tanh(-2 * tanh(x + 1) - 2) + 3 for tasks 2
and 3.
"""
# Set up case.
dim = BASE_SETTINGS["obs_dim"] + BASE_SETTINGS["num_tasks"]
observation_subspace = Box(
low=-np.inf, high=np.inf, shape=(BASE_SETTINGS["obs_dim"],)
)
observation_subspace.seed(DEFAULT_SETTINGS["seed"])
hidden_size = dim
# Construct network.
network = BaseMultiTaskSplittingNetwork(
input_size=dim,
output_size=dim,
num_tasks=BASE_SETTINGS["num_tasks"],
num_layers=BASE_SETTINGS["num_layers"],
hidden_size=hidden_size,
device=BASE_SETTINGS["device"],
)
# Split the network at the second layer. Tasks 0 and 1 stay assigned to the original
# copy and tasks 2 and 3 are assigned to the new copy.
network.split(1, 0, [0, 1], [2, 3])
# Set network weights.
state_dict = network.state_dict()
for i in range(BASE_SETTINGS["num_layers"]):
weight_name = "regions.%d.0.0.weight" % i
bias_name = "regions.%d.0.0.bias" % i
state_dict[weight_name] = torch.Tensor((i + 1) * np.identity(dim))
state_dict[bias_name] = torch.Tensor((i + 1) * np.ones(dim))
weight_name = "regions.1.1.0.weight"
bias_name = "regions.1.1.0.bias"
state_dict[weight_name] = torch.Tensor(-2 * np.identity(dim))
state_dict[bias_name] = torch.Tensor(-2 * np.ones(dim))
network.load_state_dict(state_dict)
# Construct batch of observations concatenated with one-hot task vectors.
obs, task_indices = get_obs_batch(
batch_size=BASE_SETTINGS["num_processes"],
obs_space=observation_subspace,
num_tasks=BASE_SETTINGS["num_tasks"],
)
# Get output of network.
output = network(obs, task_indices)
# Computed expected output of network.
expected_output = torch.zeros(obs.shape)
for i, (ob, task) in enumerate(zip(obs, task_indices)):
if task in [0, 1]:
expected_output[i] = 3 * torch.tanh(2 * torch.tanh(ob + 1) + 2) + 3
elif task in [2, 3]:
expected_output[i] = 3 * torch.tanh(-2 * torch.tanh(ob + 1) - 2) + 3
else:
raise NotImplementedError
# Test output of network.
assert torch.allclose(output, expected_output)
def test_forward_multiple() -> None:
"""
Test forward() when none of the layers are fully shared. The function computed by
the network should be:
- f(x) = 3 * tanh(2 * tanh(x + 1) + 2) + 3 for task 0
- f(x) = -3 * tanh(-2 * tanh(x + 1) - 2) - 3 for task 1
- f(x) = -3 * tanh(1/2 * tanh(-x - 1) + 1/2) - 3 for task 2
- f(x) = 3 * tanh(-2 * tanh(-x - 1) - 2) + 3 for task 3
"""
# Set up case.
dim = BASE_SETTINGS["obs_dim"] + BASE_SETTINGS["num_tasks"]
observation_subspace = Box(
low=-np.inf, high=np.inf, shape=(BASE_SETTINGS["obs_dim"],)
)
observation_subspace.seed(DEFAULT_SETTINGS["seed"])
hidden_size = dim
# Construct network.
network = BaseMultiTaskSplittingNetwork(
input_size=dim,
output_size=dim,
num_tasks=BASE_SETTINGS["num_tasks"],
num_layers=BASE_SETTINGS["num_layers"],
hidden_size=hidden_size,
device=BASE_SETTINGS["device"],
)
# Split the network several times so that no region remains fully shared:
# layer 0 is split into task groups {0, 1} / {2, 3}, layer 1 into
# {0} / {1, 3} / {2}, and layer 2 into {0, 3} / {1, 2}.
network.split(0, 0, [0, 1], [2, 3])
network.split(1, 0, [0, 2], [1, 3])
network.split(1, 0, [0], [2])
network.split(2, 0, [0, 3], [1, 2])
# Set network weights.
state_dict = network.state_dict()
for i in range(BASE_SETTINGS["num_layers"]):
for j in range(3):
weight_name = "regions.%d.%d.0.weight" % (i, j)
bias_name = "regions.%d.%d.0.bias" % (i, j)
if weight_name not in state_dict:
continue
if j == 0:
state_dict[weight_name] = torch.Tensor((i + 1) * np.identity(dim))
state_dict[bias_name] = torch.Tensor((i + 1) * np.ones(dim))
elif j == 1:
state_dict[weight_name] = torch.Tensor(-(i + 1) * np.identity(dim))
state_dict[bias_name] = torch.Tensor(-(i + 1) * np.ones(dim))
elif j == 2:
state_dict[weight_name] = torch.Tensor(1 / (i + 1) * np.identity(dim))
state_dict[bias_name] = torch.Tensor(1 / (i + 1) * np.ones(dim))
else:
raise NotImplementedError
network.load_state_dict(state_dict)
# Construct batch of observations concatenated with one-hot task vectors.
obs, task_indices = get_obs_batch(
batch_size=BASE_SETTINGS["num_processes"],
obs_space=observation_subspace,
num_tasks=BASE_SETTINGS["num_tasks"],
)
# Get output of network.
output = network(obs, task_indices)
# Computed expected output of network.
expected_output = torch.zeros(obs.shape)
for i, (ob, task) in enumerate(zip(obs, task_indices)):
if task == 0:
expected_output[i] = 3 * torch.tanh(2 * torch.tanh(ob + 1) + 2) + 3
elif task == 1:
expected_output[i] = -3 * torch.tanh(-2 * torch.tanh(ob + 1) - 2) - 3
elif task == 2:
expected_output[i] = (
-3 * torch.tanh(1 / 2 * torch.tanh(-ob - 1) + 1 / 2) - 3
)
elif task == 3:
expected_output[i] = 3 * torch.tanh(-2 * torch.tanh(-ob - 1) - 2) + 3
else:
raise NotImplementedError
# Test output of network.
assert torch.allclose(output, expected_output)
def test_split_single() -> None:
"""
Test that split() correctly sets new parameters when we perform a single split.
"""
# Set up case.
dim = BASE_SETTINGS["obs_dim"] + BASE_SETTINGS["num_tasks"]
observation_subspace = Box(
low=-np.inf, high=np.inf, shape=(BASE_SETTINGS["obs_dim"],)
)
observation_subspace.seed(DEFAULT_SETTINGS["seed"])
hidden_size = dim
# Construct network.
network = BaseMultiTaskSplittingNetwork(
input_size=dim,
output_size=dim,
num_tasks=BASE_SETTINGS["num_tasks"],
num_layers=BASE_SETTINGS["num_layers"],
hidden_size=hidden_size,
device=BASE_SETTINGS["device"],
)
# Split the network at the last layer, so that tasks 0 and 2 stay assigned to the
# original copy and tasks 1 and 3 are assigned to the new copy.
network.split(2, 0, [0, 2], [1, 3])
# Check the parameters of the network.
param_names = [name for name, param in network.named_parameters()]
# Construct expected parameters of network.
region_copies = {i: [0] for i in range(BASE_SETTINGS["num_layers"])}
region_copies[2].append(1)
expected_params = []
for region, copies in region_copies.items():
for copy in copies:
expected_params.append("regions.%d.%d.0.weight" % (region, copy))
expected_params.append("regions.%d.%d.0.bias" % (region, copy))
# Test actual parameter names.
assert set(param_names) == set(expected_params)
def test_split_multiple() -> None:
"""
Test that split() correctly sets new parameters when we perform multiple splits.
"""
# Set up case.
dim = BASE_SETTINGS["obs_dim"] + BASE_SETTINGS["num_tasks"]
observation_subspace = Box(
low=-np.inf, high=np.inf, shape=(BASE_SETTINGS["obs_dim"],)
)
observation_subspace.seed(DEFAULT_SETTINGS["seed"])
hidden_size = dim
# Construct network.
network = BaseMultiTaskSplittingNetwork(
input_size=dim,
output_size=dim,
num_tasks=BASE_SETTINGS["num_tasks"],
num_layers=BASE_SETTINGS["num_layers"],
hidden_size=hidden_size,
device=BASE_SETTINGS["device"],
)
# Split the network at the first layer once and the last layer twice.
network.split(0, 0, [0, 1], [2, 3])
network.split(2, 0, [0, 2], [1, 3])
network.split(2, 1, [1], [3])
# Check the parameters of the network.
param_names = [name for name, param in network.named_parameters()]
# Construct expected parameters of network.
region_copies = {i: [0] for i in range(BASE_SETTINGS["num_layers"])}
region_copies[0].extend([1])
region_copies[2].extend([1, 2])
expected_params = []
for region, copies in region_copies.items():
for copy in copies:
expected_params.append("regions.%d.%d.0.weight" % (region, copy))
expected_params.append("regions.%d.%d.0.bias" % (region, copy))
# Test actual parameter names.
assert set(param_names) == set(expected_params)
def test_backward_shared() -> None:
"""
Test that the backward() function correctly computes gradients in the case of a
fully shared network.
"""
splits_args = []
backward_template(BASE_SETTINGS, splits_args)
def test_backward_single() -> None:
"""
Test that the backward() function correctly computes gradients in the case of a
single split.
"""
splits_args = [
{"region": 1, "copy": 0, "group1": [0, 3], "group2": [1, 2]},
]
backward_template(BASE_SETTINGS, splits_args)
def test_backward_multiple() -> None:
"""
Test that the backward() function correctly computes gradients in the case of
multiple splits.
"""
splits_args = [
{"region": 0, "copy": 0, "group1": [0, 1], "group2": [2, 3]},
{"region": 1, "copy": 0, "group1": [0, 2], "group2": [1, 3]},
| |
"""Test the SSDP integration."""
import asyncio
from datetime import timedelta
from ipaddress import IPv4Address, IPv6Address
from unittest.mock import patch
import aiohttp
from async_upnp_client.search import SSDPListener
from async_upnp_client.utils import CaseInsensitiveDict
import pytest
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
MATCH_ALL,
)
from homeassistant.core import CoreState, callback
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed, mock_coro
def _patched_ssdp_listener(info, *args, **kwargs):
listener = SSDPListener(*args, **kwargs)
async def _async_callback(*_):
await listener.async_callback(info)
listener.async_start = _async_callback
return listener
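# Editorial note: the patched listener replaces async_start so that "starting"
# it simply feeds the canned SSDP response straight into async_callback,
# letting the tests drive discovery without any real network traffic.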
async def _async_run_mocked_scan(hass, mock_ssdp_response, mock_get_ssdp):
def _generate_fake_ssdp_listener(*args, **kwargs):
return _patched_ssdp_listener(
mock_ssdp_response,
*args,
**kwargs,
)
with patch(
"homeassistant.components.ssdp.async_get_ssdp",
return_value=mock_get_ssdp,
), patch(
"homeassistant.components.ssdp.SSDPListener",
new=_generate_fake_ssdp_listener,
), patch.object(
hass.config_entries.flow, "async_init", return_value=mock_coro()
) as mock_init:
assert await async_setup_component(hass, ssdp.DOMAIN, {ssdp.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
await hass.async_block_till_done()
return mock_init
async def test_scan_match_st(hass, caplog):
"""Test matching based on ST."""
mock_ssdp_response = {
"st": "mock-st",
"location": None,
"usn": "mock-usn",
"server": "mock-server",
"ext": "",
}
mock_get_ssdp = {"mock-domain": [{"st": "mock-st"}]}
mock_init = await _async_run_mocked_scan(hass, mock_ssdp_response, mock_get_ssdp)
assert len(mock_init.mock_calls) == 1
assert mock_init.mock_calls[0][1][0] == "mock-domain"
assert mock_init.mock_calls[0][2]["context"] == {
"source": config_entries.SOURCE_SSDP
}
assert mock_init.mock_calls[0][2]["data"] == {
ssdp.ATTR_SSDP_ST: "mock-st",
ssdp.ATTR_SSDP_LOCATION: None,
ssdp.ATTR_SSDP_USN: "mock-usn",
ssdp.ATTR_SSDP_SERVER: "mock-server",
ssdp.ATTR_SSDP_EXT: "",
}
assert "Failed to fetch ssdp data" not in caplog.text
async def test_partial_response(hass, caplog):
"""Test location and st missing."""
mock_ssdp_response = {
"usn": "mock-usn",
"server": "mock-server",
"ext": "",
}
mock_get_ssdp = {"mock-domain": [{"st": "mock-st"}]}
mock_init = await _async_run_mocked_scan(hass, mock_ssdp_response, mock_get_ssdp)
assert len(mock_init.mock_calls) == 0
@pytest.mark.parametrize(
"key", (ssdp.ATTR_UPNP_MANUFACTURER, ssdp.ATTR_UPNP_DEVICE_TYPE)
)
async def test_scan_match_upnp_devicedesc(hass, aioclient_mock, key):
"""Test matching based on UPnP device description data."""
aioclient_mock.get(
"http://1.1.1.1",
text=f"""
<root>
<device>
<{key}>Paulus</{key}>
</device>
</root>
""",
)
mock_get_ssdp = {"mock-domain": [{key: "Paulus"}]}
mock_ssdp_response = {
"st": "mock-st",
"location": "http://1.1.1.1",
}
mock_init = await _async_run_mocked_scan(hass, mock_ssdp_response, mock_get_ssdp)
# If we get duplicate responses, ensure we only look it up once
assert len(aioclient_mock.mock_calls) == 1
assert len(mock_init.mock_calls) == 1
assert mock_init.mock_calls[0][1][0] == "mock-domain"
assert mock_init.mock_calls[0][2]["context"] == {
"source": config_entries.SOURCE_SSDP
}
async def test_scan_not_all_present(hass, aioclient_mock):
"""Test match fails if some specified attributes are not present."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>
<device>
<deviceType>Paulus</deviceType>
</device>
</root>
""",
)
mock_ssdp_response = {
"st": "mock-st",
"location": "http://1.1.1.1",
}
mock_get_ssdp = {
"mock-domain": [
{
ssdp.ATTR_UPNP_DEVICE_TYPE: "Paulus",
ssdp.ATTR_UPNP_MANUFACTURER: "Paulus",
}
]
}
mock_init = await _async_run_mocked_scan(hass, mock_ssdp_response, mock_get_ssdp)
assert not mock_init.mock_calls
async def test_scan_not_all_match(hass, aioclient_mock):
"""Test match fails if some specified attribute values differ."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>
<device>
<deviceType>Paulus</deviceType>
<manufacturer>Paulus</manufacturer>
</device>
</root>
""",
)
mock_ssdp_response = {
"st": "mock-st",
"location": "http://1.1.1.1",
}
mock_get_ssdp = {
"mock-domain": [
{
ssdp.ATTR_UPNP_DEVICE_TYPE: "Paulus",
ssdp.ATTR_UPNP_MANUFACTURER: "Not-Paulus",
}
]
}
mock_init = await _async_run_mocked_scan(hass, mock_ssdp_response, mock_get_ssdp)
assert not mock_init.mock_calls
@pytest.mark.parametrize("exc", [asyncio.TimeoutError, aiohttp.ClientError])
async def test_scan_description_fetch_fail(hass, aioclient_mock, exc):
"""Test failing to fetch description."""
aioclient_mock.get("http://1.1.1.1", exc=exc)
mock_ssdp_response = {
"st": "mock-st",
"usn": "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL::urn:mdx-netflix-com:service:target:3",
"location": "http://1.1.1.1",
}
mock_get_ssdp = {
"mock-domain": [
{
ssdp.ATTR_UPNP_DEVICE_TYPE: "Paulus",
ssdp.ATTR_UPNP_MANUFACTURER: "Paulus",
}
]
}
mock_init = await _async_run_mocked_scan(hass, mock_ssdp_response, mock_get_ssdp)
assert not mock_init.mock_calls
assert ssdp.async_get_discovery_info_by_st(hass, "mock-st") == [
{
"UDN": "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL",
"ssdp_location": "http://1.1.1.1",
"ssdp_st": "mock-st",
"ssdp_usn": "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL::urn:mdx-netflix-com:service:target:3",
}
]
async def test_scan_description_parse_fail(hass, aioclient_mock):
"""Test invalid XML."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>INVALIDXML
""",
)
mock_ssdp_response = {
"st": "mock-st",
"location": "http://1.1.1.1",
}
mock_get_ssdp = {
"mock-domain": [
{
ssdp.ATTR_UPNP_DEVICE_TYPE: "Paulus",
ssdp.ATTR_UPNP_MANUFACTURER: "Paulus",
}
]
}
mock_init = await _async_run_mocked_scan(hass, mock_ssdp_response, mock_get_ssdp)
assert not mock_init.mock_calls
async def test_invalid_characters(hass, aioclient_mock):
"""Test that we replace bad characters with placeholders."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>
<device>
<deviceType>ABC</deviceType>
<serialNumber>\xff\xff\xff\xff</serialNumber>
</device>
</root>
""",
)
mock_ssdp_response = {
"st": "mock-st",
"location": "http://1.1.1.1",
}
mock_get_ssdp = {
"mock-domain": [
{
ssdp.ATTR_UPNP_DEVICE_TYPE: "ABC",
}
]
}
mock_init = await _async_run_mocked_scan(hass, mock_ssdp_response, mock_get_ssdp)
assert len(mock_init.mock_calls) == 1
assert mock_init.mock_calls[0][1][0] == "mock-domain"
assert mock_init.mock_calls[0][2]["context"] == {
"source": config_entries.SOURCE_SSDP
}
assert mock_init.mock_calls[0][2]["data"] == {
"ssdp_location": "http://1.1.1.1",
"ssdp_st": "mock-st",
"deviceType": "ABC",
"serialNumber": "ÿÿÿÿ",
}
@patch("homeassistant.components.ssdp.SSDPListener.async_start")
@patch("homeassistant.components.ssdp.SSDPListener.async_search")
async def test_start_stop_scanner(async_start_mock, async_search_mock, hass):
"""Test we start and stop the scanner."""
assert await async_setup_component(hass, ssdp.DOMAIN, {ssdp.DOMAIN: {}})
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=200))
await hass.async_block_till_done()
assert async_start_mock.call_count == 1
assert async_search_mock.call_count == 1
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=200))
await hass.async_block_till_done()
assert async_start_mock.call_count == 1
assert async_search_mock.call_count == 1
async def test_unexpected_exception_while_fetching(hass, aioclient_mock, caplog):
"""Test unexpected exception while fetching."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>
<device>
<deviceType>ABC</deviceType>
<serialNumber>\xff\xff\xff\xff</serialNumber>
</device>
</root>
""",
)
mock_ssdp_response = {
"st": "mock-st",
"location": "http://1.1.1.1",
}
mock_get_ssdp = {
"mock-domain": [
{
ssdp.ATTR_UPNP_DEVICE_TYPE: "ABC",
}
]
}
with patch(
"homeassistant.components.ssdp.descriptions.ElementTree.fromstring",
side_effect=ValueError,
):
mock_init = await _async_run_mocked_scan(
hass, mock_ssdp_response, mock_get_ssdp
)
assert len(mock_init.mock_calls) == 0
assert "Failed to fetch ssdp data from: http://1.1.1.1" in caplog.text
async def test_scan_with_registered_callback(hass, aioclient_mock, caplog):
"""Test matching based on callback."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>
<device>
<deviceType>Paulus</deviceType>
</device>
</root>
""",
)
mock_ssdp_response = {
"st": "mock-st",
"location": "http://1.1.1.1",
"usn": "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL::urn:mdx-netflix-com:service:target:3",
"server": "mock-server",
"x-rincon-bootseq": "55",
"ext": "",
}
not_matching_intergration_callbacks = []
intergration_match_all_callbacks = []
intergration_match_all_not_present_callbacks = []
intergration_callbacks = []
intergration_callbacks_from_cache = []
match_any_callbacks = []
@callback
def _async_exception_callbacks(info):
raise ValueError
@callback
def _async_intergration_callbacks(info):
intergration_callbacks.append(info)
@callback
def _async_intergration_match_all_callbacks(info):
intergration_match_all_callbacks.append(info)
@callback
def _async_intergration_match_all_not_present_callbacks(info):
intergration_match_all_not_present_callbacks.append(info)
@callback
def _async_intergration_callbacks_from_cache(info):
intergration_callbacks_from_cache.append(info)
@callback
def _async_not_matching_intergration_callbacks(info):
not_matching_intergration_callbacks.append(info)
@callback
def _async_match_any_callbacks(info):
match_any_callbacks.append(info)
def _generate_fake_ssdp_listener(*args, **kwargs):
listener = SSDPListener(*args, **kwargs)
async def _async_callback(*_):
await listener.async_callback(mock_ssdp_response)
@callback
def _callback(*_):
hass.async_create_task(listener.async_callback(mock_ssdp_response))
listener.async_start = _async_callback
listener.async_search = _callback
return listener
with patch(
"homeassistant.components.ssdp.SSDPListener",
new=_generate_fake_ssdp_listener,
):
hass.state = CoreState.stopped
assert await async_setup_component(hass, ssdp.DOMAIN, {ssdp.DOMAIN: {}})
await hass.async_block_till_done()
ssdp.async_register_callback(hass, _async_exception_callbacks, {})
ssdp.async_register_callback(
hass,
_async_intergration_callbacks,
{"st": "mock-st"},
)
ssdp.async_register_callback(
hass,
_async_intergration_match_all_callbacks,
{"x-rincon-bootseq": MATCH_ALL},
)
ssdp.async_register_callback(
hass,
_async_intergration_match_all_not_present_callbacks,
{"x-not-there": MATCH_ALL},
)
ssdp.async_register_callback(
hass,
_async_not_matching_intergration_callbacks,
{"st": "not-match-mock-st"},
)
ssdp.async_register_callback(
hass,
_async_match_any_callbacks,
)
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=200))
ssdp.async_register_callback(
hass,
_async_intergration_callbacks_from_cache,
{"st": "mock-st"},
)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
hass.state = CoreState.running
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=200))
await hass.async_block_till_done()
assert hass.state == CoreState.running
assert len(intergration_callbacks) == 3
assert len(intergration_callbacks_from_cache) == 3
assert len(intergration_match_all_callbacks) == 3
assert len(intergration_match_all_not_present_callbacks) == 0
assert len(match_any_callbacks) == 3
assert len(not_matching_intergration_callbacks) == 0
assert intergration_callbacks[0] == {
ssdp.ATTR_UPNP_DEVICE_TYPE: "Paulus",
ssdp.ATTR_SSDP_EXT: "",
ssdp.ATTR_SSDP_LOCATION: "http://1.1.1.1",
ssdp.ATTR_SSDP_SERVER: "mock-server",
ssdp.ATTR_SSDP_ST: "mock-st",
ssdp.ATTR_SSDP_USN: "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL::urn:mdx-netflix-com:service:target:3",
ssdp.ATTR_UPNP_UDN: "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL",
"x-rincon-bootseq": "55",
}
assert "Failed to callback info" in caplog.text
async def test_scan_second_hit(hass, aioclient_mock, caplog):
"""Test matching on second scan."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>
<device>
<deviceType>Paulus</deviceType>
</device>
</root>
""",
)
mock_ssdp_response = CaseInsensitiveDict(
**{
"ST": "mock-st",
"LOCATION": "http://1.1.1.1",
"USN": "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL::urn:mdx-netflix-com:service:target:3",
"SERVER": "mock-server",
"EXT": "",
}
)
mock_get_ssdp = {"mock-domain": [{"st": "mock-st"}]}
intergration_callbacks = []
@callback
def _async_intergration_callbacks(info):
intergration_callbacks.append(info)
def _generate_fake_ssdp_listener(*args, **kwargs):
listener = SSDPListener(*args, **kwargs)
async def _async_callback(*_):
pass
@callback
def _callback(*_):
hass.async_create_task(listener.async_callback(mock_ssdp_response))
listener.async_start = _async_callback
listener.async_search = _callback
return listener
with patch(
"homeassistant.components.ssdp.async_get_ssdp",
return_value=mock_get_ssdp,
), patch(
"homeassistant.components.ssdp.SSDPListener",
new=_generate_fake_ssdp_listener,
), patch.object(
hass.config_entries.flow, "async_init", return_value=mock_coro()
) as mock_init:
assert await async_setup_component(hass, ssdp.DOMAIN, {ssdp.DOMAIN: {}})
await hass.async_block_till_done()
remove = ssdp.async_register_callback(
hass,
_async_intergration_callbacks,
{"st": "mock-st"},
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=200))
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=200))
await hass.async_block_till_done()
remove()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=200))
await hass.async_block_till_done()
assert len(intergration_callbacks) == 2
assert intergration_callbacks[0] == {
ssdp.ATTR_UPNP_DEVICE_TYPE: "Paulus",
ssdp.ATTR_SSDP_EXT: "",
ssdp.ATTR_SSDP_LOCATION: "http://1.1.1.1",
ssdp.ATTR_SSDP_SERVER: "mock-server",
ssdp.ATTR_SSDP_ST: "mock-st",
ssdp.ATTR_SSDP_USN: "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL::urn:mdx-netflix-com:service:target:3",
ssdp.ATTR_UPNP_UDN: "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL",
}
assert len(mock_init.mock_calls) == 1
assert mock_init.mock_calls[0][1][0] == "mock-domain"
assert mock_init.mock_calls[0][2]["context"] == {
"source": config_entries.SOURCE_SSDP
}
assert mock_init.mock_calls[0][2]["data"] == {
ssdp.ATTR_UPNP_DEVICE_TYPE: "Paulus",
ssdp.ATTR_SSDP_ST: "mock-st",
ssdp.ATTR_SSDP_LOCATION: "http://1.1.1.1",
ssdp.ATTR_SSDP_SERVER: "mock-server",
ssdp.ATTR_SSDP_EXT: "",
ssdp.ATTR_SSDP_USN: "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL::urn:mdx-netflix-com:service:target:3",
ssdp.ATTR_UPNP_UDN: "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL",
}
assert "Failed to fetch ssdp data" not in caplog.text
udn_discovery_info = ssdp.async_get_discovery_info_by_st(hass, "mock-st")
discovery_info = udn_discovery_info[0]
assert discovery_info[ssdp.ATTR_SSDP_LOCATION] == "http://1.1.1.1"
assert discovery_info[ssdp.ATTR_SSDP_ST] == "mock-st"
assert (
discovery_info[ssdp.ATTR_UPNP_UDN]
== "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL"
)
assert (
discovery_info[ssdp.ATTR_SSDP_USN]
== "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL::urn:mdx-netflix-com:service:target:3"
)
st_discovery_info = ssdp.async_get_discovery_info_by_udn(
hass, "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL"
)
discovery_info = st_discovery_info[0]
assert discovery_info[ssdp.ATTR_SSDP_LOCATION] == "http://1.1.1.1"
assert discovery_info[ssdp.ATTR_SSDP_ST] == "mock-st"
assert (
discovery_info[ssdp.ATTR_UPNP_UDN]
== "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL"
)
assert (
discovery_info[ssdp.ATTR_SSDP_USN]
== "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL::urn:mdx-netflix-com:service:target:3"
)
discovery_info = ssdp.async_get_discovery_info_by_udn_st(
hass, "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL", "mock-st"
)
assert discovery_info[ssdp.ATTR_SSDP_LOCATION] == "http://1.1.1.1"
assert discovery_info[ssdp.ATTR_SSDP_ST] == "mock-st"
assert (
discovery_info[ssdp.ATTR_UPNP_UDN]
== "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL"
)
assert (
discovery_info[ssdp.ATTR_SSDP_USN]
== "uuid:TIVRTLSR7ANF-D6E-1557809135086-RETAIL::urn:mdx-netflix-com:service:target:3"
)
assert ssdp.async_get_discovery_info_by_udn_st(hass, "wrong", "mock-st") is None
_ADAPTERS_WITH_MANUAL_CONFIG = [
{
"auto": True,
"default": False,
"enabled": True,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"auto": True,
"default": False,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"auto": False,
"default": False,
"enabled": False,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
async def test_async_detect_interfaces_setting_empty_route(hass):
"""Test without default interface config and the route returns nothing."""
mock_get_ssdp = {
"mock-domain": [
{
ssdp.ATTR_UPNP_DEVICE_TYPE: "ABC",
}
]
}
create_args = []
def _generate_fake_ssdp_listener(*args, **kwargs):
        create_args.append([args,
<filename>src/skmultiflow/meta/classifier_chains.py
import numpy as np
import copy
from skmultiflow.core.base import StreamModel
from sklearn.linear_model import LogisticRegression, SGDClassifier
from skmultiflow.utils import check_random_state
class ClassifierChain(StreamModel):
""" Classifier Chains for multi-label learning.
Parameters
----------
base_estimator: StreamModel or sklearn model
This is the ensemble classifier type, each ensemble classifier is going
to be a copy of the base_estimator.
order : str
`None` to use default order, 'random' for random order.
random_state: int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by `np.random`.
Examples
--------
>>> from skmultiflow.data import make_logical
>>>
>>> X, Y = make_logical(random_state=1)
>>>
>>> print("TRUE: ")
>>> print(Y)
>>> print("vs")
>>>
>>> print("CC")
>>> cc = ClassifierChain(SGDClassifier(max_iter=100, loss='log', random_state=1))
>>> cc.fit(X, Y)
>>> print(cc.predict(X))
>>>
>>> print("RCC")
>>> cc = ClassifierChain(SGDClassifier(max_iter=100, loss='log', random_state=1), order='random', random_state=1)
>>> cc.fit(X, Y)
>>> print(cc.predict(X))
>>>
>>> print("MCC")
>>> mcc = MCC(SGDClassifier(max_iter=100, loss='log', random_state=1), M=1000)
>>> mcc.fit(X, Y)
>>> Yp = mcc.predict(X, M=50)
>>> print("with 50 iterations ...")
>>> print(Yp)
>>> Yp = mcc.predict(X, 'default')
>>> print("with default (%d) iterations ..." % 1000)
>>> print(Yp)
>>>
>>> print("PCC")
>>> pcc = ProbabilisticClassifierChain(SGDClassifier(max_iter=100, loss='log', random_state=1))
>>> pcc.fit(X, Y)
>>> print(pcc.predict(X))
TRUE:
[[1. 0. 1.]
[1. 1. 0.]
[0. 0. 0.]
[1. 1. 0.]]
vs
CC
[[1. 0. 1.]
[1. 1. 0.]
[0. 0. 0.]
[1. 1. 0.]]
RCC
[[1. 0. 1.]
[1. 1. 0.]
[0. 0. 0.]
[1. 1. 0.]]
MCC
with 50 iterations ...
[[1. 0. 1.]
[1. 1. 0.]
[0. 0. 0.]
[1. 1. 0.]]
with default (1000) iterations ...
[[1. 0. 1.]
[1. 1. 0.]
[0. 0. 0.]
[1. 1. 0.]]
PCC
[[1. 0. 1.]
[1. 1. 0.]
[0. 0. 0.]
[1. 1. 0.]]
Notes
-----
Classifier Chains [1]_ is a popular method for multi-label learning. It exploits correlation between labels
by incrementally building binary classifiers for each label.
    scikit-learn also includes 'ClassifierChain'. A difference is that probabilistic extensions are included here.
References
----------
.. [1] Read, Jesse, <NAME>, <NAME>, and <NAME>. "Classifier chains for multi-label
classification." In Joint European Conference on Machine Learning and Knowledge Discovery in Databases,
pp. 254-269. Springer, Berlin, Heidelberg, 2009.
"""
# TODO: much of this can be shared with Regressor Chains, probably should use a base class to inherit here.
def __init__(self, base_estimator=LogisticRegression(), order=None, random_state=None):
super().__init__()
self.base_estimator = base_estimator
self.order = order
self.chain = None
self.ensemble = None
self.L = None
self._init_random_state = random_state
self.__configure()
def __configure(self):
self.ensemble = None
self.L = -1
self.random_state = check_random_state(self._init_random_state)
def fit(self, X, Y):
""" fit
"""
N, self.L = Y.shape
L = self.L
N, D = X.shape
self.chain = np.arange(L)
if self.order == 'random':
self.random_state.shuffle(self.chain)
# Set the chain order
Y = Y[:, self.chain]
# Train
self.ensemble = [copy.deepcopy(self.base_estimator) for _ in range(L)]
XY = np.zeros((N, D + L-1))
XY[:, 0:D] = X
XY[:, D:] = Y[:, 0:L-1]
for j in range(self.L):
self.ensemble[j].fit(XY[:, 0:D + j], Y[:, j])
return self
def partial_fit(self, X, Y):
""" partial_fit
N.B. Assume that fit has already been called
(i.e., this is more of an 'update')
"""
if self.ensemble is None:
            # The ensemble has not been trained yet, so perform a full fit
self.fit(X, Y)
return self
N, self.L = Y.shape
L = self.L
N, D = X.shape
# Set the chain order
Y = Y[:, self.chain]
XY = np.zeros((N, D + L-1))
XY[:, 0:D] = X
XY[:, D:] = Y[:, 0:L-1]
for j in range(L):
self.ensemble[j].partial_fit(XY[:, 0:D + j], Y[:, j])
return self
def predict(self, X):
""" predict
Returns predictions for X
"""
N, D = X.shape
Y = np.zeros((N, self.L))
for j in range(self.L):
if j > 0:
X = np.column_stack([X, Y[:, j-1]])
Y[:, j] = self.ensemble[j].predict(X)
# Unset the chain order (back to default)
return Y[:, np.argsort(self.chain)]
def predict_proba(self, X):
""" predict_proba
Returns marginals [P(y_1=1|x),...,P(y_L=1|x,y_1,...,y_{L-1})]
i.e., confidence predictions given inputs, for each instance.
        N.B. This function is suitable for multi-label (binary) data
only at the moment (may give index-out-of-bounds error if
uni- or multi-target (of > 2 values) data is used in training).
"""
N, D = X.shape
Y = np.zeros((N, self.L))
for j in range(self.L):
if j > 0:
X = np.column_stack([X, Y[:, j-1]])
Y[:, j] = self.ensemble[j].predict_proba(X)[:, 1]
return Y
def score(self, X, y):
raise NotImplementedError
def reset(self):
self.__configure()
def get_info(self):
return 'ClassifierChain Classifier:' \
' - base_estimator: {}'.format(self.base_estimator) + \
' - order: {}'.format(self.order) + \
' - random_state: {}'.format(self._init_random_state)
def P(y, x, cc, payoff=np.prod):
""" Payoff function, P(Y=y|X=x)
What payoff do we get for predicting y | x, under model cc.
Parameters
----------
x: input instance
y: its true labels
cc: a classifier chain
payoff: payoff function
Returns
-------
A single number; the payoff of predicting y | x.
"""
D = len(x)
L = len(y)
p = np.zeros(L)
xy = np.zeros(D + L)
xy[0:D] = x.copy()
for j in range(L):
P_j = cc.ensemble[j].predict_proba(xy[0:D+j].reshape(1, -1))[0] # e.g., [0.9, 0.1] wrt 0, 1
xy[D+j] = y[j] # e.g., 1
p[j] = P_j[y[j]] # e.g., 0.1
# or, y[j] = 0 is predicted with probability p[j] = 0.9
return payoff(p)
class ProbabilisticClassifierChain(ClassifierChain):
""" Probabilistic Classifier Chains (PCC)
Parameters
----------
base_estimator: StreamModel or sklearn model
This is the ensemble classifier type, each ensemble classifier is going
to be a copy of the base_estimator.
order : str
`None` to use default order, 'random' for random order.
random_state: int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by `np.random`.
"""
def __init__(self, base_estimator=LogisticRegression(), order=None, random_state=None):
super().__init__(base_estimator=base_estimator, order=order, random_state=random_state)
def predict(self, X):
""" Predict
Explores all possible branches of the probability tree.
(i.e., all possible 2^L label combinations).
Returns
-------
Predictions Y.
"""
N, D = X.shape
Yp = np.zeros((N, self.L))
# for each instance
for n in range(N):
w_max = 0.
# for each and every possible label combination
for b in range(2**self.L):
# put together a label vector
y_ = np.array(list(map(int, np.binary_repr(b, width=self.L))))
# ... and gauge a probability for it (given x)
w_ = P(y_, X[n], self)
# if it performs well, keep it, and record the max
if w_ > w_max:
Yp[n, :] = y_[:].copy()
w_max = w_
return Yp
def get_info(self):
return 'ProbabilisticClassifierChain Classifier:' \
' - base_estimator: {}'.format(self.base_estimator) + \
' - order: {}'.format(self.order) + \
' - random_state: {}'.format(self._init_random_state)
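# --- Hedged illustration (not part of the original skmultiflow API) -------------------
# ProbabilisticClassifierChain.predict above enumerates every one of the 2**L candidate
# label vectors before scoring them with P(). The small helper below reproduces just that
# enumeration step, which makes the cost of exhaustive PCC inference easy to see: for
# L = 3 it returns the 8 vectors 000, 001, ..., 111, and the count doubles with every
# additional label. The helper name is illustrative only and not part of the module API.
def _enumerate_label_vectors(L):
    """Return an array of shape (2**L, L) holding every binary label combination."""
    return np.array([list(map(int, np.binary_repr(b, width=L))) for b in range(2 ** L)])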
class MCC(ProbabilisticClassifierChain):
""" Monte Carlo Sampling Classifier Chains
PCC, using Monte Carlo sampling, published as 'MCC'.
M samples are taken from the posterior distribution. Therefore we need
a probabilistic interpretation of the output, and thus, this is a
particular variety of ProbabilisticClassifierChain.
N.B. Multi-label (binary) only at this moment.
Parameters
----------
base_estimator: StreamModel or sklearn model
This is the ensemble classifier type, each ensemble classifier is going
to be a copy of the base_estimator.
M: int
Number of samples to take from the posterior distribution.
random_state: int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by `np.random`.
"""
def __init__(self, base_estimator=LogisticRegression(), M=10, random_state=None):
# Do M iterations, unless overridden by M at prediction time
ClassifierChain.__init__(self, base_estimator, random_state=random_state)
self.M = M
def sample(self, x):
"""
Sample y ~ P(y|x)
Returns
-------
y: a sampled label vector
p: the associated probabilities, i.e., p(y_j=1)=p_j
"""
D = len(x)
p = np.zeros(self.L)
y = np.zeros(self.L)
xy = np.zeros(D + self.L)
xy[0:D] = x.copy()
for j in range(self.L):
P_j = self.ensemble[j].predict_proba(xy[0:D + j].reshape(1, -1))[0]
y_j = self.random_state.choice(2, 1, p=P_j)
xy[D+j] = y_j
y[j] = y_j
p[j] = P_j[y_j]
return y, p
def predict(self, X, M='default'):
""" | |
= self._main_grid.get_mid_point(self.get_grid_coord_from_points_coords(start_point),
self.get_grid_coord_from_points_coords(end_point))
return list(filter(lambda x: x > 1, self._main_grid.get_adjacent_values(mid_point)))
def get_compartments_for_line_duplicates(self, line):
'''
Finding the compartment connected to a specified line.
:return:
'''
start_point = self._point_dict['point' + str(self._line_dict[line][0])]
end_point = self._point_dict['point' + str(self._line_dict[line][1])]
mid_point = self._main_grid.get_mid_point(self.get_grid_coord_from_points_coords(start_point),
self.get_grid_coord_from_points_coords(end_point))
return list(filter(lambda x: x > 1, self._main_grid.get_adjacent_values_duplicates(mid_point)))
def get_point_canvas_coord(self, point_no):
'''
Returning the canvas coordinates of the point. This value will change with slider.
'''
point_coord_x = self._canvas_draw_origo[0] + self._point_dict[point_no][0] * self._canvas_scale
point_coord_y = self._canvas_draw_origo[1] - self._point_dict[point_no][1] * self._canvas_scale
return [point_coord_x, point_coord_y]
def get_point_actual_coord(self, point_no):
'''
        Returning the actual (real-world) coordinates of a point.
'''
return [self._point_dict[point_no][0], self._point_dict[point_no][1]]
def get_actual_elevation_from_grid_coords(self,grid_col):
'''
        Converts a grid coordinate to the actual elevation (real-world y coordinate).
        :param grid_col:
:return:
'''
y_coord = (self._main_grid.get_grid_height() - grid_col)/self._base_scale_factor
self._main_grid.get_grid_height()
return y_coord
def get_grid_coord_from_points_coords(self, point_coord):
'''
Converts coordinates to be used in the grid. Returns (row,col). This value will not change with slider.
:param point:
:return:
'''
row = self._canvas_base_origo[1] - point_coord[1]*self._base_scale_factor
col = point_coord[0]*self._base_scale_factor
return (row,col)
def get_point_coords_from_grid_coords(self, grid_coord):
'''
        Converts grid coordinates to point coordinates. Returns (x,y). This value will not change with slider.
:param point:
:return:
'''
x_coord = grid_coord[1]/self._base_scale_factor
y_coord = (self._main_grid.get_grid_height() - grid_coord[0])/self._base_scale_factor
self._main_grid.get_grid_height()
self._main_grid.get_grid_width()
return x_coord,y_coord
def get_canvas_coords_from_point_coords(self, actual_coords):
'''
Returns tuple of canvas points from actual (x,y)
:param actual_coords:
:return:
'''
canvas_coord_x = self._canvas_draw_origo[0] + actual_coords[0] * self._canvas_scale
canvas_coord_y = self._canvas_draw_origo[1] - actual_coords[1] * self._canvas_scale
return (canvas_coord_x, canvas_coord_y)
def get_line_low_elevation(self,line):
'''
Finding elevation of a line. Used to calculate pressures in load combinations.
:param line:
:return:
'''
return min([self._point_dict['point'+str(point)][1] for point in self._line_dict[line]])
def get_line_radial_mid(self,line):
'''
Getting the horizontal coordinates in the middle of a line.
:param line:
:return:
'''
return sum([self._point_dict['point' + str(point)][0] for point in self._line_dict[line]])/2
def get_pressures_calc_coord(self, line):
''' Returning coordinates of the pressures calculation basis of a selected line. '''
p1 = self._point_dict['point'+str(self._line_dict[line][0])]
p2 = self._point_dict['point'+str(self._line_dict[line][1])]
        if p1[1] < p2[1]:
start_point = p1
end_point = p2
elif p1[1] == p2[1]:
if p1[0] <= p2[0]:
start_point = p1
end_point = p2
else:
start_point = p2
end_point = p1
else:
start_point = p2
end_point = p1
vector = [end_point[0]-start_point[0], end_point[1]-start_point[1]]
return start_point[0]+vector[0]*1/3, start_point[1]+vector[1]*1/3
def get_points(self):
return self._point_dict
def get_closest_point(self,given_point):
'''
Finding the closest point to av given value.
Real coordinates used (meters).
Returning point name, coordinates and distance.
:param coordx:
:param coordy:
:return:
'''
current_dist = float('inf')
current_point = None
for point,coords in self._point_dict.items():
if dist([coords[0],coords[1]], [given_point[0],given_point[1]]) < current_dist:
current_dist = dist([coords[0],coords[1]], [given_point[0],given_point[1]])
current_point = point
return current_point, self._point_dict[current_point], current_dist
def get_lines(self):
return self._line_dict
def get_unique_plates_and_beams(self):
beams, plates = list(), list()
if self._line_to_struc != {}:
for line, data in self._line_to_struc.items():
this_beam = data[0].get_beam_string()
this_plate = data[0].get_pl_thk()*1000
if this_beam not in beams:
beams.append(this_beam)
if this_plate not in plates:
plates.append(this_plate)
return {'plates':plates, 'beams': beams}
def make_point_point_line_string(self, point1, point2):
'''
        For a line, this method makes the strings 'p1p2' and 'p2p1', ensuring that lines are not overwritten.
:param point1:
:param point2:
:return:
'''
return ['p' + str(point1) + 'p' + str(point2), 'p' + str(point2) + 'p' + str(point1)]
def reset(self):
'''
Resetting the script.
:return:
'''
self._line_dict = {}
self._point_dict = {}
self._line_to_struc = {}
self._line_point_to_point_string = []
self._load_dict = {}
self._new_load_comb_dict = {}
self._line_is_active = False
self._active_line = ''
self._point_is_active = False
self._active_point = ''
self.delete_all_tanks()
self._main_canvas.delete('all')
self._prop_canvas.delete('all')
self._result_canvas.delete('all')
self._pending_grid_draw = {}
self._p1_p2_select = False
self._line_is_active = False # True when a line is clicked
        self._active_line = '' # Name of the clicked line
self._point_is_active = False # True when a point is clicked
self._active_point = '' # Name of the clicked point
self.controls() # Function to activate mouse clicks
self._line_point_to_point_string = [] # This one ensures that a line is not created on top of a line
self._accelerations_dict = {'static':9.81, 'dyn_loaded':0, 'dyn_ballast':0}
self._multiselect_lines = []
self._PULS_results = None
self.update_frame()
# Initsializing the calculation grid used for tank definition
self._main_grid = grid.Grid(self._grid_dimensions[0], self._grid_dimensions[1])
self._grid_calc = None
def controls(self):
'''
Specifying the controls to be used.
:return:
'''
self._main_canvas.bind('<Button-1>', self.button_1_click)
self._main_canvas.bind('<Button-2>', self.button_2_click)
self._main_canvas.bind('<Button-3>', self.button_3_click)
self._main_canvas.bind("<B2-Motion>", self.button_2_click_and_drag)
self._main_canvas.bind("<MouseWheel>", self.mouse_scroll)
self._parent.bind('<Control-z>', self.undo)
#self._parent.bind('<Control-y>', self.redo)
#self._parent.bind('<Control-p>', self.delete_point)
self._parent.bind('<Control-l>', self.delete_line)
self._parent.bind('<Control-p>', self.copy_point)
self._parent.bind('<Control-m>', self.move_point)
self._parent.bind('<Control-n>', self.move_line)
self._parent.bind('<Control-a>', self.select_all_lines)
self._parent.bind('<Control-t>', self.select_all_lines)
self._parent.bind('<Control-q>', self.new_line)
self._parent.bind('<Control-s>', self.new_structure)
self._parent.bind('<Delete>', self.delete_key_pressed)
self._parent.bind('<Control-Delete>', self.delete_properties_pressed)
self._parent.bind('<Control-e>', self.copy_property)
self._parent.bind('<Control-d>', self.paste_property)
self._parent.bind('<Left>', self.left_arrow)
self._parent.bind('<Right>', self.right_arrow)
self._parent.bind('<Down>', self.up_arrow)
self._parent.bind('<Up>', self.down_arrow)
#self._parent.bind('<Enter>', self.enter_key_pressed)
def left_arrow(self, event):
if self._active_line == '':
return
else:
idx = list(self._line_dict.keys()).index(self._active_line)
if idx -1 >= 0:
self._active_line =list(self._line_dict.keys())[idx-1]
else:
self._active_line = list(self._line_dict.keys())[-1]
self.update_frame()
def right_arrow(self, event):
if self._active_line == '':
return
else:
idx = list(self._line_dict.keys()).index(self._active_line)
if idx + 1 < len(list(self._line_dict.keys())):
self._active_line = list(self._line_dict.keys())[idx+1]
else:
self._active_line = list(self._line_dict.keys())[0]
self.update_frame()
def up_arrow(self, event):
if self._active_point == '':
return
else:
idx = list(self._point_dict.keys()).index(self._active_point)
if idx - 1 >= 0:
self._active_point = list(self._point_dict.keys())[idx - 1]
else:
self._active_point = list(self._point_dict.keys())[-1]
self.update_frame()
def down_arrow(self, event):
if self._active_point == '':
return
else:
idx = list(self._point_dict.keys()).index(self._active_point)
if idx + 1 < len(list(self._point_dict.keys())):
self._active_point = list(self._point_dict.keys())[idx + 1]
else:
self._active_point = list(self._point_dict.keys())[0]
self.update_frame()
def select_all_lines(self, event=None):
if self._toggle_btn.config('relief')[-1] == "sunken":
for line in self._line_to_struc.keys():
if line not in self._multiselect_lines:
if event.keysym == 't':
if self._line_to_struc[line][1].get_structure_type() == self._new_stucture_type.get():
self._multiselect_lines.append(line)
else:
self._multiselect_lines.append(line)
else:
tk.messagebox.showinfo('CTRL-A and CTRL-T', 'CTRL-A and CTRL-T is used to select all lines \n'
                                                       'with the intention to change a single variable in all lines.\n'
'Press the Toggle select multiple button.')
self.update_frame()
def mouse_scroll(self,event):
self._canvas_scale += event.delta/50
self._canvas_scale = 0 if self._canvas_scale < 0 else self._canvas_scale
try:
state = self.get_color_and_calc_state()
except AttributeError:
state = None
self.update_frame()
def button_2_click(self, event):
self._previous_drag_mouse = [event.x, event.y]
def button_2_click_and_drag(self,event):
self._canvas_draw_origo = (self._canvas_draw_origo[0]-(self._previous_drag_mouse[0]-event.x),
self._canvas_draw_origo[1]-(self._previous_drag_mouse[1]-event.y))
self._previous_drag_mouse = (event.x,event.y)
try:
state = self.get_color_and_calc_state()
except AttributeError:
state = None
self.update_frame()
#self.draw_canvas(state=state)
def button_1_click(self, event = None):
'''
        When clicking the left mouse button (Button-1), this method is called.
        The binding is set up in controls().
'''
self._previous_drag_mouse = [event.x, event.y]
click_x = self._main_canvas.winfo_pointerx() - self._main_canvas.winfo_rootx()
click_y = self._main_canvas.winfo_pointery() - self._main_canvas.winfo_rooty()
self._prop_canvas.delete('all')
stop = False
self._active_line = ''
self._line_is_active = False
if len(self._line_dict) > 0:
for key, value in self._line_dict.items():
if stop:
break
coord1x = self.get_point_canvas_coord('point' + str(value[0]))[0]
coord2x = self.get_point_canvas_coord('point' + str(value[1]))[0]
coord1y = self.get_point_canvas_coord('point' + str(value[0]))[1]
coord2y = self.get_point_canvas_coord('point' + str(value[1]))[1]
vector = [coord2x - coord1x, coord2y - coord1y]
click_x_range = [ix for ix in range(click_x - 10, click_x + 10)]
click_y_range = [iy for iy in range(click_y - 10, click_y + 10)]
distance = int(dist([coord1x, coord1y], [coord2x, coord2y]))
                # checking along the line if the click is within +- 10 pixels of the line
for dist_mult in range(1, distance - 1):
dist_mult = dist_mult / distance
x_check = int(coord1x) + int(round(vector[0] * dist_mult, 0))
y_check = int(coord1y) + int(round(vector[1] * dist_mult, 0))
if x_check in click_x_range and y_check in click_y_range:
self._line_is_active = True
self._active_line = key
stop = True
break
self._new_delete_line.set(get_num(key))
if self._line_is_active and self._active_line not in self._line_to_struc.keys():
p1 = self._point_dict['point'+str(self._line_dict[self._active_line][0])]
p2 = self._point_dict['point'+str(self._line_dict[self._active_line][1])]
self._new_field_len.set(dist(p1,p2))
if self._toggle_btn.config('relief')[-1] == 'sunken':
if self._active_line not in self._multiselect_lines:
self._multiselect_lines.append(self._active_line)
else:
self._multiselect_lines = []
try:
state = self.get_color_and_calc_state()
except AttributeError:
state = None
self.update_frame()
self._combination_slider.set(1)
if self._line_is_active:
try:
self.gui_load_combinations(self._combination_slider.get())
except (KeyError, AttributeError):
pass
def button_1_click_comp_box(self,event):
'''
Action when clicking the compartment box.
:param event:
:return:
'''
self._selected_tank.config(text='',font = self._text_size['Text 12 bold'],fg='red')
self._tank_acc_label.config(text='Accelerations [m/s^2]: ',font = self._text_size['Text 8 bold'])
if len(self._tank_dict)!=0:
current_comp = self._tank_dict['comp' + str(self._compartments_listbox.get('active'))]
self._selected_tank.config(text=str(self._compartments_listbox.get('active')))
self._new_density.set(self._tank_dict['comp' + str(self._compartments_listbox.get('active'))]
.get_density())
self._new_overpresure.set(self._tank_dict['comp' + str(self._compartments_listbox.get('active'))]
.get_overpressure())
self._new_content_type.set(self._tank_dict['comp' + str(self._compartments_listbox.get('active'))]
.get_content())
self._new_max_el.set(self._tank_dict['comp' + str(self._compartments_listbox.get('active'))]
.get_highest_elevation())
self._new_min_el.set(self._tank_dict['comp' + str(self._compartments_listbox.get('active'))]
.get_lowest_elevation())
acc = (self._tank_dict['comp' + str(self._compartments_listbox.get('active'))].get_accelerations())
self._tank_acc_label.config(text='Accelerations [m/s^2]: \n'
+'static: ' + str(acc[0])+' , '
+'dynamic loaded: ' + str(acc[1])+' , '
+'dynamic ballast: ' + str(acc[2]), font = self._text_size['Text 8 bold'])
def button_3_click(self, event = None):
'''
        Handles right-button clicks: selects the point closest to the click and sets the new line end points.
:return:
'''
click_x = self._main_canvas.winfo_pointerx() - self._main_canvas.winfo_rootx()
click_y = self._main_canvas.winfo_pointery() - self._main_canvas.winfo_rooty()
self._pt_frame.place_forget()
self._point_is_active = False
margin = 10
self._active_point = ''
for point, coords in self._point_dict.items():
point_coord = self.get_point_canvas_coord(point)
if point_coord[0]-margin < click_x < point_coord[0]+margin and\
point_coord[1]-margin < click_y < point_coord[1]+margin:
self._active_point = point
self._point_is_active = True
self._new_delete_point.set(get_num(point))
if not self._p1_p2_select:
self._new_line_p1.set(get_num(point))
self._p1_p2_select = True
else:
self._new_line_p2.set(get_num(point))
self._p1_p2_select = False
self._new_point_x.set(round(self._point_dict[self._active_point][0]*1000, 1))
self._new_point_y.set(round(self._point_dict[self._active_point][1]*1000, 1))
            if
<reponame>lkusch/Kratos<filename>applications/SwimmingDEMApplication/python_scripts/daitche_quadrature/quadrature.py
import math
import cmath
import mpmath
import matplotlib.pyplot as plt
from bigfloat import *
import sys
# *****************************************************************************************************************************************************************************************
# EXACT EVALUATIONS
# *****************************************************************************************************************************************************************************************
def ExactIntegrationOfSinus(t, a = None, b = None):
with precision(300):
if a == None and b == None:
return 0.5 * math.pi * math.sqrt(t) * (mpmath.angerj(0.5, t) - mpmath.angerj(- 0.5, t))
elif a == None and b != None:
a = 0
elif a == 'MinusInfinity' and b != None:
return math.sqrt(0.5 * math.pi) * (math.sin(b) - math.cos(b))
elif a == 'MinusInfinity' and b == None:
return math.sqrt(0.5 * math.pi) * (math.sin(t) - math.cos(t))
elif b == None:
b = t
mpmath.mp.dps = 50
mpmath.mp.pretty = True
pi = mpmath.mp.pi
pi = +pi
fcos = mpmath.fresnelc
fsin = mpmath.fresnels
arg_a = mpmath.sqrt(2 * (t - a) / pi)
arg_b = mpmath.sqrt(2 * (t - b) / pi)
return mpmath.sqrt(2 * mpmath.mp.pi) * ((fsin(arg_b) - fsin(arg_a)) * mpmath.cos(t) + (fcos(arg_a) - fcos(arg_b)) * mpmath.sin(t))
def ExactIntegrationOfSinusWithExponentialKernel(t, ti, alpha = None, beta = None):
#print('alpha', alpha)
#print('beta', beta)
#print('t', t)
a = sqrt(exp(1) / ti)
b = - 0.5 / ti
if alpha == 'MinusInfinity':
return - a / (b ** 2 + 1) * exp(b * (t - beta)) * (b * sin(beta) + cos(beta))
else:
return a / (b ** 2 + 1) * (exp(b * (t - alpha)) * (b * sin(alpha) + cos(alpha)) - exp(b * (t - beta)) * (b * sin(beta) + cos(beta)))
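# Hedged cross-check (not in the original script): the closed form above integrates
# a * exp(b * (t - tau)) * sin(tau) over [alpha, beta] with a = sqrt(e / ti) and
# b = -1 / (2 * ti). The sketch below verifies that result numerically with mpmath.quad,
# assuming finite alpha and beta; the function name is illustrative only.
def NumericalIntegrationOfSinusWithExponentialKernel(t, ti, alpha, beta):
    a = math.sqrt(math.e / ti)
    b = - 0.5 / ti
    # numerical quadrature of the same integrand over the same interval
    return mpmath.quad(lambda tau: a * mpmath.exp(b * (t - tau)) * mpmath.sin(tau), [alpha, beta])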
def ExactIntegrationOfTail(end_time, end_time_minus_tw, initial_time, ais, tis):
F_tail = 0.0
for i in range(len(ais)):
ti = tis[i]
F_tail += ais[i] * ExactIntegrationOfSinusWithExponentialKernel(end_time, ti, initial_time, end_time_minus_tw)
return F_tail
# *****************************************************************************************************************************************************************************************
# QUADRATURES
# *****************************************************************************************************************************************************************************************
# Approximate Quadrature BEGINS
# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def ApproximateQuadrature(times, f):
values = [0.0 for t in times]
acc_sum = 2 * math.sqrt(times[-1] - times[-2]) * f(times[-1])
for i in range(len(values) - 1):
if i == 0:
delta_t = times[1] - times[0]
else:
delta_t = times[i] - times[i - 1]
acc_sum += 0.5 * delta_t * (f(times[i]) + f(times[i - 1])) / math.sqrt(times[-1] - times[i])
return acc_sum
# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Approximate Quadrature ENDS
# Naive Quadrature BEGINS
# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def NaiveQuadrature(times, f):
values = [0.0 for t in times]
acc_sum = 0.0
for i in range(len(values) - 1):
if i == 0:
delta_t = times[1] - times[0]
acc_sum += 0.5 * delta_t * (f(times[i]) + f(times[i + 1])) / math.sqrt(times[-1] - times[i])
return acc_sum
# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Naive Quadrature ENDS
# Daitche BEGINS
# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def Alpha(n, j):
four_thirds = 4. / 3
exponent = 1.5
if 0 < j and j < n:
return four_thirds * ((j - 1) ** exponent + (j + 1) ** exponent - 2 * j ** exponent)
elif j == 0:
return four_thirds
else:
return four_thirds * ((n - 1) ** exponent - n ** exponent + exponent * sqrt(n))
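# Hedged usage sketch (not in the original script): these first-order coefficients are
# typically combined on a uniform time grid as
#     int_0^{t_n} f(tau) / sqrt(t_n - tau) dtau  ~  sqrt(dt) * sum_j Alpha(n, j) * f(t_{n-j}),
# following Daitche's first-order scheme. The helper below assumes uniformly spaced
# `times` and a callable `f`; its name is illustrative only. For f = math.sin the result
# should roughly approach ExactIntegrationOfSinus(times[-1]) as the grid is refined.
def FirstOrderDaitcheQuadrature(times, f):
    n = len(times) - 1
    dt = times[1] - times[0]
    return math.sqrt(dt) * sum(Alpha(n, j) * f(times[n - j]) for j in range(n + 1))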
def Beta(n, j):
with precision(200):
one = BigFloat(1)
sqrt_2 = math.sqrt(one * 2)
sqrt_3 = math.sqrt(one * 3)
sqrt_n = math.sqrt(one * n)
j = one * j
if n >= 4:
if 2 < j and j < n - 1:
return 8. / (one * 15) * ( (j + 2) ** 2.5 - 3 * (j + 1) ** 2.5 + 3 * j ** 2.5 - (j - 1) ** 2.5)\
+ 2. / (one * 3) * (- (j + 2) ** 1.5 + 3 * (j + 1) ** 1.5 - 3 * j ** 1.5 + (j - 1) ** 1.5)
elif j == 0:
return 4. / (one * 5) * sqrt_2
elif j == 1:
return 14. / (one * 5) * sqrt_3 - 12. / (one * 5) * sqrt_2
elif j == 2:
return 176. / (one * 15) - 42. / 5 * sqrt_3 + 12. / (one * 5) * sqrt_2
elif j == n - 1:
return 8. / (one * 15) * (- 2 * n ** 2.5 + 3 * (n - 1) ** 2.5 - (n - 2) ** 2.5)\
+ 2. / (one * 3) * ( 4 * n ** 1.5 - 3 * (n - 1) ** 1.5 + (n - 2) ** 1.5)
else:
return 8. / (one * 15) * (n ** 2.5 - (n - 1) ** 2.5) + 2. / 3 * (- 3 * n ** 1.5 + (n - 1) ** 1.5) + 2 * sqrt_n
elif n == 2:
if j == 0:
return 12. / 15 * sqrt_2
elif j == 1:
return 16. / 15 * sqrt_2
else:
return 2. / 15 * sqrt_2
else:
if j == 0:
return 4. / 5 * sqrt_2
elif j == 1:
return 14. / 5 * sqrt_3 - 12. / 5 * sqrt_2
elif j == 2:
return - 8. / 5 * sqrt_3 + 12. / 5 * sqrt_2
else:
return 4. / 5 * sqrt_3 - 4. / 5 * sqrt_2
def Gamma(n, j):
with precision(200):
one = BigFloat(1)
sqrt_2 = sqrt(2 * one)
sqrt_3 = sqrt(3 * one)
sqrt_5 = sqrt(5 * one)
sqrt_6 = sqrt(6 * one)
sqrt_n = sqrt(n * one)
j = one * j
if n >= 7:
if 3 < j and j < n - 3:
return 16. / (one * 105) * ( (j + 2) ** (one * 3.5) + (j - 2) ** (one * 3.5) - 4 * (j + 1) ** (one * 3.5) - 4 * (j - 1) ** (one * 3.5) + 6 * j ** (one * 3.5))\
+ 2. / (one * 9) * (4 * (j + 1) ** (one * 1.5) + 4 * (j - 1) ** (one * 1.5) - (j + 2) ** (one * 1.5) - (j - 2) ** (one * 1.5) - 6 * j ** (one * 1.5))
elif j == 0:
return 244. / (one * 315) * sqrt_2
elif j == 1:
return 362. / (one * 105) * sqrt_3 - 976. / (one * 315) * sqrt_2
elif j == 2:
return 5584. / (one * 315) - 1448. / (one * 105) * sqrt_3 + 488. / (one * 105) * sqrt_2
elif j == 3:
return 1130. / (one * 63) * sqrt_5 - 22336. / (one * 315) + 724. / (one * 35) * sqrt_3 - 976. / (one * 315) * sqrt_2
elif j == n - 3:
return 16. / (one * 105) * (n ** (one * 3.5) - 4 * (n - 2) ** (one * 3.5) + 6 * (n - 3) ** (one * 3.5) - 4 * (n - 4) ** (one * 3.5) + (n - 5) ** (one * 3.5))\
- 8. / (one * 15) * n ** (one * 2.5) + 4. / (one * 9) * n ** (one * 1.5) + 8. / (one * 9) * (n - 2) ** (one * 1.5) - 4. / (one * 3) * (n - 3) ** (one * 1.5) + 8. / (one * 9) * (n - 4) ** (one * 1.5) - 2. / (one * 9) * (n - 5) ** (one * 1.5)
elif j == n - 2:
return 16. / (one * 105) * ((n - 4) ** (one * 3.5) - 4 * (n - 3) ** (one * 3.5) + 6 * (n - 2) ** (one * 3.5) - 3 * n ** (one * 3.5))\
+ 32. / (one * 15) * n ** (one * 2.5) - 2 * n ** (one * 1.5) - 4. / (one * 3) * (n - 2) ** (one * 1.5) + 8. / (one * 9) * (n - 3) ** (one * 1.5) - 2. / (one * 9) * (n - 4) ** (one * 1.5)
            elif
#@+leo-ver=5-thin
#@+node:ekr.20211021200745.1: * @file ../plugins/picture_viewer.py
#@+<< docstring (picture_viewer.py) >>
#@+node:ekr.20211021202710.1: ** << docstring (picture_viewer.py) >>
"""
Display image files in a directory tree as a slide show.
This plugin will display all files in a directory tree that have image
extensions. By default the recognized extensions are '.jpeg', '.jpg', and
'.png'. Other types of image files can be displayed as long as the they are
types known by the Qt PixMap class, including '.gif' and '.bmp'. See, for
example:
https://doc.qt.io/qt-5/qpixmap.html#reading-and-writing-image-files
This plugin should be called from a script (or @command or @button node) as follows:
from leo.plugins.picture_viewer import Slides
Slides().run(c) # See below for defaults.
*Note*: do not enable this plugin. It will be loaded by the calling script.
**Key bindings**
Plain keys control the display of slides:
space: show the next slide.
backspace: show the previous slide.
escape: end the slideshow
=: zoom in
-: zoom out
arrows keys: pan the slide
d: prompt to move the slide to the trash
h: show the help message
m: move the file.
r: restart: choose another folder
**Defaults**
The following keyword arguments may be supplied to the run method:
background_color = "black", # Default background color.
delay = 100, # Delay between slides, in seconds.
extensions = ['.jpeg', '.jpg', '.png'], # List of file extensions.
full_screen = True, # True: start in full-screen mode.
height = 900, # Window height (pixels) when not in full screen mode.
path = None, # If none, display a dialog.
reset_zoom = True, # True, reset zoom factor when changing slides.
sort_kind = 'random', # 'date', 'name', 'none', 'random', or 'size'
    width = 1500, # Window width (pixels) when not in full screen mode.
"""
#@-<< docstring (picture_viewer.py) >>
#@+<< imports (picture_viewer.py) >>
#@+node:ekr.20211021202633.1: ** << imports (picture_viewer.py) >>
import argparse
import os
import pathlib
import sys
import random
import textwrap
# Leo imports
from leo.core import leoGlobals as g
try:
from leo.core.leoQt import isQt5, isQt6, QtCore, QtGui, QtWidgets
from leo.core.leoQt import ButtonRole, Information
except ImportError:
QtWidgets = None
#@-<< imports (picture_viewer.py) >>
# Globals to retain references to objects.
gApp = None
gWidget = None
#@+others
#@+node:ekr.20211021202802.1: ** init (picture_viewer.py)
def init():
"""Return True if the plugin has loaded successfully."""
return g.app.gui.guiName().lower().startswith('qt')
#@+node:tom.20211023221408.1: ** get_args & checkers
def get_args():
# Automatically implements the --help option.
description = "usage: python -m picture-viewer [options]"
parser = argparse.ArgumentParser(
description=description,
formatter_class=argparse.RawTextHelpFormatter)
# Add args.
add = parser.add_argument
add('--background', dest='background', metavar='COLOR',
help='Background color')
add('--delay', dest='delay', metavar='DELAY',
help='Delay (seconds)')
add('--extensions', dest='extensions', nargs='*', metavar='TYPES',
help='List of image file extensions.')
# Default: .jpeg,.jpg,.png (no spaces allowed)
add('--full-screen', dest='fullscreen', action='store_true',
help='Start in full-screen mode')
add('--height', dest='height', metavar='PIXELS',
help='Height of window')
add('--path', dest='path', metavar='DIRECTORY',
help='Path to root directory')
add('--reset-zoom', dest='reset_zoom', action='store_false',
help='Reset zoom factor when changing slides')
add('--scale', dest='scale', metavar='FLOAT',
help='Initial scale (zoom) factor')
add('--sort-kind', dest='sort_kind', metavar="KIND",
help='Sort kind: (date, name, none, random, or size)')
add('--starting-directory', dest='starting_directory', metavar='DIRECTORY',
help='Starting directory for file dialogs')
add('--verbose', dest='verbose', action='store_true',
help='Enable status messages')
add('--width', dest='width', metavar='PIXELS',
help='Width of window')
# Parse the options, and remove them from sys.argv.
args = parser.parse_args()
# Check and return the args.
return {
'background_color': args.background or "black",
'delay': get_delay(args.delay),
'extensions': get_extensions(args.extensions),
'full_screen': args.fullscreen,
'height': get_pixels('height', args.height),
'path': get_path(args.path),
'reset_zoom': args.reset_zoom,
'scale': get_scale(args.scale),
'sort_kind': get_sort_kind(args.sort_kind),
'starting_directory': get_path(args.starting_directory),
'verbose': args.verbose,
'width': get_pixels('width', args.width)
}
#@+node:ekr.20211101064157.1: *3* get_delay
def get_delay(delay):
if delay is None:
return None
try:
return float(delay)
except ValueError:
print(f"Bad delay value: {delay!r}")
return None
#@+node:ekr.20211024034921.1: *3* get_extensions
def get_extensions(aList):
# Ensure extensions start with '.'
return [
z if z.startswith('.') else f".{z}"
for z in aList or []
]
#@+node:ekr.20211024041658.1: *3* get_path
def get_path(path):
if path and not os.path.exists(path):
print(f"--path: not found: {path!r}")
path = None
return path
#@+node:ekr.20211024035501.1: *3* get_pixels
def get_pixels(kind, pixels):
if pixels is None:
return None
try:
return int(pixels)
except ValueError:
print(f"Bad --{kind} value: {pixels!r}")
return None
#@+node:ekr.20211024041359.1: *3* get_scale
def get_scale(scale):
try:
return float(scale or 1.0)
except ValueError:
print(f"Bad --scale: {scale!r}")
return 1.0
#@+node:ekr.20211024040842.1: *3* get_sort_kind
def get_sort_kind(kind):
if not kind:
return None
kind = kind.lower()
if kind not in ('date', 'name', 'none', 'random', 'size'):
print(f"bad --sort-kind: {kind!r}")
kind = 'none'
return kind
#@+node:ekr.20211023201914.1: ** main
def main():
global gApp
gApp = QtWidgets.QApplication(sys.argv)
args = get_args()
ok = Slides().run(c = None, **args)
if ok:
if isQt5:
sys.exit(gApp.exec_())
else:
sys.exit(gApp.exec())
#@+node:ekr.20211021202356.1: ** class Slides
if QtWidgets:
class Slides(QtWidgets.QWidget):
slide_number = -1
timer = QtCore.QBasicTimer()
#@+others
#@+node:ekr.20211024030844.1: *3* Slides.closeEvent
def closeEvent(self, event):
"""Override QWidget.closeEvent."""
self.quit()
#@+node:ekr.20211021200821.4: *3* Slides.delete
send_to_trash_warning_given = False
def delete(self):
"""Issue a prompt and delete the file if the user agrees."""
try:
from send2trash import send2trash
except Exception:
if not self.send_to_trash_warning_given:
self.send_to_trash_warning_given = True
print("Deleting files requires send2trash")
print("pip install Send2Trash")
return
file_name = self.files_list[self.slide_number]
# Create the dialog without relying on g.app.gui.
dialog = QtWidgets.QMessageBox(self)
dialog.setStyleSheet("background: white;")
yes = dialog.addButton('Yes', ButtonRole.YesRole)
dialog.addButton('No', ButtonRole.NoRole)
dialog.setWindowTitle("Delete File?")
dialog.setText( f"Delete file {g.shortFileName(file_name)}?")
dialog.setIcon(Information.Warning)
dialog.setDefaultButton(yes)
dialog.raise_()
result = dialog.exec() if isQt6 else dialog.exec_()
if result == 0:
# Move the file to the trash.
send2trash(file_name)
del self.files_list[self.slide_number]
print(f"Deleted {file_name}")
self.slide_number = max(0, self.slide_number - 1)
self.next_slide()
self.raise_()
#@+node:ekr.20211021200821.2: *3* Slides.get_files
def get_files(self, path):
"""Return all files in path, including all subdirectories."""
return [
str(z) for z in pathlib.Path(path).rglob('*')
if z.is_file()
and os.path.splitext(str(z))[1].lower() in self.extensions
]
#@+node:ekr.20211021200821.5: *3* Slides.keyPressEvent
def keyPressEvent (self, event):
i = event.key()
s = event.text()
# mods = event.modifiers()
if s == 'd':
self.delete()
elif s == 'f':
self.toggle_full_screen()
elif s == 'h':
self.show_help()
elif s == 'm':
self.move_to()
elif s == 'n' or i == 32: # ' '
self.next_slide()
elif s == 'p' or s == '\b':
self.prev_slide()
elif s == 'q' or s == '\x1b': # ESC.
self.quit()
elif s == 'r':
self.restart()
elif s in '=+':
self.zoom_in()
elif s in '-_':
self.zoom_out()
elif i == 16777235:
self.move_up()
elif i == 16777237:
self.move_down()
elif i == 16777234:
self.move_left()
elif i == 16777236:
self.move_right()
else:
print(f"picture_viewer.py: ignoring {s!r} {i}")
#@+node:ekr.20211021200821.6: *3* Slides.move_up/down/left/right
def move_down(self):
self.scroll_area.scrollContentsBy(0, -400 * self.scale)
def move_left(self):
self.scroll_area.scrollContentsBy(400 * self.scale, 0)
def move_right(self):
self.scroll_area.scrollContentsBy(-400 * self.scale, 0)
def move_up(self):
self.scroll_area.scrollContentsBy(0, 400 * self.scale)
#@+node:ekr.20211021200821.7: *3* Slides.move_to
def move_to(self):
"""Issue a prompt and move the file if the user agrees."""
file_name = self.files_list[self.slide_number]
path = QtWidgets.QFileDialog().getExistingDirectory()
if path:
new_path = os.path.join(path, os.path.basename(file_name))
if os.path.exists(new_path):
print("File exists:", new_path)
else:
pathlib.Path(file_name).rename(new_path)
del self.files_list[self.slide_number]
self.slide_number = max(0, self.slide_number - 1)
self.next_slide()
self.raise_()
#@+node:ekr.20211021200821.8: *3* Slides.next_slide
def next_slide(self):
if self.slide_number + 1 < len(self.files_list):
self.slide_number += 1 # Don't wrap.
if self.reset_zoom:
self.scale = 1.0
self.show_slide()
#@+node:ekr.20211021200821.9: *3* Slides.prev_slide
def prev_slide(self):
if self.slide_number > 0: # Don't wrap.
self.slide_number -= 1
if self.reset_zoom:
self.scale = 1.0
self.show_slide()
#@+node:ekr.20211021200821.10: *3* Slides.quit
def quit(self):
global gApp
self.timer.stop()
self.destroy()
if gApp: # Running externally.
gApp.exit()
gApp = None
if self.verbose:
print('picture_viewer: done')
#@+node:ekr.20211029020533.1: *3* Slides.restart
def restart(self):
dialog = QtWidgets.QFileDialog(directory=self.starting_directory)
path = dialog.getExistingDirectory()
if not path:
if self.verbose:
print("No path given")
self.quit()
return
self.starting_directory = path
os.chdir(path)
self.files_list = self.get_files(path)
self.slide_number = -1
self.sort(self.sort_kind)
self.next_slide() # show_slide resets the timer.
#@+node:ekr.20211021200821.11: *3* Slides.run & helper
def run(self,
c, # Required. The commander for this slideshow.
background_color = None, # Default background color.
delay = None, # Delay between slides, in seconds. Default 100.
extensions = None, # List of file extensions.
full_screen = False, # True: start in full-screen mode.
            height = None, # Window height (default 900 pixels) when not in full screen mode.
path = None, # Root directory.
scale = None, # Initial scale factor. Default 1.0
reset_zoom = True, # True: reset zoom factor when changing slides.
sort_kind = None, # 'date', 'name', 'none', 'random', or 'size'. Default is 'random'.
starting_directory = None, # Starting directory for file dialogs.
verbose = False, # True, print info messages.
width = None, # Window width (default 1500 pixels) when not in full screen mode.
):
"""
Create the widgets and run the slideshow.
Return True if any pictures were found.
"""
# Keep a reference to this class!
global gWidget
gWidget = self
# Init ivars.
w = self
self.c = c
self.background_color = background_color or "black"
self.delay = delay or 100
            self.extensions = extensions or ['.jpeg',
model.item_input: item_input[i][:, None],
# model.labels: labels[i][:, None]}
# train_loss += sess.run(model.loss, feed_dict)
# else:
# for i in range(len(labels)):
# feed_dict = {model.user_input: user_input[i][:, None],
# model.item_input: item_input[i],
# model.labels: labels[i][:, None]}
# loss = sess.run(model.loss, feed_dict)
# # train_loss += sess.run(model.loss, feed_dict)
# train_loss += loss
# return train_loss / num_batch
def init_logging_and_result(args):
global filename
path_log = 'Log'
if not os.path.exists(path_log):
os.makedirs(path_log)
# define factors
F_model = args.model
F_dataset = args.dataset
F_embedding = args.embed_size
F_topK = args.topK
F_layer_num = args.layer_num
F_num_neg = args.num_neg
F_trail_id = args.trial_id
F_optimizer = args.optimizer + str(args.lr)
F_loss_weight = args.loss_coefficient
F_beta = args.beta
F_alpha = args.alpha
F_en_MC = args.en_MC
F_dropout = args.dropout
F_reg = args.regs
F_b_num = args.b_num
F_b_2_type = args.b_2_type
F_half = args.half_behave
F_buy_loss = args.buy_loss
if args.training_type == 'cascade':
F_cascade = 'C'
F_cascade_mode = args.cascade_mode
else:
F_cascade = 'X'
F_cascade_mode = 'X'
if F_model not in ['pure_NCF', 'pure_MLP', 'Multi_NCF', 'Multi_MLP', 'GMF_FC', 'NCF_FC']:
F_layer_num = 'X'
if F_model not in ['Multi_MLP', 'Multi_NCF', 'Multi_GMF']:
F_b_2_type = 'X'
if (F_model != 'Multi_BPR'):
F_dropout = 'X'
if (F_model != 'Multi_BPR') and (F_en_MC != 'yes'):
F_beta = 'X'
if F_num_neg == 4:
F_num_neg = 'D'
# if F_optimizer == 'Adagrad0.01':
# F_optimizer = 'D'
if F_loss_weight == '[1/3,1/3,1/3]':
F_loss_weight = 'D'
else:
F_loss_weight = F_loss_weight.replace('/', '-')
if F_model != 'FISM':
F_alpha = 'X'
if F_b_num == 3:
F_b_2_type = 'X'
if F_half == 'no':
if F_buy_loss == 'no':
filename = "log-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-b-%s-a-%s-%s%s" %(
F_model, F_dataset, F_embedding, F_topK, F_layer_num, F_num_neg, F_loss_weight,\
F_optimizer, F_trail_id, F_beta, F_dropout, F_reg, F_b_2_type, F_alpha, F_cascade, F_cascade_mode)
else:
filename = "log-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-b-%s-a-%s-bloss-%s%s" %(
F_model, F_dataset, F_embedding, F_topK, F_layer_num, F_num_neg, F_loss_weight,\
F_optimizer, F_trail_id, F_beta, F_dropout, F_reg, F_b_2_type, F_alpha, F_cascade, F_cascade_mode)
else:
filename = "log-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s-b-%s-a-%s-half-%s%s" %(
F_model, F_dataset, F_embedding, F_topK, F_layer_num, F_num_neg, F_loss_weight,\
F_optimizer, F_trail_id, F_beta, F_dropout, F_reg, F_b_2_type, F_alpha, F_cascade, F_cascade_mode)
logging.basicConfig(filename=path_log+'/'+filename, level=logging.INFO)
logging.info('Use Multiprocess to Evaluate: %s' %args.multiprocess)
def save_results(args, cascade = False):
if args.recover == 'yes':
path_result = 'Recover'
else:
path_result = 'Result'
if not os.path.exists(path_result):
os.makedirs(path_result)
if args.recover == 'yes':
with open(path_result+'/'+filename, 'w') as output:
output.write('HR:%.4f,NDCG:%.4f' %(hr_recover, ndcg_recover))
else:
if cascade:
pass
else:
with open(path_result+'/'+filename, 'w') as output:
for i in range(len(loss_list)):
output.write('%.4f,%.4f,%.4f\n' %(loss_list[i], hr_list[i], ndcg_list[i]))
rank_path = ''
if __name__ == '__main__':
args = parse_args()
dataset = None
filename = None
hr_recover = None
ndcg_recover = None
eval_queue = JoinableQueue()
job_num = Semaphore(0)
job_lock = Lock()
rank_result = []
if 'FC' in args.model:
loss_list = range(3 * args.epochs)
hr_list = range(3 * args.epochs)
ndcg_list = range(3 * args.epochs)
else:
loss_list = range(args.epochs)
hr_list = range(args.epochs)
ndcg_list = range(args.epochs)
# initialize logging and configuration
print('------ %s ------' %(args.process_name))
#setproctitle.setproctitle(args.process_name)
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
init_logging_and_result(args)
# load data
print('--- data generation start ---')
data_gen_begin = time()
if args.dataset == 'bb1':
print('load bb1 data')
path = '../data/Beibei/beibei'
elif args.dataset == 'bb2':
print('load bb2 data')
path = '/data3/gaochen/gandahua/Data/'
elif args.dataset == 'bb3':
print('load bb3 data')
pass
elif args.dataset == 'ali':
print('load ali data')
path = '../data/taobao/taobao'
elif args.dataset == 'ali2':
print('load ali 2 data')
path = '/home/stu/gandahua/MBR/Data/ali'
elif args.dataset == 'ali3':
print('load ali 3 data')
path = '/home/stu/gandahua/MBR/Data_ali/ali'
else:
pass
if ('BPR' in args.model) or (args.en_MC == 'yes') or (args.model == 'FISM') or ('CMF' in args.model):
dataset_all = Dataset(path = path, load_type = 'dict')
else:
dataset_ipv = Dataset(path = path, b_type = 'ipv', en_half=args.half_behave)
dataset_cart = Dataset(path = path, b_type = 'cart', en_half=args.half_behave)
dataset_buy = Dataset(path = path, b_type = 'buy', en_half=args.half_behave)
dataset_all = (dataset_ipv, dataset_cart, dataset_buy)
print('data generation [%.1f s]' %(time()-data_gen_begin))
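# A hedged note on the data layout (an assumption based on the names used here, not on
# project documentation): 'ipv', 'cart' and 'buy' appear to denote item page views,
# add-to-cart events and purchases, so dataset_all is either a single dict-style Dataset
# (BPR/FISM/CMF or multi-channel sampling) or an (ipv, cart, buy) triple of per-behavior
# Datasets consumed by the Multi_* models below.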
# model training and evaluating
if args.model == 'Multi_GMF':
model = Multi_GMF(dataset_all[0].num_users, dataset_all[0].num_items, args)
print('num_users:%d num_items:%d' %(dataset_ipv.num_users, dataset_ipv.num_items))
model.build_graph()
dataset = dataset_all
# recover result or not
if args.recover == 'yes':
eval_from_saved_model(model, args)
else:
if args.multiprocess == 'yes':
print('start multiprocess')
train_process = Process(target = training, args = (model, args))
train_process.start()
# evaluate
# initialize for Evaluate
EvalDict = EvalUser.gen_feed_dict(dataset[0])
cpu_num = 3
eval_pool = Pool(cpu_num)
for _ in range(cpu_num):
eval_pool.apply_async(do_eval_job, (args, EvalDict))
train_process.join()
eval_queue.close()
eval_queue.join()
else:
print('start single process')
if args.training_type == 'independent':
training(model, args)
else:
training(model, args, behave_type='ipv')
rank_result = []
loss_list = range(args.epochs)
hr_list = range(args.epochs)
ndcg_list = range(args.epochs)
model.build_graph()
training(model, args, behave_type='cart')
rank_result = []
loss_list = range(args.epochs)
hr_list = range(args.epochs)
ndcg_list = range(args.epochs)
model.build_graph()
training(model, args, behave_type='buy')
rank_result = []
loss_list = range(args.epochs)
hr_list = range(args.epochs)
ndcg_list = range(args.epochs)
elif args.model == 'Multi_MLP':
model = Multi_MLP(dataset_all[0].num_users, dataset_all[0].num_items, args)
print('num_users:%d num_items:%d' %(dataset_ipv.num_users, dataset_ipv.num_items))
model.build_graph()
dataset = dataset_all
# recover result or not
if args.recover == 'yes':
eval_from_saved_model(model, args)
else:
if args.multiprocess == 'yes':
print('start multiprocess')
train_process = Process(target = training, args = (model, args))
train_process.start()
# evaluate
# initialize for Evaluate
EvalDict = EvalUser.gen_feed_dict(dataset[0])
cpu_num = 3
eval_pool = Pool(cpu_num)
for _ in range(cpu_num):
eval_pool.apply_async(do_eval_job, (args, EvalDict))
train_process.join()
eval_queue.close()
eval_queue.join()
else:
print('start single process')
if args.training_type == 'independent':
training(model, args)
else:
training(model, args, behave_type='ipv')
rank_result = []
loss_list = range(args.epochs)
hr_list = range(args.epochs)
ndcg_list = range(args.epochs)
model.build_graph()
training(model, args, behave_type='cart')
rank_result = []
loss_list = range(args.epochs)
hr_list = range(args.epochs)
ndcg_list = range(args.epochs)
model.build_graph()
training(model, args, behave_type='buy')
rank_result = []
loss_list = range(args.epochs)
hr_list = range(args.epochs)
ndcg_list = range(args.epochs)
elif args.model == 'Multi_NCF':
# model = Multi_NCF(dataset_all[0].num_users, dataset_all[0].num_items, args)
model = Multi_NCF_2(dataset_all[0].num_users, dataset_all[0].num_items, args)
print('num_users:%d num_items:%d' %(dataset_ipv.num_users, dataset_ipv.num_items))
model.build_graph()
dataset = dataset_all
# recover result or not
if args.recover == 'yes':
eval_from_saved_model(model, args)
else:
if args.multiprocess == 'yes':
print('start multiprocess')
train_process = Process(target = training, args = (model, args))
train_process.start()
# evaluate
# initialize for Evaluate
EvalDict = EvalUser.gen_feed_dict(dataset[0])
cpu_num = 3
eval_pool = Pool(cpu_num)
for _ in range(cpu_num):
eval_pool.apply_async(do_eval_job, (args, EvalDict))
train_process.join()
eval_queue.close()
eval_queue.join()
else:
print('start single process')
if args.training_type == 'independent':
training(model, args)
else:
training(model, args, behave_type='ipv')
rank_result = []
loss_list = range(args.epochs)
hr_list = range(args.epochs)
ndcg_list = range(args.epochs)
model.build_graph()
training(model, args, behave_type='cart')
rank_result = []
loss_list = range(args.epochs)
hr_list = range(args.epochs)
ndcg_list = range(args.epochs)
if args.b_num == 3:
model.build_graph()
training(model, args, behave_type='buy')
rank_result = []
loss_list = range(args.epochs)
hr_list = range(args.epochs)
ndcg_list = range(args.epochs)
elif args.model == 'pure_GMF':
if args.en_MC == 'yes':
dataset = dataset_all
else:
dataset = dataset_buy
model = pure_GMF(dataset.num_users, dataset.num_items, args)
print('num_users:%d num_items:%d' %(dataset.num_users, dataset.num_items))
model.build_graph()
# recover result or not
if args.recover == 'yes':
eval_from_saved_model(model, args)
else:
if args.multiprocess == 'yes':
pass
else:
print('start single process')
training(model, args, behave_type='buy')
# training(model, args, behave_type='buy')
elif args.model == 'pure_MLP':
if args.en_MC == 'yes':
dataset = dataset_all
else:
dataset = dataset_buy
model = pure_MLP(dataset.num_users, dataset.num_items, args)
print('num_users:%d num_items:%d' %(dataset.num_users, dataset.num_items))
model.build_graph()
# recover result or not
if args.recover == 'yes':
eval_from_saved_model(model, args)
else:
if args.multiprocess == 'yes':
pass
else:
print('start single process')
training(model, args, behave_type='buy')
elif args.model == 'pure_NCF':
if args.en_MC == 'yes':
dataset = dataset_all
else:
dataset = dataset_buy
# model = pure_NCF(dataset.num_users, dataset.num_items, args)
model = pure_NCF_2(dataset.num_users, dataset.num_items, args)
print('num_users:%d num_items:%d' %(dataset.num_users, dataset.num_items))
model.build_graph()
# recover result or not
if args.recover == 'yes':
eval_from_saved_model(model, args)
else:
if args.multiprocess == 'yes':
pass
else:
print('start single process')
training(model, args, behave_type='buy')
elif args.model == 'FISM':
model = FISM(dataset_all.num_items, dataset_all.num_users, dataset_all.max_rate, args)
print('num_users:%d num_items:%d max_rate:%d' %(
dataset_all.num_users, dataset_all.num_items, dataset_all.max_rate))
model.build_graph()
dataset = dataset_all
# recover result or not
if args.recover == 'yes':
eval_from_saved_model(model, args)
else:
if args.multiprocess == 'yes':
print('start multiprocess')
train_process = Process(target = training, args = (model, args))
train_process.start()
# evaluate
# initialize for Evaluate
EvalDict = EvalUser.gen_feed_dict(dataset)
cpu_num = 3
eval_pool = Pool(cpu_num)
for _ in range(cpu_num):
eval_pool.apply_async(do_eval_job, (args, EvalDict))
train_process.join()
eval_queue.close()
eval_queue.join()
else:
print('start single process')
training(model, args)
elif args.model == 'CMF':
model = CMF(dataset_all.num_users, dataset_all.num_items, args)
print('num_users:%d num_items:%d' %(dataset_all.num_users, dataset_all.num_items))
model.build_graph()
dataset = dataset_all
print('start single process')
training(model, args)
elif 'BPR' in args.model:
model = BPR(dataset_all.num_users, dataset_all.num_items, args)
print('num_users:%d num_items:%d' %(dataset_all.num_users, dataset_all.num_items))
model.build_graph()
dataset = dataset_all
# recover result or not
if args.recover == 'yes':
eval_from_saved_model(model, args)
else:
if args.multiprocess == 'yes':
print('start multiprocess')
train_process = Process(target = training, args = (model, args))
train_process.start()
# evaluate
# initialize for | |
noqa
correct_cb3 = """
{# correct_cb3 #}
{% set version = "1.14.5" %}
{% set build_number = 1 %}
{% set variant = "openblas" %}
{% set build_number = build_number + 200 %}
package:
name: numpy
version: {{ version }}
source:
url: https://github.com/numpy/numpy/releases/download/v{{ version }}/numpy-{{ version }}.tar.gz
sha256: 1b4a02758fb68a65ea986d808867f1d6383219c234aef553a8741818e795b529
build:
number: {{ build_number }}
skip: true # [win32 or (win and py27)]
features:
- blas_{{ variant }}
requirements:
build:
- {{ compiler('fortran') }}
- {{ compiler('c') }}
- {{ compiler('cxx') }}
host:
- python
- pip
- cython
- blas 1.1 {{ variant }}
- openblas
run:
- python
- blas 1.1 {{ variant }}
- openblas
test:
requires:
- nose
commands:
- f2py -h
- conda inspect linkages -p $PREFIX $PKG_NAME # [not win]
- conda inspect objects -p $PREFIX $PKG_NAME # [osx]
imports:
- numpy
- numpy.linalg.lapack_lite
about:
home: http://numpy.scipy.org/
license: BSD 3-Clause
license_file: LICENSE.txt
summary: 'Array processing for numbers, strings, records, and objects.'
doc_url: https://docs.scipy.org/doc/numpy/reference/
dev_url: https://github.com/numpy/numpy
extra:
recipe-maintainers:
- jakirkham
- msarahan
- pelson
- rgommers
- ocefpaf
""" # noqa
sample_r_base = """
{# sample_r_base #}
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
number: 1
rpaths:
- lib/R/lib/
- lib/
skip: True # [win32]
requirements:
build:
- r-base
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
""" # noqa
updated_r_base = """
{# updated_r_base #}
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
noarch: generic
number: 2
rpaths:
- lib/R/lib/
- lib/
requirements:
build:
- r-base
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
""" # noqa
sample_r_base2 = """
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
number: 1
rpaths:
- lib/R/lib/
- lib/
skip: True # [win32]
requirements:
build:
- r-base
- {{ compiler('c') }}
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
""" # noqa
updated_r_base2 = """
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
number: 2
rpaths:
- lib/R/lib/
- lib/
skip: True # [win32]
requirements:
build:
- r-base
- {{ compiler('c') }}
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
""" # noqa
# Test that filepaths to various licenses are updated for a noarch recipe
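# (Reading the pair of fixtures below: the update is expected to keep only the unix-style
# license_file paths and drop the '# [unix]' / '# [win]' selectors, alongside the same
# noarch/build-number changes exercised in the r-base fixtures above.)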
sample_r_licenses_noarch = """
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
number: 1
rpaths:
- lib/R/lib/
- lib/
skip: True # [win32]
requirements:
build:
- r-base
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
about:
license_family: GPL3
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-3' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\GPL-3' # [win]
license_family: MIT
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/MIT' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\MIT' # [win]
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\LGPL-2' # [win]
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2.1' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\LGPL-2.1' # [win]
license_family: BSD
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\BSD_3_clause' # [win]
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-2' # [unix]
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause' # [unix]
""" # noqa
updated_r_licenses_noarch = """
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
noarch: generic
number: 2
rpaths:
- lib/R/lib/
- lib/
requirements:
build:
- r-base
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
about:
license_family: GPL3
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-3'
license_family: MIT
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/MIT'
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2'
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2.1'
license_family: BSD
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause'
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-2'
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause'
""" # noqa
# Test that filepaths to various licenses are updated for a compiled recipe
sample_r_licenses_compiled = """
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
number: 1
rpaths:
- lib/R/lib/
- lib/
skip: True # [win32]
requirements:
build:
- r-base
- {{ compiler('c') }}
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
about:
license_family: GPL3
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-3' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\GPL-3' # [win]
license_family: MIT
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/MIT' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\MIT' # [win]
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\LGPL-2' # [win]
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2.1' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\LGPL-2.1' # [win]
license_family: BSD
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\BSD_3_clause' # [win]
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-2' # [unix]
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause' # [unix]
""" # noqa
updated_r_licenses_compiled = """
{% set version = '0.7-1' %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-stabledist
version: {{ version|replace("-", "_") }}
source:
fn: stabledist_{{ version }}.tar.gz
url:
- https://cran.r-project.org/src/contrib/stabledist_{{ version }}.tar.gz
- https://cran.r-project.org/src/contrib/Archive/stabledist/stabledist_{{ version }}.tar.gz
sha256: 06c5704d3a3c179fa389675c537c39a006867bc6e4f23dd7e406476ed2c88a69
build:
number: 2
rpaths:
- lib/R/lib/
- lib/
skip: True # [win32]
requirements:
build:
- r-base
- {{ compiler('c') }}
run:
- r-base
test:
commands:
- $R -e "library('stabledist')" # [not win]
- "\\"%R%\\" -e \\"library('stabledist')\\"" # [win]
about:
license_family: GPL3
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-3'
license_family: MIT
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/MIT'
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2'
license_family: LGPL
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/LGPL-2.1'
license_family: BSD
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause'
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-2'
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/BSD_3_clause'
""" # noqa
sample_noarch = """
{# sample_noarch #}
{% set name = "xpdan" %}
{% set version = "0.3.3" %}
{% set sha256 = "3f1a84f35471aa8e383da3cf4436492d0428da8ff5b02e11074ff65d400dd076" %}
package:
name: {{ name|lower }}
version: {{ version }}
source:
fn: {{ name }}-{{ version }}.tar.gz
url: https://github.com/xpdAcq/{{ name }}/releases/download/{{ version }}/{{ version }}.tar.gz
sha256: {{ sha256 }}
build:
number: 0
script: python -m pip install --no-deps --ignore-installed .
requirements:
build:
- python >=3
- pip
run:
- python >=3
- numpy
- scipy
- matplotlib
- pyyaml
- scikit-beam
- pyfai
- pyxdameraulevenshtein
- xray-vision
- databroker
- bluesky
- streamz_ext
- xpdsim
- shed
- xpdview
- ophyd
- xpdconf
test:
imports:
- xpdan
- xpdan.pipelines
about:
home: http://github.com/xpdAcq/xpdAn
license: BSD-3-Clause
license_family: BSD
license_file: LICENSE
summary: 'Analysis Tools for XPD'
doc_url: http://xpdacq.github.io/xpdAn/
dev_url: http://github.com/xpdAcq/xpdAn
extra:
recipe-maintainers:
- CJ-Wright
""" # noqa
updated_noarch = """
{# updated_noarch #}
{% set name = "xpdan" %}
{% set version = "0.3.3" %}
{% set sha256 = "3f1a84f35471aa8e383da3cf4436492d0428da8ff5b02e11074ff65d400dd076" %}
package:
name: {{ name|lower }}
version: {{ version }}
source:
fn: {{ name }}-{{ version }}.tar.gz
url: https://github.com/xpdAcq/{{ name }}/releases/download/{{ version }}/{{ version }}.tar.gz
sha256: {{ sha256 }}
build:
noarch: python
number: 1
script: python -m pip install --no-deps --ignore-installed .
requirements:
host:
- python >=3
- pip
run:
- python >=3
- numpy
- scipy
- matplotlib
- pyyaml
| |
manager
#doc.AddUndo(UNDO_BITS, obj)
doc.SetActiveObject(obj, c4d.SELECTION_ADD)
def SelectObjects(objs):
for obj in objs:
Select(obj)
def DeselectAll(inObjMngr=False):
""" Not the same as ``BaseSelect.DeselectAll()``.
:param bool inObjMngr: if True, run the deselect command for the
Object Manager, else the general one for the editor viewport.
"""
if inObjMngr is True:
c4d.CallCommand(100004767) # deselect all (Object Manager)
else:
c4d.CallCommand(12113) # deselect all
def GroupObjects(objs, name="Group"):
""" CallCommand based grouping of objects from a list.
Generally unreliable, because selection state matters.
Use InsertUnderNull for more reliable results.
"""
DeselectAll(True)
result = None
if objs is None:
return result
if not isinstance(objs, list):
    objs = [objs]
for o in objs:
Select(o)
if DEBUG: print("creating group %s" % name)
c4d.CallCommand(100004772) # group objects
doc = documents.GetActiveDocument()
grp = doc.GetActiveObject()
grp.SetName(name)
result = grp
return result
def GroupSelected(name="Group"):
""" CallCommand based grouping of selected objects.
Generally unreliable, because selection state matters.
Use InsertUnderNull for more reliable results.
"""
if DEBUG: print("creating group %s" % name)
c4d.CallCommand(100004772) # group objects
doc = documents.GetActiveDocument()
grp = doc.GetActiveObject()
grp.SetName(name)
result = grp
return result
def RecurseBranch(obj):
    # Depth-first visit of every descendant of obj.
    child = obj.GetDown()
    while child:
        RecurseBranch(child)
        child = child.GetNext()
def GetNextObject(obj, stop_objs=None):
""" Return the next object in the hierarchy using a depth-first traversal scheme.
If stop_objs is a c4d.BaseObject or a list of c4d.BaseObjects and the next
operation would encounter this object (or the first object in the list) None
will be returned. This is so that this function can be used in a while loop.
"""
if stop_objs and not isinstance(stop_objs, list):
stop_objs = [stop_objs]
elif stop_objs is None:
stop_objs = []
if obj == None: return None
if obj.GetDown():
if (obj.GetNext() in stop_objs or
obj.GetDown() in stop_objs):
return None
return obj.GetDown()
if obj in stop_objs:
return None
if len(stop_objs) == 0:
while not obj.GetNext() and obj.GetUp():
obj = obj.GetUp()
else:
while (not obj.GetNext() and
obj.GetUp() and
obj.GetUp() not in stop_objs):
if (obj in stop_objs or
obj.GetUp() in stop_objs):
return None
obj = obj.GetUp()
if obj.GetNext() and obj.GetNext() in stop_objs:
return None
else:
return obj.GetNext()
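# Usage sketch (not from the original source, but consistent with GetActiveObjects below):
# GetNextObject is designed to drive a while loop for a depth-first walk of the scene, e.g.
#
#   obj = doc.GetFirstObject()
#   while obj:
#       ...  # visit obj
#       obj = GetNextObject(obj)
#
# Passing stop_objs (a BaseObject or a list) confines the walk to a branch and makes the
# loop terminate once the traversal would leave that branch.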
def GetActiveObjects(doc):
    """ Like BaseDocument.GetSelection(), except that GetSelection
    also returns selected tags and materials, whereas this function
    returns selected objects only.
    """
lst = list()
obj = doc.GetFirstObject()
while obj:
if obj.GetBit(c4d.BIT_ACTIVE) == True:
lst.append(obj)
obj = GetNextObject(obj)
return lst
def FindObject(name, start=None, matchfunc=None, *args, **kwargs):
""" Find object with name 'name'.
:param start: a c4d.BaseObject or a str representing the name
of a c4d.BaseObject from where the search should begin.
:type start: ``c4d.BaseObject``
:param matchfunc: can be used to customize the matching logic
by providing the name of a custom function. This function
will be passed a potential candidate object plus any
remaining args. It should return True or False.
:type matchfunc: ``function``
"""
if name is None: return None
if not isinstance(name, (str, unicode)):
raise TypeError("E: expected string or unicode, got %s" % type(name))
doc = documents.GetActiveDocument()
if not doc: return None
result = None
if start is None:
startop = doc.GetFirstObject()
else:
if isinstance(start, str):
# warning: doesn't distinguish between objects with same name
startop = doc.SearchObject(start)
elif isinstance(start, c4d.BaseObject):
startop = start
else:
raise TypeError("E: parameter 'start' must be one of " +
"[str, c4d.BaseObject], but is %s" % type(start))
if not startop: return None
if start:
print("Finding %s under %r" % (name, startop.GetName()))
curname = startop.GetName()
if startop:
if matchfunc and matchfunc(startop, *args, **kwargs):
return startop
elif curname == name:
return startop
obj = GetNextObject(startop, startop)
while obj:
curname = obj.GetName()
if matchfunc and matchfunc(obj, *args, **kwargs):
return obj
elif curname == name:
return obj
obj = GetNextObject(obj, startop)
return result
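# Usage sketch (hypothetical names, shown only to illustrate the matchfunc hook):
#
#   def name_startswith(op, prefix):
#       return op.GetName().startswith(prefix)
#
#   cube = FindObject("Cube", matchfunc=name_startswith, prefix="Cu")
#
# matchfunc receives each candidate BaseObject plus the extra *args/**kwargs and should
# return True or False; when it is omitted, plain name equality is used instead.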
def FindObjects(name=None, uip=None):
""" Find all objects in the scene, either with the name ``name``
and/or the unique IP ``uip``.
"""
if name is None and uip is None:
return None
if name is not None and not isinstance(name, (str, unicode)):
    raise TypeError("E: expected string or unicode, got %s" % type(name))
doc = documents.GetActiveDocument()
if not doc:
return None
result = []
obj = doc.GetFirstObject()
if not obj:
return result
while obj:
curname = obj.GetName()
curip = obj.GetUniqueIP()
if name and uip:
if curname == name and uip == curip:
result.append(obj)
elif uip and name is None:
if uip == curip:
result.append(obj)
elif name and uip is None:
if name == curname:
result.append(obj)
obj = GetNextObject(obj)
return result
def CreateObject(typ, name, undo=True):
""" Create a object of type 'typ', with name 'name'.
This calls c4d.StopAllThreads() internally.
"""
obj = None
try:
doc = documents.GetActiveDocument()
if doc is None: return None
obj = c4d.BaseObject(typ)
obj.SetName(name)
c4d.StopAllThreads()
doc.InsertObject(obj)
if undo is True:
doc.AddUndo(c4d.UNDOTYPE_NEW, obj)
c4d.EventAdd()
except Exception as e: # IGNORE:W0703
print("*** Caught Exception: %r ***" % e)
return obj
def CreateReplaceObject(typ, name):
""" Create object with name 'name' removing and
replacing any object with the same name.
"""
doc = c4d.documents.GetActiveDocument()
if doc is None:
return False
obj = doc.SearchObject(name)
if obj is not None:
obj.Remove()
obj = CreateObject(typ, name)
return obj
def InsertUnderNull(objs, grp=None, name="Group", copy=False):
"""
Inserts objects under a group (null) object, optionally creating the group.
Note: currently does not reset obj's coordinate frame
to that of the new parent.
objs BaseObject can be a single object or a list of objects
grp BaseObject the group to place the objects under
(if None a new null object will be created)
name str name for the new group
copy bool copy the objects if True
Returns the modified/created group on success, None on failure.
"""
if grp is None:
grp = CreateObject(c4d.Onull, name)
if copy == True:
objs = [i.GetClone() for i in objs]
if DEBUG: print("inserting objs into group '%s'" % grp.GetName())
if isinstance(objs, list):
for obj in objs:
obj.Remove()
obj.InsertUnder(grp)
else:
objs.Remove()
objs.InsertUnder(grp)
c4d.EventAdd()
return grp
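# Usage sketch (assumed call pattern, not from the original source):
#
#   grp = InsertUnderNull([obj_a, obj_b], name="MyGroup")
#
# creates a new null named "MyGroup" (because grp is None), re-parents both objects under
# it and returns the null; pass copy=True to group clones and leave the originals in place.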
@deprecated(since="0.5")
def RecursiveInsertGroups(entry, parent, root, tree, pmatch='90%'):
if isinstance(entry, dict):
for node in entry:
nodeobj = None
for op, lvl in ObjectIterator(root.op, root.op): # IGNORE:W0612 #@UnusedVariable
if op.GetName() == node.name:
nodeobj = op
if not nodeobj:
nodeobj = CreateObject(c4d.Onull, node.name)
nodeobj.InsertUnder(parent.op)
return RecursiveInsertGroups(node, node, root, entry, pmatch)
elif isinstance(entry, list):
for child in entry: # type(child) == <type: TreeEntry> or another dict
if isinstance(child, dict):
return RecursiveInsertGroups(child, parent, root, tree, pmatch)
else:
childobj = FindObject(child.name, start=root.op, matchfunc=FuzzyCompareStrings, limit=pmatch)
if not childobj:
childobj = CreateObject(c4d.Onull, child.name)
childobj.InsertUnder(parent.op)
else:
children = tree[entry]
return RecursiveInsertGroups(children, entry, root, tree, pmatch)
def UniqueSequentialName(name_base, template=u'%(name)s.%(num)s'):
""" Return a new sequential name based on a naming template and a
base name such that the name uniquely identifies an object in
the scene.
By default, mimicks the names generated by CINEMA 4D when
multiple objects of the same type are created in quick succession.
For example if the scene had the following objects::
Cube
Cube.1
Cube.12
the function would return ``Cube.13`` as a new name.
"""
doc = c4d.documents.GetActiveDocument()
if doc is None:
return False
oh = ObjectHierarchy()
objs = oh.Get(r"!" + name_base + r".*?\d*")
nums = []
for obj in objs:
name = obj.GetName()
mat = re.search(ur'(\d+)$', UnescapeUnicode(name), flags=re.UNICODE)
if mat and mat.group(1):
try:
nums.append(int(mat.group(1), 10))
except ValueError:
pass
new_num = 1
if len(nums) == 0:
if doc.SearchObject(name_base) is None:
return name_base
else:
new_num = max(nums) + 1
new_name = template % ({'name': name_base, 'num': new_num})
return EscapeUnicode(new_name)
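# Usage sketch (assuming the scene described in the docstring above):
#
#   name = UniqueSequentialName("Cube")   # -> "Cube.13"
#
# The template argument controls the format, e.g. template=u'%(name)s_%(num)s' would
# yield "Cube_13" instead.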
def GetGlobalPosition(obj):
return obj.GetMg().off
def GetGlobalRotation(obj):
return c4d.utils.MatrixToHPB(obj.GetMg())
def GetGlobalScale(obj):
m = obj.GetMg()
return c4d.Vector(m.v1.GetLength(),
m.v2.GetLength(),
m.v3.GetLength())
def SetGlobalPosition(obj, pos):
m = obj.GetMg()
m.off = pos
obj.SetMg(m)
def SetGlobalRotation(obj, rot):
"""
Please remember, like most 3D engines
CINEMA 4D handles rotation in radians.
Example for H=10, P=20, B=30:
import c4d
from c4d import utils
#...
hpb = c4d.Vector(utils.Rad(10), utils.Rad(20), utils.Rad(30))
SetGlobalRotation(obj, hpb) #object's rotation is 10, 20, 30
"""
m = obj.GetMg()
pos = m.off
scale = c4d.Vector(m.v1.GetLength(),
m.v2.GetLength(),
m.v3.GetLength())
m = c4d.utils.HPBToMatrix(rot)
m.off = pos
m.v1 = m.v1.GetNormalized() * scale.x
m.v2 | |
TBI - Shadow projection type? Matrix value?
new_projection = MappingProjection(sender=correct_sender,
receiver=input_port)
self.add_projection(new_projection, sender=correct_sender, receiver=input_port)
return original_senders
def _update_shadow_projections(self, context=None):
for node in self.nodes:
for input_port in node.input_ports:
if input_port.shadow_inputs:
original_senders = self._get_original_senders(input_port, input_port.shadow_inputs.path_afferents)
for shadow_projection in input_port.path_afferents:
if shadow_projection.sender not in original_senders:
self.remove_projection(shadow_projection)
# MODIFIED 4/4/20 OLD:
# # If the node does not have any roles, it is internal
# if len(self.get_roles_by_node(node)) == 0:
# self._add_node_role(node, NodeRole.INTERNAL)
# MODIFIED 4/4/20 END
def _check_for_projection_assignments(self, context=None):
"""Check that all Projections and Ports with require_projection_in_composition attribute are configured.
Validate that all InputPorts with require_projection_in_composition == True have an afferent Projection.
Validate that all OutputPorts with require_projection_in_composition == True have an efferent Projection.
Validate that all Projections have senders and receivers.
"""
projections = self.projections.copy()
for node in self.nodes:
if isinstance(node, Projection):
projections.append(node)
continue
if context.source != ContextFlags.INITIALIZING:
for input_port in node.input_ports:
if input_port.require_projection_in_composition and not input_port.path_afferents:
warnings.warn(f'{InputPort.__name__} ({input_port.name}) of {node.name} '
f'doesn\'t have any afferent {Projection.__name__}s')
for output_port in node.output_ports:
if output_port.require_projection_in_composition and not output_port.efferents:
warnings.warn(f'{OutputPort.__name__} ({output_port.name}) of {node.name} '
f'doesn\'t have any efferent {Projection.__name__}s in {self.name}')
for projection in projections:
if not projection.sender:
warnings.warn(f'{Projection.__name__} {projection.name} is missing a sender')
if not projection.receiver:
warnings.warn(f'{Projection.__name__} {projection.name} is missing a receiver')
def get_feedback_status(self, projection):
"""Return True if **projection** is designated as a `feedback Projection <_Composition_Feedback_Designation>`
in the Composition, else False.
"""
return projection in self.feedback_projections
def _check_for_existing_projections(self,
projection=None,
sender=None,
receiver=None,
in_composition:bool=True):
"""Check for Projection with same sender and receiver
If **in_composition** is True, return only Projections found in the current Composition
If **in_composition** is False, return only Projections that are found outside the current Composition
Return Projection or list of Projections that satisfies the conditions, else False
"""
assert projection or (sender and receiver), \
f'_check_for_existing_projection must be passed a projection or a sender and receiver'
if projection:
sender = projection.sender
receiver = projection.receiver
else:
if isinstance(sender, Mechanism):
sender = sender.output_port
elif isinstance(sender, Composition):
sender = sender.output_CIM.output_port
if isinstance(receiver, Mechanism):
receiver = receiver.input_port
elif isinstance(receiver, Composition):
receiver = receiver.input_CIM.input_port
existing_projections = [proj for proj in sender.efferents if proj.receiver is receiver]
existing_projections_in_composition = [proj for proj in existing_projections if proj in self.projections]
assert len(existing_projections_in_composition) <= 1, \
f"PROGRAM ERROR: More than one identical projection found " \
f"in {self.name}: {existing_projections_in_composition}."
if in_composition:
if existing_projections_in_composition:
return existing_projections_in_composition[0]
else:
if existing_projections and not existing_projections_in_composition:
return existing_projections
return False
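# A short reading guide for the method above (a summary, not new behavior): with
# in_composition=True it returns the single matching Projection already in this
# Composition, or False; with in_composition=False it returns the list of matching
# Projections only when none of them is already in this Composition, again falling
# back to False.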
def _check_for_unnecessary_feedback_projections(self):
"""
Warn if there exist projections in the graph that the user
labeled as EdgeType.FEEDBACK (True) but are not in a cycle
"""
unnecessary_feedback_specs = []
cycles = self.graph.get_strongly_connected_components()
for proj in self.projections:
try:
vert = self.graph.comp_to_vertex[proj]
if vert.feedback is EdgeType.FEEDBACK:
for c in cycles:
if proj in c:
break
else:
unnecessary_feedback_specs.append(proj)
except KeyError:
pass
if unnecessary_feedback_specs:
warnings.warn(
'The following projections were labeled as feedback, '
'but they are not in any cycles: {0}'.format(
', '.join([str(x) for x in unnecessary_feedback_specs])
)
)
# ******************************************************************************************************************
# PATHWAYS
# ******************************************************************************************************************
# ----------------------------------------- PROCESSING -----------------------------------------------------------
# FIX: REFACTOR TO TAKE Pathway OBJECT AS ARGUMENT
def add_pathway(self, pathway):
"""Add an existing `Pathway <Composition_Pathways>` to the Composition
Arguments
---------
pathway : the `Pathway <Composition_Pathways>` to be added
"""
# identify nodes and projections
nodes, projections = [], []
for c in pathway.graph.vertices:
if isinstance(c.component, Mechanism):
nodes.append(c.component)
elif isinstance(c.component, Composition):
nodes.append(c.component)
elif isinstance(c.component, Projection):
projections.append(c.component)
# add all nodes first
for node in nodes:
self.add_node(node)
# then projections
for p in projections:
self.add_projection(p, p.sender.owner, p.receiver.owner)
self._analyze_graph()
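# Hedged usage sketch (assumed psyneulink-style call, not taken from this file): the more
# common entry point is the method defined next, e.g.
#
#   comp = Composition(name='comp')
#   comp.add_linear_processing_pathway([mech_A, mech_B, mech_C])
#
# which adds the three nodes and creates default MappingProjections A->B and B->C, as
# described in its docstring below (mech_A/B/C are placeholder Mechanism names).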
@handle_external_context()
def add_linear_processing_pathway(self, pathway, name:str=None, context=None, *args):
    """Add sequence of Mechanisms and/or Compositions with intercalated Projections.
A `MappingProjection` is created for each contiguous pair of `Mechanisms <Mechanism>` and/or Compositions
in the **pathway** argument, from the `primary OutputPort <OutputPort_Primary>` of the first one to the
`primary InputPort <InputPort_Primary>` of the second.
Tuples (Mechanism, `NodeRoles <NodeRole>`) can be used to assign `required_roles
<Composition.add_node.required_roles>` to Mechanisms.
Note that any specifications of the **monitor_for_control** `argument
<ControlMechanism_Monitor_for_Control_Argument>` of a constructor for a `ControlMechanism` or the **monitor**
argument specified in the constructor for an ObjectiveMechanism in the **objective_mechanism** `argument
<ControlMechanism_ObjectiveMechanism>` of a ControlMechanism supersede any MappingProjections that would
otherwise be created for them when specified in the **pathway** argument of add_linear_processing_pathway.
Arguments
---------
pathway : `Node <Composition_Nodes>`, list or `Pathway`
specifies the `Nodes <Composition_Nodes>`, and optionally `Projections <Projection>`, used to construct a
processing `Pathway <Pathway>`. Any standard form of `Pathway specification <Pathway_Specification>` can
be used, however if a 2-item (Pathway, LearningFunction) tuple is used the `LearningFunction` will be
ignored (this should be used with `add_linear_learning_pathway` if a `learning Pathway
<Composition_Learning_Pathway>` is desired). A `Pathway` object can also be used; again, however, any
learning-related specifications will be ignored, as will its `name <Pathway.name>` if the **name**
argument of add_linear_processing_pathway is specified.
name : str
specifies the name used for the `Pathway`; supersedes `name <Pathway.name>` of the `Pathway` object if it has one.
Returns
-------
`Pathway` :
`Pathway` added to Composition.
"""
from psyneulink.core.compositions.pathway import Pathway, _is_node_spec, _is_pathway_entry_spec
nodes = []
# If called internally, use its pathway_arg_str in error messages (in context.string)
if context.source is not ContextFlags.COMMAND_LINE:
pathway_arg_str = context.string
# Otherwise, refer to call from this method
else:
pathway_arg_str = f"'pathway' arg for add_linear_processing_pathway method of {self.name}"
context.source = ContextFlags.METHOD
context.string = pathway_arg_str
# First, deal with Pathway() or tuple specifications
if isinstance(pathway, Pathway):
# Give precedence to name specified in call to add_linear_processing_pathway
pathway_name = name or pathway.name
pathway = pathway.pathway
else:
pathway_name = name
if _is_pathway_entry_spec(pathway, ANY):
pathway = convert_to_list(pathway)
elif isinstance(pathway, tuple):
# If tuple is used to specify a sequence of nodes, convert to list (even though not documented):
if all(_is_pathway_entry_spec(n, ANY) for n in pathway):
pathway = list(pathway)
# If tuple is (pathway, LearningFunction), get pathway and ignore LearningFunction
elif isinstance(pathway[1],type) and issubclass(pathway[1], LearningFunction):
warnings.warn(f"{LearningFunction.__name__} found in specification of {pathway_arg_str}: {pathway[1]}; "
f"it will be ignored")
pathway = pathway[0]
else:
raise CompositionError(f"Unrecognized tuple specification in {pathway_arg_str}: {pathway}")
else:
raise CompositionError(f"Unrecognized specification in {pathway_arg_str}: {pathway}")
# Then, verify that the pathway begins with a node
if _is_node_spec(pathway[0]):
# Use add_nodes so that node spec can also be a tuple with required_roles
self.add_nodes(nodes=[pathway[0]],
context=context)
nodes.append(pathway[0])
else:
# 'MappingProjection has no attribute _name' error is thrown when pathway[0] is passed to the error msg
raise CompositionError(f"First item in {pathway_arg_str} must be "
f"a Node (Mechanism or Composition): {pathway}.")
# Next, add all of the remaining nodes in the pathway
for c in range(1, len(pathway)):
# if the current item is a Mechanism, Composition or (Mechanism, NodeRole(s)) tuple, add it
if _is_node_spec(pathway[c]):
self.add_nodes(nodes=[pathway[c]],
context=context)
nodes.append(pathway[c])
# Then, delete any ControlMechanism that has its monitor_for_control attribute assigned
# and any ObjectiveMechanism that projects to a ControlMechanism,
# as well as any projections to them specified in the pathway;
# this is to avoid instantiating projections to them that might conflict with those
# instantiated by their constructors or, for a controller, _add_controller()
items_to_delete = []
for i, item in enumerate(pathway):
if ((isinstance(item, ControlMechanism) and item.monitor_for_control)
or (isinstance(item, ObjectiveMechanism) and
set(self.get_roles_by_node(item)).intersection({NodeRole.CONTROL_OBJECTIVE,
NodeRole.CONTROLLER_OBJECTIVE}))):
items_to_delete.append(item)
# Delete any projections to the ControlMechanism or ObjectiveMechanism specified in pathway
if i>0 and _is_pathway_entry_spec(pathway[i - 1],PROJECTION):
items_to_delete.append(pathway[i - 1])
for item in items_to_delete:
if isinstance(item, ControlMechanism):
arg_name = f'in the {repr(MONITOR_FOR_CONTROL)} of its constructor'
else:
arg_name = f'either in the {repr(MONITOR)} arg of its constructor, ' \
f'or in the {repr(MONITOR_FOR_CONTROL)} arg of its associated {ControlMechanism.__name__}'
warnings.warn(f'No new {Projection.__name__}s were added to {item.name} that was included in '
f'the {pathway_arg_str}, since there were ones already specified {arg_name}.')
del pathway[pathway.index(item)]
# MODIFIED 8/12/19 END
# Then, loop through pathway and validate that the Mechanism-Projection relationships make sense
# and add MappingProjection(s) where needed
projections = []
for c in range(1, len(pathway)):
# if the current item is a Node
if _is_node_spec(pathway[c]):
if _is_node_spec(pathway[c - 1]):
# if the previous item was also a node, add a MappingProjection between them
if isinstance(pathway[c - 1], tuple):
sender = pathway[c - 1][0]
else:
sender | |
# output cif files for incorrect space groups
if check is True:
if check_struct_group(rand_crystal, sg, dim=3):
pass
else:
t += " xxxxx"
# rand_crystal.to_file("poscar", "1.vasp")
# import sys
# sys.exit()
outstructs.append(rand_crystal.to_pymatgen())
outstrings.append(str("3D_Molecular_" + str(sg) + ".vasp"))
fprint("\t{}\t|\t{}\t|\t{}\t|\t{}".format(sg, ans1, ans2, t))
else:
fprint(
"~~~~ Error: Could not generate space group {} after {}".format(sg, t)
)
failed.append(sg)
if slow != []:
fprint("~~~~ The following space groups took more than 60 seconds to generate:")
for i in slow:
fprint(" " + str(i))
if failed != []:
fprint("~~~~ The following space groups failed to generate:")
for i in failed:
fprint(" " + str(i))
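# Shared conventions for the generation tests below (inferred from the code itself): the
# elapsed-time string gains one '~' per threshold crossed (1 s, 3 s, 10 s, 60 s, with the
# 60 s case also recorded in `slow`), and ' xxxxx' is appended whenever check_struct_group
# reports that the generated structure does not match the requested symmetry group, in
# which case the structure is queued in outstructs/outstrings for later inspection.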
def test_atomic_2D():
global outstructs
global outstrings
fprint("=== Testing generation of atomic 2D crystals. This may take some time. ===")
slow = []
failed = []
fprint(" Layer group # | Symbol | Time Elapsed")
skip = []
for sg in range(1, 81):
if sg not in skip:
g = Group(sg, dim=2)
multiplicity = len(g[0]) # multiplicity of the general position
start = time()
rand_crystal = pyxtal()
rand_crystal.from_random(2, sg, ["C"], [multiplicity], 4.0)
end = time()
timespent = np.around((end - start), decimals=2)
t = str(timespent)
if len(t) == 3:
t += "0"
t += " s"
if timespent >= 1.0:
t += " ~"
if timespent >= 3.0:
t += "~"
if timespent >= 10.0:
t += "~"
if timespent >= 60.0:
t += "~"
slow.append(sg)
if rand_crystal.valid:
if check_struct_group(rand_crystal, sg, dim=2):
pass
else:
t += " xxxxx"
outstructs.append(rand_crystal.to_pymatgen())
outstrings.append(str("atomic_2D_" + str(sg) + ".vasp"))
symbol = g.symbol
fprint("\t{}\t|\t{}\t|\t{}".format(sg, symbol, t))
else:
fprint(
"~~~~ Error: Could not generate layer group {} after {}".format(sg, t)
)
failed.append(sg)
if slow != []:
fprint("~~~~ The following layer groups took more than 60 seconds to generate:")
for i in slow:
fprint(" " + str(i))
if failed != []:
fprint("~~~~ The following layer groups failed to generate:")
for i in failed:
fprint(" " + str(i))
def test_molecular_2D():
global outstructs
global outstrings
fprint(
"=== Testing generation of molecular 2D crystals. This may take some time. ==="
)
slow = []
failed = []
fprint(" Layer group # | Symbol | Time Elapsed")
skip = []
for sg in range(1, 81):
if sg not in skip:
g = Group(sg, dim=2)
multiplicity = len(g[0]) # multiplicity of the general position
start = time()
rand_crystal = pyxtal(molecular=True)
rand_crystal.from_random(2, sg, ["H2O"], [multiplicity], 4.0)
end = time()
timespent = np.around((end - start), decimals=2)
t = str(timespent)
if len(t) == 3:
t += "0"
t += " s"
if timespent >= 1.0:
t += " ~"
if timespent >= 3.0:
t += "~"
if timespent >= 10.0:
t += "~"
if timespent >= 60.0:
t += "~"
slow.append(sg)
if rand_crystal.valid:
if check_struct_group(rand_crystal, sg, dim=2):
pass
else:
t += " xxxxx"
outstructs.append(rand_crystal.to_pymatgen())
outstrings.append(str("molecular_2D_" + str(sg) + ".vasp"))
symbol = g.symbol
fprint("\t{}\t|\t{}\t|\t{}".format(sg, symbol, t))
else:
fprint(
"~~~~ Error: Could not generate layer group {} after {}".format(sg, t)
)
failed.append(sg)
if slow != []:
fprint("~~~~ The following layer groups took more than 60 seconds to generate:")
for i in slow:
fprint(" " + str(i))
if failed != []:
fprint("~~~~ The following layer groups failed to generate:")
for i in failed:
fprint(" " + str(i))
def test_atomic_1D():
global outstructs
global outstrings
fprint("=== Testing generation of atomic 1D crystals. This may take some time. ===")
slow = []
failed = []
fprint(" Rod group | Gen. sg (SPG) | Gen. sg (PMG) | Time Elapsed")
skip = [] # slow to generate
for num in range(1, 76):
if num not in skip:
multiplicity = len(get_rod(num)[0]) # multiplicity of the general position
start = time()
rand_crystal = pyxtal()
rand_crystal.from_random(1, num, ["H"], [multiplicity], 4.0)
end = time()
timespent = np.around((end - start), decimals=2)
t = str(timespent)
if len(t) == 3:
t += "0"
t += " s"
if timespent >= 1.0:
t += " ~"
if timespent >= 3.0:
t += "~"
if timespent >= 10.0:
t += "~"
if timespent >= 60.0:
t += "~"
slow.append(num)
if rand_crystal.valid:
try:
ans1 = get_symmetry_dataset(rand_crystal.to_ase(), symprec=1e-1)
except:
ans1 = "???"
if ans1 is None or ans1 == "???":
ans1 = "???"
else:
ans1 = ans1["number"]
sga = SpacegroupAnalyzer(rand_crystal.to_pymatgen())
try:
ans2 = sga.get_space_group_number()
except:
ans2 = "???"
if ans2 is None:
ans2 = "???"
check = True
# output cif files for incorrect space groups
if check is True:
if check_struct_group(rand_crystal, num, dim=1):
pass
else:
t += " xxxxx"
outstructs.append(rand_crystal.to_pymatgen())
outstrings.append(str("1D_Atomic_" + str(num) + ".vasp"))
fprint("\t{}\t|\t{}\t|\t{}\t|\t{}".format(num, ans1, ans2, t))
else:
fprint(
"~~~~ Error: Could not generate rod group {} after {}".format(
num, t
)
)
failed.append(num)
if slow != []:
fprint("~~~~ The following rod groups took more than 60 seconds to generate:")
for i in slow:
fprint(" " + str(i))
if failed != []:
fprint("~~~~ The following rod groups failed to generate:")
for i in failed:
fprint(" " + str(i))
def test_molecular_1D():
global outstructs
global outstrings
fprint(
"=== Testing generation of molecular 1D crystals. This may take some time. ==="
)
slow = []
failed = []
fprint(" Rod group | Gen. sg (SPG) | Gen. sg (PMG) | Time Elapsed")
skip = [] # slow to generate
for num in range(1, 76):
if num not in skip:
multiplicity = len(get_rod(num)[0]) # multiplicity of the general position
start = time()
rand_crystal = pyxtal(molecular=True)
rand_crystal.from_random(1, num, ["H2O"], [multiplicity], 4.0)
end = time()
timespent = np.around((end - start), decimals=2)
t = str(timespent)
if len(t) == 3:
t += "0"
t += " s"
if timespent >= 1.0:
t += " ~"
if timespent >= 3.0:
t += "~"
if timespent >= 10.0:
t += "~"
if timespent >= 60.0:
t += "~"
slow.append(num)
if rand_crystal.valid:
try:
ans1 = get_symmetry_dataset(rand_crystal.to_ase(), symprec=1e-1)
except:
ans1 = "???"
if ans1 is None or ans1 == "???":
ans1 = "???"
else:
ans1 = ans1["number"]
sga = SpacegroupAnalyzer(rand_crystal.to_pymatgen())
try:
ans2 = sga.get_space_group_number()
except:
ans2 = "???"
if ans2 is None:
ans2 = "???"
check = True
# output cif files for incorrect space groups
if check is True:
if check_struct_group(rand_crystal, num, dim=1):
pass
else:
t += " xxxxx"
outstructs.append(rand_crystal.to_pymatgen())
outstrings.append(str("1D_Molecular_" + str(num) + ".vasp"))
fprint("\t{}\t|\t{}\t|\t{}\t|\t{}".format(num, ans1, ans2, t))
else:
fprint(
"~~~~ Error: Could not generate rod group {} after {}".format(
num, t
)
)
failed.append(num)
if slow != []:
fprint("~~~~ The following rod groups took more than 60 seconds to generate:")
for i in slow:
fprint(" " + str(i))
if failed != []:
fprint("~~~~ The following rod groups failed to generate:")
for i in failed:
fprint(" " + str(i))
def test_cluster():
global outstructs
global outstrings
fprint("=== Testing generation of point group clusters. This may take some time. ===")
slow = []
failed = []
fprint(" Point group # | Symbol | Time Elapsed")
skip = [56] # [32,55,56]#[28,29,30,31,32,55,56]
for sg in range(1, 57):
if sg not in skip:
multiplicity = len(
Group(sg, dim=0)[0]
) # multiplicity of the general position
start = time()
rand_crystal = pyxtal()
rand_crystal.from_random(0, sg, ["C"], [multiplicity], 1.0)
end = time()
timespent = np.around((end - start), decimals=2)
t = str(timespent)
if len(t) == 3:
t += "0"
t += " s"
if timespent >= 1.0:
t += " ~"
if timespent >= 3.0:
t += "~"
if timespent >= 10.0:
t += "~"
if timespent >= 60.0:
t += "~"
slow.append(sg)
if rand_crystal.valid:
if check_struct_group(rand_crystal, sg, dim=0):
pass
else:
t += " xxxxx"
outstructs.append(rand_crystal.to_pymatgen())
outstrings.append(str("Cluster_" + str(sg) + ".vasp"))
pgsymbol = Group(sg, dim=0).symbol
fprint("\t{}\t|\t{}\t|\t{}".format(sg, pgsymbol, t))
else:
fprint(
"~~~~ Error: Could not generate point group {} after {}".format(sg, t)
)
failed.append(sg)
if slow != []:
fprint("~~~~ The following point groups took more than 60 seconds to generate:")
for i in slow:
fprint(" " + str(i))
if failed != []:
fprint("~~~~ The following point groups failed to generate:")
for i in failed:
fprint(" " + str(i))
def test_modules():
fprint("====== Testing functionality for pyXtal version 0.1dev ======")
global failed_package
failed_package = False # Record if errors occur at any level
reset()
| |
<filename>zippy/benchmarks/src/benchmarks/whoosh/tests/test_parse_plugins.py
from __future__ import with_statement
import inspect
from datetime import datetime
from whoosh import analysis, fields, formats, qparser, query
from whoosh.compat import u, text_type, xrange
from whoosh.filedb.filestore import RamStorage
from whoosh.qparser import dateparse, default, plugins, syntax
from whoosh.util.times import adatetime
def _plugin_classes(ignore):
# Get all the subclasses of Plugin in whoosh.qparser.plugins
return [c for _, c in inspect.getmembers(plugins, inspect.isclass)
if plugins.Plugin in c.__bases__ and c not in ignore]
def test_combos():
qs = ('w:a "hi there"^4.2 AND x:b^2.3 OR c AND (y:d OR e) ' +
'(apple ANDNOT bear)^2.3')
init_args = {plugins.MultifieldPlugin: (["content", "title"],
{"content": 1.0, "title": 1.2}),
plugins.FieldAliasPlugin: ({"content": ("text", "body")},),
plugins.CopyFieldPlugin: ({"name": "phone"},),
plugins.PseudoFieldPlugin: ({"name": lambda x: x},),
}
pis = _plugin_classes(())
for i, plugin in enumerate(pis):
try:
pis[i] = plugin(*init_args.get(plugin, ()))
except TypeError:
raise TypeError("Error instantiating %s" % plugin)
count = 0
for i, first in enumerate(pis):
for j in xrange(len(pis)):
if i == j:
continue
plist = [p for p in pis[:j] if p is not first] + [first]
qp = qparser.QueryParser("text", None, plugins=plist)
qp.parse(qs)
count += 1
def test_field_alias():
qp = qparser.QueryParser("content", None)
qp.add_plugin(plugins.FieldAliasPlugin({"title": ("article", "caption")}))
q = qp.parse("alfa title:bravo article:charlie caption:delta")
assert text_type(q) == u("(content:alfa AND title:bravo AND title:charlie AND title:delta)")
def test_dateparser():
schema = fields.Schema(text=fields.TEXT, date=fields.DATETIME)
qp = default.QueryParser("text", schema)
errs = []
def cb(arg):
errs.append(arg)
basedate = datetime(2010, 9, 20, 15, 16, 6, 454000)
qp.add_plugin(dateparse.DateParserPlugin(basedate, callback=cb))
q = qp.parse(u("hello date:'last tuesday'"))
assert q.__class__ == query.And
assert q[1].__class__ == query.DateRange
assert q[1].startdate == adatetime(2010, 9, 14).floor()
assert q[1].enddate == adatetime(2010, 9, 14).ceil()
q = qp.parse(u("date:'3am to 5pm'"))
assert q.__class__ == query.DateRange
assert q.startdate == adatetime(2010, 9, 20, 3).floor()
assert q.enddate == adatetime(2010, 9, 20, 17).ceil()
q = qp.parse(u("date:blah"))
assert q == query.NullQuery
assert errs[0] == "blah"
q = qp.parse(u("hello date:blarg"))
assert q.__unicode__() == "(text:hello AND <_NullQuery>)"
assert q[1].error == "blarg"
assert errs[1] == "blarg"
q = qp.parse(u("hello date:20055x10"))
assert q.__unicode__() == "(text:hello AND <_NullQuery>)"
assert q[1].error == "20055x10"
assert errs[2] == "20055x10"
q = qp.parse(u("hello date:'2005 19 32'"))
assert q.__unicode__() == "(text:hello AND <_NullQuery>)"
assert q[1].error == "2005 19 32"
assert errs[3] == "2005 19 32"
q = qp.parse(u("date:'march 24 to dec 12'"))
assert q.__class__ == query.DateRange
assert q.startdate == adatetime(2010, 3, 24).floor()
assert q.enddate == adatetime(2010, 12, 12).ceil()
q = qp.parse(u("date:('30 june' OR '10 july') quick"))
assert q.__class__ == query.And
assert len(q) == 2
assert q[0].__class__ == query.Or
assert q[0][0].__class__ == query.DateRange
assert q[0][1].__class__ == query.DateRange
def test_date_range():
schema = fields.Schema(text=fields.TEXT, date=fields.DATETIME)
qp = qparser.QueryParser("text", schema)
basedate = datetime(2010, 9, 20, 15, 16, 6, 454000)
qp.add_plugin(dateparse.DateParserPlugin(basedate))
q = qp.parse(u("date:['30 march' to 'next wednesday']"))
assert q.__class__ == query.DateRange
assert q.startdate == adatetime(2010, 3, 30).floor()
assert q.enddate == adatetime(2010, 9, 22).ceil()
q = qp.parse(u("date:[to 'next wednesday']"))
assert q.__class__ == query.DateRange
assert q.startdate is None
assert q.enddate == adatetime(2010, 9, 22).ceil()
q = qp.parse(u("date:['30 march' to]"))
assert q.__class__ == query.DateRange
assert q.startdate == adatetime(2010, 3, 30).floor()
assert q.enddate is None
q = qp.parse(u("date:[30 march to next wednesday]"))
assert q.__class__ == query.DateRange
assert q.startdate == adatetime(2010, 3, 30).floor()
assert q.enddate == adatetime(2010, 9, 22).ceil()
q = qp.parse(u("date:[to next wednesday]"))
assert q.__class__ == query.DateRange
assert q.startdate is None
assert q.enddate == adatetime(2010, 9, 22).ceil()
q = qp.parse(u("date:[30 march to]"))
assert q.__class__ == query.DateRange
assert q.startdate == adatetime(2010, 3, 30).floor()
assert q.enddate is None
def test_daterange_multi():
schema = fields.Schema(text=fields.TEXT, start=fields.DATETIME,
end=fields.DATETIME)
qp = qparser.QueryParser("text", schema)
basedate = datetime(2010, 9, 20, 15, 16, 6, 454000)
qp.add_plugin(dateparse.DateParserPlugin(basedate))
q = qp.parse("start:[2008 to] AND end:[2011 to 2011]")
assert q.__class__ == query.And
assert q[0].__class__ == query.DateRange
assert q[1].__class__ == query.DateRange
assert q[0].startdate == adatetime(2008).floor()
assert q[0].enddate is None
assert q[1].startdate == adatetime(2011).floor()
assert q[1].enddate == adatetime(2011).ceil()
def test_daterange_empty_field():
schema = fields.Schema(test=fields.DATETIME)
ix = RamStorage().create_index(schema)
writer = ix.writer()
writer.add_document(test=None)
writer.commit()
with ix.searcher() as s:
q = query.DateRange("test", datetime.fromtimestamp(0),
datetime.today())
r = s.search(q)
assert len(r) == 0
def test_free_dates():
a = analysis.StandardAnalyzer(stoplist=None)
schema = fields.Schema(text=fields.TEXT(analyzer=a), date=fields.DATETIME)
qp = qparser.QueryParser("text", schema)
basedate = datetime(2010, 9, 20, 15, 16, 6, 454000)
qp.add_plugin(dateparse.DateParserPlugin(basedate, free=True))
q = qp.parse(u("hello date:last tuesday"))
assert q.__class__ == query.And
assert len(q) == 2
assert q[0].__class__ == query.Term
assert q[0].text == "hello"
assert q[1].__class__ == query.DateRange
assert q[1].startdate == adatetime(2010, 9, 14).floor()
assert q[1].enddate == adatetime(2010, 9, 14).ceil()
q = qp.parse(u("date:mar 29 1972 hello"))
assert q.__class__ == query.And
assert len(q) == 2
assert q[0].__class__ == query.DateRange
assert q[0].startdate == adatetime(1972, 3, 29).floor()
assert q[0].enddate == adatetime(1972, 3, 29).ceil()
assert q[1].__class__ == query.Term
assert q[1].text == "hello"
q = qp.parse(u("date:2005 march 2"))
assert q.__class__ == query.DateRange
assert q.startdate == adatetime(2005, 3, 2).floor()
assert q.enddate == adatetime(2005, 3, 2).ceil()
q = qp.parse(u("date:'2005' march 2"))
assert q.__class__ == query.And
assert len(q) == 3
assert q[0].__class__ == query.DateRange
assert q[0].startdate == adatetime(2005).floor()
assert q[0].enddate == adatetime(2005).ceil()
assert q[1].__class__ == query.Term
assert q[1].fieldname == "text"
assert q[1].text == "march"
q = qp.parse(u("date:march 24 to dec 12"))
assert q.__class__ == query.DateRange
assert q.startdate == adatetime(2010, 3, 24).floor()
assert q.enddate == adatetime(2010, 12, 12).ceil()
q = qp.parse(u("date:5:10pm"))
assert q.__class__ == query.DateRange
assert q.startdate == adatetime(2010, 9, 20, 17, 10).floor()
assert q.enddate == adatetime(2010, 9, 20, 17, 10).ceil()
q = qp.parse(u("(date:30 june OR date:10 july) quick"))
assert q.__class__ == query.And
assert len(q) == 2
assert q[0].__class__ == query.Or
assert q[0][0].__class__ == query.DateRange
assert q[0][1].__class__ == query.DateRange
def test_prefix_plugin():
schema = fields.Schema(id=fields.ID, text=fields.TEXT)
ix = RamStorage().create_index(schema)
w = ix.writer()
w.add_document(id=u("1"), text=u("alfa"))
w.add_document(id=u("2"), text=u("bravo"))
w.add_document(id=u("3"), text=u("buono"))
w.commit()
with ix.searcher() as s:
qp = qparser.QueryParser("text", schema)
qp.remove_plugin_class(plugins.WildcardPlugin)
qp.add_plugin(plugins.PrefixPlugin)
q = qp.parse(u("b*"))
r = s.search(q, limit=None)
assert len(r) == 2
q = qp.parse(u("br*"))
r = s.search(q, limit=None)
assert len(r) == 1
def test_custom_tokens():
qp = qparser.QueryParser("text", None)
qp.remove_plugin_class(plugins.OperatorsPlugin)
cp = plugins.OperatorsPlugin(And="&", Or="\\|", AndNot="&!", AndMaybe="&~",
Not="-")
qp.add_plugin(cp)
q = qp.parse("this | that")
assert q.__class__ == query.Or
assert q[0].__class__ == query.Term
assert q[0].text == "this"
assert q[1].__class__ == query.Term
assert q[1].text == "that"
q = qp.parse("this&!that")
assert q.__class__ == query.AndNot
assert q.a.__class__ == query.Term
assert q.a.text == "this"
assert q.b.__class__ == query.Term
assert q.b.text == "that"
q = qp.parse("alfa -bravo NOT charlie")
assert len(q) == 4
assert q[1].__class__ == query.Not
assert q[1].query.text == "bravo"
assert q[2].text == "NOT"
def test_copyfield():
qp = qparser.QueryParser("a", None)
qp.add_plugin(plugins.CopyFieldPlugin({"b": "c"}, None))
assert text_type(qp.parse("hello b:matt")) == "(a:hello AND b:matt AND c:matt)"
qp = qparser.QueryParser("a", None)
qp.add_plugin(plugins.CopyFieldPlugin({"b": "c"}, syntax.AndMaybeGroup))
assert text_type(qp.parse("hello b:matt")) == "(a:hello AND (b:matt ANDMAYBE c:matt))"
qp = qparser.QueryParser("a", None)
qp.add_plugin(plugins.CopyFieldPlugin({"b": "c"}, syntax.RequireGroup))
assert text_type(qp.parse("hello (there OR b:matt)")) == "(a:hello AND (a:there OR (b:matt REQUIRE c:matt)))"
qp = qparser.QueryParser("a", None)
qp.add_plugin(plugins.CopyFieldPlugin({"a": "c"}, syntax.OrGroup))
assert text_type(qp.parse("hello there")) == "((a:hello OR c:hello) AND (a:there OR c:there))"
qp = qparser.QueryParser("a", None)
qp.add_plugin(plugins.CopyFieldPlugin({"b": "c"}, mirror=True))
assert text_type(qp.parse("hello c:matt")) == "(a:hello AND (c:matt OR b:matt))"
qp = qparser.QueryParser("a", None)
qp.add_plugin(plugins.CopyFieldPlugin({"c": "a"}, mirror=True))
assert text_type(qp.parse("hello c:matt")) == "((a:hello OR c:hello) AND (c:matt OR a:matt))"
ana = analysis.RegexAnalyzer(r"\w+") | analysis.DoubleMetaphoneFilter()
fmt = formats.Frequency()
schema = fields.Schema(name=fields.KEYWORD,
name_phone=fields.FieldType(fmt, ana,
multitoken_query="or"))
qp = qparser.QueryParser("name", schema)
qp.add_plugin(plugins.CopyFieldPlugin({"name": "name_phone"}))
assert text_type(qp.parse(u("spruce view"))) == "((name:spruce OR name_phone:SPRS) AND (name:view OR name_phone:F OR name_phone:FF))"
def test_gtlt():
schema = fields.Schema(a=fields.KEYWORD, b=fields.NUMERIC,
c=fields.KEYWORD,
d=fields.NUMERIC(float), e=fields.DATETIME)
qp = qparser.QueryParser("a", schema)
qp.add_plugin(plugins.GtLtPlugin())
qp.add_plugin(dateparse.DateParserPlugin())
q = qp.parse(u("a:hello b:>100 c:<=z there"))
assert q.__class__ == query.And
assert len(q) == 4
assert q[0] == query.Term("a", "hello")
assert q[1] == query.NumericRange("b", 100, None, startexcl=True)
assert q[2] == query.TermRange("c", None, 'z')
assert q[3] == query.Term("a", "there")
q = qp.parse(u("hello e:>'29 mar 2001' there"))
assert q.__class__ == query.And
assert len(q) == 3
assert q[0] == query.Term("a", "hello")
# As of this writing, date ranges don't support startexcl/endexcl
assert q[1] == query.DateRange("e", datetime(2001, 3, 29, 0, 0), None)
assert q[2] == query.Term("a", "there")
q = qp.parse(u("a:> alfa c:<= bravo"))
assert text_type(q) == "(a:a: AND a:alfa AND a:c: AND a:bravo)"
qp.remove_plugin_class(plugins.FieldsPlugin)
qp.remove_plugin_class(plugins.RangePlugin)
q = qp.parse(u("hello a:>500 there"))
assert text_type(q) == "(a:hello AND a:a: AND a:500 AND a:there)"
def test_regex():
schema = fields.Schema(a=fields.KEYWORD, b=fields.TEXT)
qp = qparser.QueryParser("a", schema)
qp.add_plugin(plugins.RegexPlugin())
q = qp.parse(u("a:foo-bar b:foo-bar"))
assert q.__unicode__() == '(a:foo-bar AND b:foo AND b:bar)'
q = qp.parse(u('a:r"foo-bar" b:r"foo-bar"'))
assert q.__unicode__() == '(a:r"foo-bar" AND b:r"foo-bar")'
def test_pseudofield():
schema = fields.Schema(a=fields.KEYWORD, b=fields.TEXT)
def regex_maker(node):
if node.has_text:
node = | |
<reponame>wuli133144/video_parse
"""
mp4 support parse maintain tools
"""
import os
import sys
import math
from Mp4.stream_file import *
from Mp4.ftypbox import *
from Mp4.define import *
from Mp4.freebox import *
from Mp4.mdatbox import *
from Mp4.moovbox import *
from Mp4.mvhd_box import *
from Mp4.track import *
from Mp4.tkhdbox import *
from Mp4.edtsbox import *
from Mp4.DataBox import *
from Mp4.elstbox import *
import binascii
#CURRENT_DIR=os.path.dirname()
VERSION="1.0.0"
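# Usage sketch (the file name 'sample.mp4' is illustrative only; mp4_parse is
# defined just below, so the name is only resolved when this helper is called):
def _example_parse_mp4(path='sample.mp4'):
    mp = mp4_parse(path)       # stream_file subclass that opens the file
    mp.parse_ftyp_box()        # reads ftyp, then the free and mdat boxes
    mp.parse_moov_box()        # reads moov: mvhd plus the track hierarchy
    return mp.m_boxs           # top-level boxes collected so far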
class mp4_parse(stream_file):
def __init__(self,filepath):
self.m_boxs =[]
self.m_data =None
self.isstart =0
super().__init__(filepath)
def parse_ftyp_box(self):
#ftypbox_ojb=ftypbox()
size=int(binascii.b2a_hex(self.readbytes(4)),16)
data=self.readbytes(4)
ty ="%c%c%c%c" %(data[0],data[1],data[2],data[3])
print(ty)
print(size)
if size ==0:
print("mp4 file format error")
return
ftypbox_ojb = ftypbox(size,ty)
major_branch=self.readbytes(4)
major_branch="%c%c%c%c" %(major_branch[0],major_branch[1],major_branch[2],major_branch[3])
minor_branch=int(binascii.b2a_hex(self.readbytes(4)), 16)
compatible_branch=self.readbytes(4)
compatible_branch = "%c%c%c%c" % (compatible_branch[0], compatible_branch[1], compatible_branch[2], compatible_branch[3])
ftypbox_ojb.set_major_branch(major_branch)
ftypbox_ojb.set_minor_version(minor_branch)
ftypbox_ojb.set_compatible_brand(compatible_branch)
print(ftypbox_ojb)
self.m_boxs.append(ftypbox_ojb)
self.isstart=1
self.skip(8)
self.parse_free_box()
self.parse_mdat_box()
pass
def parse_free_box(self):
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
ty = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
fbox=freebox(size,ty)
#fbox.container.append([])
self.m_boxs.append(fbox)
def parse_mdat_box(self):
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
ty = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("##########parse_mdat_box start ##########")
data_container=self.readbytes(size-BOX_HEADER_SIZE)
print("##########parse_mdat_box end ##########")
mdabox=mdatbox(size,ty)
mdabox.setdata(data_container)
self.m_boxs.append(mdabox)
def parse_moov_box(self):
if self.isstart ==0:
print("please parse ftyp box firstly otherwise it's error")
exit( -1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
ty = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print(ty)
print(size)
mobox=moovbox(size,ty)
###########
#todo
############
#moovbox.add()
self.parse_mvhd_box(mobox)
self.parse_track(mobox)
self.m_boxs.append(mobox)  # keep the parsed moov box with the other top-level boxes
#######
#self.parse_track(mobox)
def parse_mvhd_box(self,movbox):
if isinstance(movbox,moovbox) is False:
print("please make sure moovbox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse mvlb############")
print(size,type)
print("###########parse mvlb end############")
data_con=self.readbytes(size-BOX_HEADER_SIZE)
mvhd=mvhdbox(size,type)
mvhd.setdata(data_con)
movbox.add(mvhd)
def parse_track(self,movbox):
if isinstance(movbox,moovbox) is False:
print("please make sure moovbox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse parse_track############")
print(size,type)
print("###########parse parse_track end############")
trabox=trackbox(size,type)
###parse tkhd box
self.parse_tkhdbox(trabox)
self.parse_edtsbox(trabox)
self.parse_mdia(trabox)
movbox.add(trabox)
#data_con=self.readbytes(size-BOX_HEADER_SIZE)
#mvhd=mvhdbox(size,type)
#mvhd.setdata(data_con)
#movbox.add(mvhd)
def parse_mdia(self,trck):
if isinstance(trck,trackbox) is False:
print("please make sure trackbox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_mdia############")
print(size,type)
print("###########parse_mdia end############")
dbox=databox(size,type)
trck.add(dbox)  # attach the mdia box to its parent track so it is not lost
self.parse_mdhd(dbox) ##dbox is mdia
self.parse_hdlr(dbox)
self.parse_minf(dbox)
def parse_minf(self,dbox):
if isinstance(dbox, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_mdia############")
print(size, type)
print("###########parse_mdia end############")
dbox_minf = databox(size, type)
dbox.add(dbox_minf)
self.parse_vmhd(dbox_minf)
self.parse_dinf(dbox_minf)
self.parse_stbl(dbox_minf)
def parse_vmhd(self,dbox_minf):
if isinstance(dbox_minf,databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_mdia############")
print(size,type)
print("###########parse_mdia end############")
dbox_vmhd=databox(size,type)
data=self.readbytes(size-BOX_HEADER_SIZE)
dbox_vmhd.setdata(data)
dbox_minf.add(dbox_vmhd)
pass
def parse_dinf(self,dbox_minf):
if isinstance(dbox_minf, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_mdia############")
print(size, type)
print("###########parse_mdia end############")
dbox_minf = databox(size, type)
dbox_minf.add(dbox_minf)
self.parse_dref(dbox_minf)
self.parse_url(dbox_minf)
pass
def parse_url(self,dbox_minf):
if isinstance(dbox_minf, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_mdia############")
print(size, type)
print("###########parse_mdia end############")
dbox_hdlr = databox(size, type)
data = self.readbytes(size - BOX_HEADER_SIZE)
dbox_hdlr.setdata(data)
dbox_minf.add(dbox_hdlr)
def parse_dref(self,dbox_minf):
if isinstance(dbox_minf, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_mdia############")
print(size, type)
print("###########parse_mdia end############")
dbox_hdlr = databox(size, type)
# data = self.readbytes(size - BOX_HEADER_SIZE)
# dbox_hdlr.setdata(data)
dbox_minf.add(dbox_hdlr)
pass
def parse_stbl(self,dbox_minf):
if isinstance(dbox_minf, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_mdia############")
print(size, type)
print("###########parse_mdia end############")
dbox_minf = databox(size, type)
dbox_minf.add(dbox_minf)
self.parse_stsd(dbox_minf)
self.parse_stts(dbox_minf)
self.parse_stss(dbox_minf)
self.parse_stsc(dbox_minf)
self.parse_stsz(dbox_minf)
self.parse_stco(dbox_minf)
def parse_stts(self,dbox):
if isinstance(dbox, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_mdia############")
print(size, type)
print("###########parse_mdia end############")
dbox_minf = databox(size, type)
dbox.add(dbox_minf)
pass
def parse_stss(self,dbox):
if isinstance(dbox, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_mdia############")
print(size, type)
print("###########parse_mdia end############")
dbox_minf = databox(size, type)
dbox.add(dbox_minf)
pass
def parse_stsc(self,dbox):
if isinstance(dbox, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_mdia############")
print(size, type)
print("###########parse_mdia end############")
dbox_minf = databox(size, type)
dbox.add(dbox_minf)
pass
def parse_stsz(self,dbox):
if isinstance(dbox, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_mdia############")
print(size, type)
print("###########parse_mdia end############")
dbox_minf = databox(size, type)
dbox.add(dbox_minf)
pass
def parse_stco(self,dbox):
if isinstance(dbox, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_mdia############")
print(size, type)
print("###########parse_mdia end############")
dbox_minf = databox(size, type)
dbox.add(dbox_minf)
pass
def parse_stsd(self,dbox):
if isinstance(dbox, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_mdia############")
print(size, type)
print("###########parse_mdia end############")
dbox_minf = databox(size, type)
dbox.add(dbox_minf)
self.parse_mp4v(dbox_minf)
def parse_mp4v(self,dbox):
if isinstance(dbox, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_mp4v############")
print(size, type)
print("###########parse_mp4v end############")
dbox_minf = databox(size, type)
dbox.add(dbox_minf)
self.parse_esds(dbox_minf)
self.parse_pasp(dbox_minf)
pass
def parse_esds(self,dbox):
if isinstance(dbox, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_esds############")
print(size, type)
print("###########parse_esds end############")
dbox_minf = databox(size, type)
dbox.add(dbox_minf)
pass
def parse_pasp(self,dbox):
if isinstance(dbox, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's error")
exit(-1)
return
# ftypbox_ojb=ftypbox()
size = int(binascii.b2a_hex(self.readbytes(4)), 16)
data = self.readbytes(4)
type = "%c%c%c%c" % (data[0], data[1], data[2], data[3])
print("###########parse_pasp############")
print(size, type)
print("###########parse_pasp end############")
dbox_minf = databox(size, type)
dbox.add(dbox_minf)
pass
def parse_hdlr(self,dbox):
if isinstance(dbox, databox) is False:
print("please make sure databox type ")
return
if self.isstart == 0:
print("please parse ftyp box firstly otherwise it's | |
import numpy as np
import os
def parse_automesh(file):
if os.path.exists(file):
lines = open(file, 'r').readlines()
return lines
def parse_sfo(filename, verbose=False):
"""
Master parser for the SFO file.
Returns the output dict.
"""
groups = parse_sfo_into_groups(filename)
d = {
'wall_segments':[],
'other':{}
}
segments = []
for g in groups:
dat = process_group(g, verbose=verbose)
type = dat['type']
if type == 'wall_segment':
d['wall_segments'].append(dat)
elif type in ['summary','BeamEnergy']:
d[type] = dat
elif type == 'header':
d['header'] = parse_header_lines(dat['lines'])
else:
d['other'][type] = dat
# update Kinetic energy in 'summary' with the right value
if 'summary' in d and 'BeamEnergy' in d:
d['summary']['data']['kinetic_energy'] = d['BeamEnergy']['data']['BeamEnergy']/1e6
d['summary']['units']['kinetic_energy'] = 'MeV'
return d
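# Minimal usage sketch for parse_sfo. The file name 'SFISH.SFO' is illustrative
# only and not part of the original module; pass a real SFO path when calling.
def _example_parse_sfo(sfo_path='SFISH.SFO'):
    sfo = parse_sfo(sfo_path, verbose=True)
    summary = sfo.get('summary', {}).get('data', {})
    print('kinetic_energy [MeV]:', summary.get('kinetic_energy'))
    for seg in sfo['wall_segments']:
        print(seg['info'].get('segment_number'), sorted(seg['wall'].keys()))
    return sfo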
def parse_sfo_into_groups(filename):
"""
Parses SFO file into groups according to separator that starts with:
'-------------------'
Returns a list of dicts, with:
raw_type: the first line
lines: list of lines
"""
with open(filename, 'r') as f:
lines = f.readlines()
groups = []
sep = '-------------------'
g = {'raw_type':'header', 'lines':[]}
groups = [g]
new_group = False
for line in lines:
line = line.strip()
# Skip empty lines
if not line:
continue
# Look for new group
if line.startswith(sep):
new_group = True
continue
# Check for new group
if new_group:
gname = line
new_group = False
g = {'raw_type': gname, 'lines':[]}
groups.append(g)
continue
# regular line
g['lines'].append(line)
return groups
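# Sketch: listing the raw group types found in an SFO file (path illustrative),
# useful for seeing which groups process_group has no dedicated parser for yet.
def _example_list_sfo_groups(sfo_path='SFISH.SFO'):
    for g in parse_sfo_into_groups(sfo_path):
        print(g['raw_type'], '-', len(g['lines']), 'lines')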
def process_group(group, verbose=False):
"""
processes a single output group dict into usable data.
"""
rtype = group['raw_type']
lines = group['lines']
d = {}
if rtype.startswith('All calculated values below refer to the mesh geometry only'):
d['type'] = 'summary'
d['data'], d['units'] = parse_sfo_summary_group(lines)
elif rtype.startswith('Power and fields on wall segment') or rtype.startswith('Fields on segment'):
d['type'] = 'wall_segment'
line1 = rtype # This should be parsed fully
d.update(parse_sfo_segment([line1]+lines))
elif rtype.startswith('The field normalization factor ASCALE for this problem is based'):
d['type'] = 'BeamEnergy'
d['data'], d['units'] = parse_sfo_beam_energy(lines)
else:
# No parser yet:
if verbose:
print('No parser for:', rtype)
d['type'] = rtype
d['lines'] = lines
return d
#_________________________________
# T7 files
def parse_fish_t7(t7file, geometry='cylindrical'):
"""
Parses a T7 file. The T7 header should have:
xmin(cm), xmax(cm), nx-1
freq(MHz)
ymin(cm), ymax(cm), ny-1
4 columns of data: Ez, Er, E, Hphi
TODO: Poisson problems, detect rectangular or cylindrical coordinates
Returns a dict with:
rmin
rmax
nr
zmin
zmax
nz
freq: frequency in MHz
Ez, Er, E, Hphi: 2D field arrays, each of shape (nr, nz)
"""
# Read header
# xmin(cm), xmax(cm), nx-1
# freq(MHz)
# ymin(cm), ymax(cm), ny-1
with open(t7file, 'r') as f:
line1 = f.readline().split()
freq_MHz = float(f.readline())
line3 = f.readline().split()
# Form output dict
d = {}
d['geometry'] = geometry
d['problem'] = 'fish'
d['zmin'], d['zmax'], d['nz'] = float(line1[0]), float(line1[1]), int(line1[2])+1
d['freq'] = freq_MHz
d['rmin'], d['rmax'], d['nr'] = float(line3[0]), float(line3[1]), int(line3[2])+1
# These should be the labels
labels=['Ez', 'Er', 'E', 'Hphi']
# Read and reshape
dat4 = np.loadtxt(t7file, skiprows=3)
ncol = len(labels)
dat4 = dat4.reshape(d['nr'], d['nz'], ncol)
for i, label in enumerate(labels):
d[label] = dat4[:,:,i]
return d
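# Sketch: loading a FISH T7 field map and pulling out the on-axis Ez profile.
# The file name is an assumption; 'Ez' has shape (nr, nz) per the reshape above,
# so row 0 is the first r grid line (the axis when rmin = 0).
def _example_fish_t7(t7_path='FISH.T7'):
    d = parse_fish_t7(t7_path)
    z = np.linspace(d['zmin'], d['zmax'], d['nz'])
    ez_axis = d['Ez'][0, :]
    return z, ez_axis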
def parse_poisson_t7(t7file, type='electric', geometry='cylindrical'):
"""
Parses a T7 file. The T7 header should have:
xmin(cm), xmax(cm), nx-1
ymin(cm), ymax(cm), ny-1
For type=='electric':
2 columns of data: Er, Ez
Units are in V/cm
For type=='magnetic':
2 columns of data: Br, Bz
Units are G
Returns a dict with:
rmin
rmax
nr
ymin
ymax
ny
data: 2D array of shape (nx, ny)
"""
assert geometry == 'cylindrical', 'TODO: other geometries'
if type == 'electric':
labels = 'Er', 'Ez'
elif type == 'magnetic':
labels = 'Br', 'Bz'
# Read header
# xmin(cm), xmax(cm), nx-1 # r in cylindrical geometry
# ymin(cm), ymax(cm), ny-1 # z in cylindrical geometry
with open(t7file, 'r') as f:
xline = f.readline().split()
yline = f.readline().split()
# Form output dict
d = {}
d['geometry'] = geometry
d['problem'] = 'poisson'
d['rmin'], d['rmax'], d['nr'] = float(xline[0]), float(xline[1]), int(xline[2])+1
d['zmin'], d['zmax'], d['nz'] = float(yline[0]), float(yline[1]), int(yline[2])+1
# Read and reshape
dat4 = np.loadtxt(t7file, skiprows=2)
ncol = len(labels)
dat4 = dat4.reshape(d['nz'], d['nr'], ncol)
for i, label in enumerate(labels):
d[label] = dat4[:,:,i].T
return d
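# Sketch: reading a Poisson magnet map and computing |B| on the grid. The file
# name is an assumption; Br and Bz are returned in Gauss with shape (nr, nz).
def _example_poisson_t7(t7_path='POISSON.T7'):
    d = parse_poisson_t7(t7_path, type='magnetic')
    b_mag = np.hypot(d['Br'], d['Bz'])
    return b_mag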
#_________________________________
#_________________________________
# Individual parsers
#_________________________________
# Header
def parse_header_variable(line):
"""
Parses a line that follows:
Variable Code Value Description
Returns:
key, value, description, in_automesh
"""
x = line.split()
key = x[0]
if x[1] == 'A':
in_automesh = True
s = x[2]
d = x[3:]
else:
in_automesh = False
s = x[1]
d = x[2:]
descrip = ' '.join(d)
try:
val = int(s)
except ValueError:
val = float(s)
return key, val, descrip, in_automesh
def parse_header_lines(lines):
"""
Parses the header lines
"""
header = 'Variable Code Value Description'
d = {}
description = {}
in_automesh = {}
comments = []
in_header=False
for line in lines:
if line == header:
in_header = True
continue
if not in_header:
comments.append(line)
continue
key, val, descrip, in_am = parse_header_variable(line)
d[key] = val
description[key] = descrip
in_automesh[key] = in_am
return {'variable':d, 'description':description, 'in_automesh':in_automesh, 'comments':'\n'.join(comments)}
#_________________________________
# Wall segments
def parse_wall_segment_line1(line):
"""
helper parse_sfo_segment
"""
d = {}
ix, x = line.split('segment')[1].split(' K,L =')
d['segment_number'] = int(float((ix)))
kl0, kl1 = x.split('to')
k0, l0 = kl0.split(',')
d['K_beg'], d['L_beg'] = int(k0), int(l0)
k1, l1 = kl1.split(',')
d['K_end'], d['L_end'] = int(k1), int(l1)
return d
def parse_sfo_segment(lines):
"""
Parses lines that start with:
'Power and fields on wall segment'
"""
# key = value lines
info = parse_wall_segment_line1(lines[0])
inside = False
fields = None
units = None
for L in lines[1:]:
L = L.strip()
# Look for key=value
if '=' in L:
key, val = L.split('=')
info[key.strip()] = val
continue
if L.startswith("K L"):
nskip = 2
fields = {name.strip('|'):[] for name in L.split()[nskip:]}
continue
elif L.startswith( "m K L"):
nskip = 3
fields = {name.strip('|'):[] for name in L.split()[nskip:]}
continue
if not fields:
continue
# Look for units
if fields and not units:
unit_labels = L.split()
assert len(unit_labels) == len(fields), print(unit_labels)
# make dict
units = dict(zip(list(fields),unit_labels))
inside = True
continue
# This might come at the end
if L.startswith('Summary'):
inside = False
# Must be inside. Add data
if inside:
x = [float(y) for y in L.split()]
# Special care if there are blanks for the skip columns
if len(x) == len(fields) + nskip:
x = x[nskip:]
for i, name in enumerate(fields):
fields[name].append(x[i])
# Exiting
for k, v in fields.items():
fields[k] = np.array(v)
return {'wall':fields, 'info':info, 'units':units}
#_________________________________
# Summary
def parse_sfo_beam_energy(lines):
d_vals = {}
d_units = {}
for line in lines:
line = line.strip()
if line.startswith('V0'):
line = line.split('=')[-1]
line = line.strip()
line = line.split(' ')
data = line[0]
data = float(data)
unit = line[1]
d_vals['BeamEnergy'] = data
d_units['BeamEnergy'] = unit
return d_vals, d_units
def parse_sfo_summary_group(lines):
"""
"""
d_vals = {}
d_units = {}
for line in lines:
if line == "":
break
else:
d_val, d_unit = parse_sfo_summary_group_line(line)
d_vals.update(d_val)
d_units.update(d_unit)
return d_vals, d_units
def parse_simple_summary_line(line):
# deal with simple line with one key and one value
d_val = {}
d_unit = {}
line = line.split("=")
if len(line) == 1:
return d_val, d_unit
key = line[0].strip()
val = line[-1]
val = val.strip()
val = val.split(" ")
d_val[key] = float(val[0])
if len(val) > 1:
d_unit[key] = val[1]
else:
d_unit[key] = ""
return d_val, d_unit
def parse_sfo_summary_group_line(line):
d_val = {}
d_unit = {}
if line.startswith('Field normalization'):
line = line.split("=")
val = line[-1]
val = val.strip()
val = val.split(" ")
d_val['Enorm'] = float(val[0])
d_unit['Enorm'] = val[1]
return d_val, d_unit
# 'for the integration path from point Z1,R1 = 50.50000 cm, 0.00000 cm',
if line.startswith('for the integration path'):
line = line.split("=")
val = line[-1]
val = val.strip()
val = val.split(",")
v1 = val[0].split(" ")
v1 = v1[0]
v2 = val[1].strip()
v2 = v2.split(" ")
v2 = v2[0]
d_val['integration_Z1'] = float(v1)
d_unit['integration_Z1'] = "cm"
d_val['integration_R1'] = float(v2)
d_unit['integration_R1'] = "cm"
return d_val, d_unit
# 'to ending point | |
'spiderwebs',
'spiel',
'spiffy',
'spikan',
'spikanor',
'spike',
'spikes',
'spiko',
'spill',
'spilled',
'spilling',
'spills',
'spin',
'spin-out',
'spin-to-win',
'spinach',
'spines',
'spinner',
'spinning',
'spins',
'spiral',
'spirit',
'spirited',
'spiriting',
'spirits',
'spit',
'spiteful',
'spits',
'spittake',
'splash',
'splashed',
'splasher',
'splashers',
'splashes',
'splashing',
'splashy',
'splat',
'splatoon',
'splatter',
'splatters',
'splendid',
'splice',
'spliced',
'splices',
'splicing',
'splinter',
'splinters',
'splish',
'splish-splash',
'split',
'splitting',
'splurge',
'spoiled',
'spoiler',
'spoke',
'spoken',
'spondee',
'sponge',
'spongy',
'sponsor',
'sponsored',
'sponsoring',
'sponsors',
'spook',
'spooks',
'spooky',
'spookyrandi',
'spoon',
'spoons',
'sport',
'sported',
'sporting',
'sportive',
'sports',
'spot',
"spot's",
'spotcheek',
'spotify',
'spotless',
'spotlight',
'spots',
'spotted',
'spotting',
'spotz',
'spout',
'spouts',
'spray',
'sprays',
'spree',
'sprightly',
'spring',
'springer',
'springers',
'springing',
'springs',
'springtime',
'springy',
'sprinkle',
'sprinkled',
'sprinkler',
'sprinkles',
'sprinkling',
'sprint',
'sprinting',
'sprite',
'sprites',
'sprocket',
'sprockets',
'sprouse',
'sprout',
'sprouter',
'spruce',
'spud',
'spuds',
'spunkiness',
'spunky',
'spy',
'spyp.o.d.',
'spypod',
'spyro',
'sqad364',
'squad',
'squall',
"squall's",
'squalls',
'square',
'squared',
'squarely',
'squares',
'squaring',
'squash',
'squashed',
'squashing',
'squawk',
'squawks',
'squeak',
'squeaker',
'squeakers',
'squeakity',
'squeaky',
'squeal',
'squeeks',
'squeeze',
'squeezebox',
'squeezed',
'squeezing',
'squid',
"squid's",
'squids',
'squidzoid',
'squiggle',
'squigglebee',
'squiggleberry',
'squiggleblabber',
'squigglebocker',
'squiggleboing',
'squiggleboom',
'squigglebounce',
'squigglebouncer',
'squigglebrains',
'squigglebubble',
'squigglebumble',
'squigglebump',
'squigglebumper',
'squiggleburger',
'squigglechomp',
'squigglecorn',
'squigglecrash',
'squigglecrumbs',
'squigglecrump',
'squigglecrunch',
'squiggledoodle',
'squiggledorf',
'squiggleface',
'squigglefidget',
'squigglefink',
'squigglefish',
'squiggleflap',
'squiggleflapper',
'squiggleflinger',
'squiggleflip',
'squiggleflipper',
'squigglefoot',
'squigglefuddy',
'squigglefussen',
'squigglegadget',
'squigglegargle',
'squigglegloop',
'squiggleglop',
'squigglegoober',
'squigglegoose',
'squigglegrooven',
'squigglehoffer',
'squigglehopper',
'squigglejinks',
'squiggleklunk',
'squiggleknees',
'squigglemarble',
'squigglemash',
'squigglemonkey',
'squigglemooch',
'squigglemouth',
'squigglemuddle',
'squigglemuffin',
'squigglemush',
'squigglenerd',
'squigglenoodle',
'squigglenose',
'squigglenugget',
'squigglephew',
'squigglephooey',
'squigglepocket',
'squigglepoof',
'squigglepop',
'squigglepounce',
'squigglepow',
'squigglepretzel',
'squigglequack',
'squiggleroni',
'squigglescooter',
'squigglescreech',
'squigglesmirk',
'squigglesnooker',
'squigglesnoop',
'squigglesnout',
'squigglesocks',
'squigglespeed',
'squigglespinner',
'squigglesplat',
'squigglesprinkles',
'squigglesticks',
'squigglestink',
'squiggleswirl',
'squiggleteeth',
'squigglethud',
'squiggletoes',
'squiggleton',
'squiggletoon',
'squiggletooth',
'squiggletwist',
'squigglewhatsit',
'squigglewhip',
'squigglewig',
'squigglewoof',
'squigglezaner',
'squigglezap',
'squigglezapper',
'squigglezilla',
'squigglezoom',
'squiggly',
'squillace',
'squire',
'squirmy',
'squirrel',
'squirrelfish',
'squirrels',
'squirt',
'squirt gun',
'squirting',
'squirtle',
'squirtless',
'squishy',
'srawhats',
'sri',
'srry',
'sry',
'ssw',
'st',
'st.',
'stabber',
'stack',
'stackable',
'stacker',
'stacking',
'stacks',
'stadium',
'stadiums',
'staff',
"staff's",
'staffed',
'staffer',
'staffers',
'staffing',
'staffs',
'stage',
'staged',
'stager',
'stagers',
'stages',
'staging',
'staid',
'stain',
'stained-glass',
'stainless',
'stains',
'stair',
"stair's",
'stairs',
'stake',
'stalkers',
'stall',
'stallion',
'stamp',
'stamped',
'stamper',
'stampers',
'stamping',
'stamps',
'stan',
'stanchion',
'stanchions',
'stand',
'stand-up',
'stand-up-and-cheer',
'standard',
'standardly',
'standards',
'stander',
'standing',
'standings',
'stands',
'stanley',
"stanley's",
'star',
"star's",
'star-chaser',
'star-shaped',
'starboard',
'starbr',
'starcatchers',
'starch',
'stardom',
'stareaston',
'stared',
'starer',
'starfire',
'starfish',
'stargate',
'stargazer',
'staring',
'starlight',
'starmie',
'starring',
'starry',
'stars',
'starscream',
'start',
'started',
'starter',
'starters',
'starting',
'starting-line',
'starts',
'staryu',
'stas',
'stash',
'stat',
'statefarm',
'statement',
"statement's",
'statements',
'states',
'station',
"station's",
'stationed',
'stationer',
'stationery',
'stationing',
'stations',
'statistic',
'statler',
'stats',
'statuary',
'statue',
'statues',
'statuesque',
'status',
'statuses',
'stay',
'stayed',
'staying',
'stayne',
'stays',
'steadfast',
'steadman',
'steady',
'steak',
'steakhouse',
'steakhouses',
'steal',
'stealer',
'stealing',
'steals',
'stealth',
'steam',
'steamboat',
'steaming',
'steel',
'steelhawk',
'steeple',
'steer',
'steered',
'steerer',
'steering',
'steers',
'steffi',
'stella',
'stem',
'stench',
'stenches',
'stenchy',
'step',
"step's",
'stepanek',
'steph',
'stephante',
'stepped',
'stepping',
'steps',
'sterling',
'stern',
'stetson',
'steve',
'steven',
'stew',
'stewart',
'stflush',
'stick',
'sticker',
'stickerbook',
'stickers',
'sticking',
'sticks',
'sticky',
"sticky's",
'stickyfeet',
'still',
'stilled',
'stiller',
'stillest',
'stilling',
'stillness',
'stills',
'stillwater',
'sting',
'stinger',
'stingers',
'stings',
'stink',
'stinkbucket',
'stinkbugs',
'stinker',
'stinking',
'stinks',
'stinky',
"stinky's",
'stir',
'stitch',
"stitch's",
'stitched',
'stitcher',
'stitches',
'stitching',
'stock',
'stocked',
'stockers',
'stockier',
'stocking',
'stockings',
'stockpile',
'stocks',
'stoke',
'stoked',
'stole',
'stolen',
'stomp',
'stomper',
'stone',
'stones',
'stood',
'stool',
'stools',
'stop',
"stop's",
'stoped',
'stoppable',
'stopped',
'stopper',
'stopping',
'stops',
'storage',
'store',
'stored',
'stores',
'storied',
'stories',
'storing',
'storm',
"storm's",
'storm-sail',
'stormbringers',
'stormed',
'stormers',
'stormfire',
'stormhaw',
'stormhold',
'storming',
'stormlords',
'stormrider',
'storms',
'stormy',
'story',
"story's",
'storybook',
'storybookland',
'storybooks',
'storying',
'storylines',
'storytelling',
'stow',
'stowaway',
'str',
'straggler',
'stragglers',
'straight',
'strait',
'strand',
'strands',
'strange',
'strangely',
'stranger',
'strangers',
'strangest',
'strategies',
'strategists',
'strategy',
"strategy's",
'straw',
'strawberrie',
'strawberries',
'strawberry',
'strawhats',
'strays',
'stream',
'streamer',
'streamers',
'streaming',
'streams',
'street',
'streeters',
'streetlight',
'streetlights',
'streets',
'streetwise',
'strength',
'strengthen',
'strengthens',
'stress',
'stressed',
'stresses',
'stressful',
'stressing',
'stretch',
'stretched',
'stretcher',
'stretchers',
'stretches',
'stretching',
'strict',
'strictly',
'striders',
'strike',
'striker',
'strikers',
'strikes',
'striking',
'string',
'stringbean',
'stringing',
'strings',
'stringy',
'stripe',
'strive',
'stroll',
'strolling',
'strom',
'strong',
'strong-minded',
'strongbox',
'stronger',
'strongest',
'strongly',
'structure',
'struggle',
'struggled',
'struggling',
'strung',
'strut',
'stu',
'stubborn',
'stubby',
'stuck',
'stud',
'studded',
'studied',
'studier',
'studies',
'studio',
"studio's",
'studios',
'study',
'studying',
'stuff',
'stuffed',
'stuffer',
'stuffing',
'stuffings',
'stuffs',
'stuffy',
'stumble',
'stump',
'stumps',
'stumpy',
'stun',
'stunned',
'stunners',
'stunning',
'stuns',
'stunts',
'stupendous',
'sturdy',
'stut',
'stutter',
'stutters',
'style',
'style-talent',
'styled',
'styler',
'stylers',
'styles',
"stylin'",
'styling',
'stylish',
'sub',
'subject',
"subject's",
'subjected',
'subjecting',
'subjective',
'subjects',
'sublocation',
'submarine',
'submarines',
'submit',
'submits',
'submitted',
'submitting',
'subscribe',
'subscribed',
'subscribers',
'subscribing',
'subscription',
'subscriptions',
'substance',
'substitute',
'subtalent',
'subtalents',
'subtitle',
'subtle',
'subzero',
'succeed',
'succeeded',
'succeeder',
'succeeding',
'succeeds',
'success',
'successes',
'successful',
'successfully',
'successive',
'successor',
'succinct',
'succinctly',
'such',
'sucha',
'sucker',
'suckerpunch',
'suction',
'sudan',
'sudden',
'suddenly',
'sudoku',
'sudoron',
'sue',
'suffice',
'suffix',
'suffixes',
'sufrigate',
'sugar',
'sugarplum',
'sugary',
'suggest',
'suggested',
'suggester',
'suggesting',
'suggestion',
"suggestion's",
'suggestions',
'suggestive',
'suggests',
'suit',
"suit's",
'suitcase',
'suitcases',
'suite',
'suited',
'suiters',
'suiting',
'suits',
'sulfur',
'sulley',
'sully',
'sultan',
'sum',
"sum's",
'sumer',
'sumhajee',
'summary',
'summer',
"summer's",
'summered',
'summering',
'summerland',
'summers',
'summit',
'summon',
'summoned',
'summoning',
'summons',
'sumo',
'sums',
'sun',
"sun's",
'sunburst',
'sundae',
'sundaes',
'sunday',
'sundays',
'sundown',
'suneroo',
'sunflower-seed',
'sunflowers',
'sung',
'sunk',
'sunken',
'sunnies',
'sunny',
"sunny's",
'sunrise',
'suns',
'sunsational',
'sunscreen',
'sunset',
'sunsets',
"sunshine's",
'sunshines',
'sunswept',
'suoicodilaipxecitsiligarfilacrepus',
'sup',
'supa-star',
'super',
"super's",
'super-cool',
'super-duper',
'super-powerful',
'super-talented',
'super-thoughtful',
'super-toon',
'superb',
'superbee',
'superberry',
'superblabber',
'superbly',
'superbocker',
'superboing',
'superboom',
'superbounce',
'superbouncer',
'superbrains',
'superbubble',
'superbumble',
'superbump',
'superbumper',
'superburger',
'supercalifragilisticexpialidocious',
'superchomp',
'supercool',
'supercorn',
'supercrash',
'supercrumbs',
'supercrump',
'supercrunch',
'superdoodle',
'superdorf',
'superduper',
'superface',
'superficial',
'superficially',
'superfidget',
'superfink',
'superfish',
'superflap',
'superflapper',
'superflinger',
'superflip',
'superflipper',
'superfluous',
'superfoot',
'superfuddy',
'superfussen',
'supergadget',
'supergargle',
'supergloop',
'superglop',
'supergoober',
'supergoose',
'supergrooven',
'superhero',
"superhero's",
'superheroes',
'superhoffer',
'superhopper',
'superior',
'superjinks',
'superklunk',
'superknees',
'superman',
'supermarble',
'supermash',
'supermonkey',
'supermooch',
'supermouth',
'supermuddle',
'supermuffin',
'supermush',
'supernatural',
'supernerd',
'supernoodle',
'supernose',
'supernugget',
'superphew',
'superphooey',
'superpocket',
'superpoof',
'superpop',
'superpounce',
'superpow',
'superpretzel',
'superquack',
'superroni',
'supers',
'superscooter',
'superscreech',
'superserpents',
'supersmirk',
'supersnooker',
'supersnoop',
'supersnout',
'supersocks',
'superspeed',
'superspinner',
'supersplat',
'supersprinkles',
'superstar',
'supersticks',
'superstink',
'superstition',
'superstitions',
'superswirl',
'superteeth',
'superthud',
'supertoes',
'superton',
'supertoon',
'supertoons',
'supertooth',
'supertwist',
'supervise',
'supervised',
'supervising',
'supervisor',
'supervisors',
'superwhatsit',
'superwhip',
'superwig',
'superwoof',
'superzaner',
'superzap',
'superzapper',
'superzilla',
'superzoom',
'supplement',
'supplication',
'supplied',
'supplier',
'suppliers',
'supplies',
'supply',
"supply's",
'supplying',
'support',
'supported',
'supporter',
'supporters',
'supporting',
'supportive',
'supports',
'suppose',
'supposed',
'supposer',
'supposes',
'supposing',
'supreme',
'supremo',
"supremo's",
'sure',
'sured',
'surely',
'surer',
'surest',
'surf',
"surf's",
'surface',
'surfaced',
'surfacer',
'surfacers',
'surfaces',
'surfacing',
'surfari',
'surfboard',
'surfer',
'surfers',
"surfin'",
'surfing',
'surfs',
'surge',
'surgeon',
'surgeons',
'surges',
'surging',
'surlee',
'surplus',
'surprise',
"surprise's",
'surprised',
'surpriser',
'surprises',
'surprising',
'surprize',
'surrender',
'surrendered',
'surrendering',
'surrenders',
'surround',
'surrounded',
'surrounding',
'surroundings',
'surrounds',
'surves',
'survey',
'surveying',
'survival',
'survive',
'survived',
'surviver',
'survives',
'surviving',
'survivor',
"survivor's",
'survivors',
'susan',
"susan's",
'sushi',
'suspect',
'suspected',
'suspecting',
'suspects',
'suspended',
'suspenders',
'suspense',
'suspicion',
'suspicions',
'suspicious',
'suspiciously',
'svaal',
'svage',
'sven',
'svetlana',
'swab',
'swabbie',
"swabbin'",
'swabby',
'swag',
'swaggy',
'swagyolomoneyhaxmlgpro',
'swain',
'swam',
'swamies',
'swamp',
'swamps',
'swan',
'swanky',
'swann',
"swann's",
'swans',
'swap',
'swapped',
'swapping',
'swaps',
'swarm',
'swarthy',
'swash',
'swashbuckler',
'swashbucklers',
'swashbuckling',
'swashbucler',
'swashbuculer',
'swat',
'swats',
'swatted',
'swatting',
'sweat',
'sweater',
'sweaters',
'sweatheart',
'sweatshirt',
'sweatshirts',
'sweaty',
'swede',
'sweden',
'swedes',
'swedish',
'sweep',
'sweeping',
'sweeps',
'sweepstakes',
'sweet',
'sweeten',
'sweetens',
'sweeter',
'sweetest',
'sweetgum',
'sweetie',
'sweeting',
'sweetly',
'sweetness',
'sweets',
'sweetums',
'sweetwrap',
'sweety',
'swell',
'swelled',
'swelling',
'swellings',
'swells',
'swept',
'swervy',
'swift',
'swiftness',
'swifty',
'swig',
'swim',
'swimer',
'swimmer',
'swimming',
'swimmingly',
'swims',
'swimwear',
'swindler',
'swindlers',
'swine',
'swing',
'swinger',
'swingers',
'swinging',
'swings',
'swipe',
'swiped',
'swipes',
"swipin'",
'swirl',
'swirled',
'swirls',
'swirly',
'swiss',
'switch',
"switch's",
'switchbox',
'switched',
'switcher',
'switcheroo',
'switchers',
'switches',
'switching',
'switchings',
'swiveling',
'swoop',
'sword',
"sword's",
'swordbreakers',
'swords',
'swordslashers',
'swordsman',
'swordsmen',
'sycamore',
'sydney',
'sylveon',
'sylvia',
'symbiote',
'symbol',
'symbols',
'symmetrical',
'symmetry',
'symphonies',
'symphony',
'symposia',
'symposium',
'symposiums',
'sync',
'syncopation',
'syndicate',
'synergise',
'synergised',
'synergises',
'synergising',
'synergized',
'synergizes',
'synergizing',
'synergy',
'synopsis',
'synthesis',
'syrberus',
'syrup',
'syrupy',
'system',
"system's",
'systems',
't-shirt',
't-shirts',
't-squad',
"t-squad's",
't.b.',
't.p.',
'ta',
'tab',
'tabatha',
'tabbed',
'tabby',
'tabitha',
"tabitha's",
'table',
"table's",
'table-setting-talent',
'tabled',
'tables',
'tableset',
'tabling',
'tabs',
'tabulate',
'tack',
'tacked',
'tacking',
'tackle',
'tackled',
'tackles',
'tackling',
'tacks',
'tacky',
'taco',
'tact',
'tactful',
'tactics',
'tad',
'taffy',
'tag',
'tags',
'tailed',
'tailgater',
'tailgaters',
'tailgating',
'tailing',
'tailor',
'tailored',
'tailoring',
'tailors',
'tailpipe',
'tailpipes',
'tails',
'tailswim',
'tainted',
'take',
'taken',
'taker',
'takers',
'takes',
'taketh',
"takin'",
'taking',
'takings',
'takion',
'tale',
"tale's",
'talent',
'talented',
'talents',
'tales',
'talespin',
'talk',
'talkative',
'talked',
'talker',
'talkers',
'talkin',
'talking',
'talks',
'tall',
'tall-tale-telling-talent',
'taller',
'tallest',
'tally',
'talon',
'talons',
'tam',
'tamazoa',
'tamers',
'tammy',
'tampa',
'tan',
'tandemfrost',
'tangaroa',
"tangaroa's",
'tangaroa-ru',
"tangaroa-ru's",
'tangela',
'tangerine',
'tangle',
'tango',
'tangoed',
'tangoing',
'tangos',
'tangy',
'tanith',
'tank',
'tanker',
'tankers',
'tanking',
'tanks',
'tanned',
'tanning',
'tanny',
'tans',
'tansy',
'tap',
"tap's",
'tape',
'taped',
'taper',
'tapers',
'tapes',
'tapestry',
'taping',
'tapings',
'taps',
'tar',
'tara',
'tarantula',
'target',
'targeted',
'targeting',
| |
at (-1, 1, -1, 1),
A vertex at (-1, -1, -1, 1),
A vertex at (-1, -1, -1, -1))
You can use the
:meth:`~sage.geometry.polyhedron.representation.PolyhedronRepresentation.index`
method to enumerate vertices and inequalities::
sage: def get_idx(rep): return rep.index()
sage: [get_idx(_) for _ in face.ambient_Hrepresentation()]
[4]
sage: [get_idx(_) for _ in face.ambient_Vrepresentation()]
[8, 9, 10, 11, 12, 13, 14, 15]
sage: [ ([get_idx(_) for _ in face.ambient_Vrepresentation()],
....: [get_idx(_) for _ in face.ambient_Hrepresentation()])
....: for face in p.faces(3) ]
[([0, 5, 6, 7, 8, 9, 14, 15], [7]),
([1, 4, 5, 6, 10, 13, 14, 15], [6]),
([1, 2, 6, 7, 8, 10, 11, 15], [5]),
([8, 9, 10, 11, 12, 13, 14, 15], [4]),
([0, 3, 4, 5, 9, 12, 13, 14], [3]),
([0, 2, 3, 7, 8, 9, 11, 12], [2]),
([1, 2, 3, 4, 10, 11, 12, 13], [1]),
([0, 1, 2, 3, 4, 5, 6, 7], [0])]
TESTS::
sage: pr = Polyhedron(rays = [[1,0,0],[-1,0,0],[0,1,0]], vertices = [[-1,-1,-1]], lines=[(0,0,1)])
sage: pr.faces(4)
()
sage: pr.faces(3)[0].ambient_V_indices()
(0, 1, 2, 3)
sage: pr.facets()[0].ambient_V_indices()
(0, 1, 2)
sage: pr.faces(1)
()
sage: pr.faces(0)
()
sage: pr.faces(-1)
(A -1-dimensional face of a Polyhedron in QQ^3,)
"""
return tuple(self.face_generator(face_dimension))
def facets(self):
r"""
Return the facets of the polyhedron.
Facets are the maximal nontrivial faces of polyhedra.
The empty face and the polyhedron itself are trivial.
A facet of a `d`-dimensional polyhedron is a face of dimension
`d-1`. For `d \neq 0` the converse is true as well.
OUTPUT:
A tuple of
:class:`~sage.geometry.polyhedron.face.PolyhedronFace`. See
:mod:`~sage.geometry.polyhedron.face` for details. The order
is random but fixed.
.. SEEALSO:: :meth:`facets`
EXAMPLES:
Here we find the eight three-dimensional facets of the
four-dimensional hypercube::
sage: p = polytopes.hypercube(4)
sage: p.facets()
(A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices,
A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices,
A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices,
A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices,
A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices,
A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices,
A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices,
A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices)
This is the same result as explicitly finding the
three-dimensional faces::
sage: dim = p.dimension()
sage: p.faces(dim-1)
(A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices,
A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices,
A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices,
A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices,
A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices,
A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices,
A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices,
A 3-dimensional face of a Polyhedron in ZZ^4 defined as the convex hull of 8 vertices)
The ``0``-dimensional polyhedron does not have facets::
sage: P = Polyhedron([[0]])
sage: P.facets()
()
"""
if self.dimension() == 0:
return ()
return self.faces(self.dimension()-1)
@cached_method(do_pickle=True)
def f_vector(self):
r"""
Return the f-vector.
OUTPUT:
Returns a vector whose `i`-th entry is the number of
`i-2`-dimensional faces of the polytope.
.. NOTE::
The ``vertices`` as given by :meth:`Polyhedron_base.vertices`
do not need to correspond to `0`-dimensional faces. If a polyhedron
contains `k` lines they correspond to `k`-dimensional faces.
See example below
EXAMPLES::
sage: p = Polyhedron(vertices=[[1, 2, 3], [1, 3, 2],
....: [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1], [0, 0, 0]])
sage: p.f_vector()
(1, 7, 12, 7, 1)
sage: polytopes.cyclic_polytope(4,10).f_vector()
(1, 10, 45, 70, 35, 1)
sage: polytopes.hypercube(5).f_vector()
(1, 32, 80, 80, 40, 10, 1)
Polyhedra with lines do not have `0`-faces::
sage: Polyhedron(ieqs=[[1,-1,0,0],[1,1,0,0]]).f_vector()
(1, 0, 0, 2, 1)
However, the method :meth:`Polyhedron_base.vertices` returns
two points that belong to the ``Vrepresentation``::
sage: P = Polyhedron(ieqs=[[1,-1,0],[1,1,0]])
sage: P.vertices()
(A vertex at (1, 0), A vertex at (-1, 0))
sage: P.f_vector()
(1, 0, 2, 1)
TESTS:
Check that :trac:`28828` is fixed::
sage: P.f_vector().is_immutable()
True
The cache of the f-vector is being pickled::
sage: P = polytopes.cube()
sage: P.f_vector()
(1, 8, 12, 6, 1)
sage: Q = loads(dumps(P))
sage: Q.f_vector.is_in_cache()
True
"""
return self.combinatorial_polyhedron().f_vector()
def flag_f_vector(self, *args):
r"""
Return the flag f-vector.
For each `-1 < i_0 < \dots < i_n < d` the flag f-vector
counts the number of flags `F_0 \subset \dots \subset F_n`
with `F_j` of dimension `i_j` for each `0 \leq j \leq n`,
where `d` is the dimension of the polyhedron.
INPUT:
- ``args`` -- integers (optional); specify an entry of the
flag-f-vector; must be an increasing sequence of integers
OUTPUT:
- a dictionary, if no arguments were given
- an Integer, if arguments were given
EXAMPLES:
Obtain the entire flag-f-vector::
sage: P = polytopes.twenty_four_cell()
sage: P.flag_f_vector()
{(-1,): 1,
(0,): 24,
(0, 1): 192,
(0, 1, 2): 576,
(0, 1, 2, 3): 1152,
(0, 1, 3): 576,
(0, 2): 288,
(0, 2, 3): 576,
(0, 3): 144,
(1,): 96,
(1, 2): 288,
(1, 2, 3): 576,
(1, 3): 288,
(2,): 96,
(2, 3): 192,
(3,): 24,
(4,): 1}
Specify an entry::
sage: P.flag_f_vector(0,3)
144
sage: P.flag_f_vector(2)
96
Leading ``-1`` and trailing entry of dimension are allowed::
sage: P.flag_f_vector(-1,0,3)
144
sage: P.flag_f_vector(-1,0,3,4)
144
One can get the number of trivial faces::
sage: P.flag_f_vector(-1)
1
sage: P.flag_f_vector(4)
1
Polyhedra with lines have ``0`` entries accordingly::
sage: P = (Polyhedron(lines=[[1]]) * polytopes.cross_polytope(3))
sage: P.flag_f_vector()
{(-1,): 1,
(0, 1): 0,
(0, 1, 2): 0,
(0, 1, 3): 0,
(0, 2): 0,
(0, 2, 3): 0,
(0, 3): 0,
(0,): 0,
(1, 2): 24,
(1, 2, 3): 48,
(1, 3): 24,
(1,): 6,
(2, 3): 24,
(2,): 12,
(3,): 8,
4: 1}
If the arguments are not strictly increasing or out of range, a key error is raised::
sage: P.flag_f_vector(-1,0,3,6)
Traceback (most recent call last):
...
KeyError: (0, 3, 6)
sage: P.flag_f_vector(-1,3,0)
Traceback (most recent call last):
...
KeyError: (3, 0)
"""
flag = self._flag_f_vector()
if len(args) == 0:
return flag
elif len(args) == 1:
return flag[(args[0],)]
else:
dim = self.dimension()
if args[0] == -1:
args = args[1:]
if args[-1] == dim:
args = args[:-1]
return flag[tuple(args)]
@cached_method(do_pickle=True)
def _flag_f_vector(self):
r"""
Return the flag-f-vector.
See :meth:`flag_f_vector`.
TESTS::
sage: polytopes.hypercube(4)._flag_f_vector()
{(-1,): 1,
(0,): 16,
(0, 1): 64,
(0, 1, 2): 192,
(0, 1, 2, 3): 384,
(0, 1, 3): 192,
(0, 2): 96,
(0, 2, 3): 192,
(0, 3): 64,
(1,): 32,
(1, 2): 96,
(1, 2, 3): 192,
(1, 3): 96,
(2,): 24,
(2, 3): 48,
(3,): 8,
(4,): 1}
"""
return self.combinatorial_polyhedron()._flag_f_vector()
def vertex_graph(self):
"""
Return a graph in which the vertices correspond to vertices
of the polyhedron, and edges to edges.
.. NOTE::
The graph of a polyhedron with lines has no vertices,
as the polyhedron has no vertices (`0`-faces).
The method :meth:`Polyhedron_base.vertices` returns
the defining points in this case.
EXAMPLES::
sage: g3 = polytopes.hypercube(3).vertex_graph(); g3
Graph on 8 vertices
sage: g3.automorphism_group().cardinality()
48
sage: s4 = polytopes.simplex(4).vertex_graph(); s4
Graph on 5 vertices
sage: s4.is_eulerian()
True
The graph of an unbounded polyhedron
is the graph of the bounded complex::
sage: open_triangle = Polyhedron(vertices=[[1,0], [0,1]],
....: rays =[[1,1]])
sage: open_triangle.vertex_graph()
Graph on 2 vertices
The graph of a polyhedron with lines has no vertices::
sage: line = Polyhedron(lines=[[0,1]])
sage: line.vertex_graph()
Graph on 0 vertices
TESTS:
Check for a line segment (:trac:`30545`)::
sage: polytopes.simplex(1).graph().edges()
[(A vertex | |
import subprocess
from time import time
from random import randint, random
from tqdm import trange
import matplotlib.pyplot as plt
from math import sqrt, exp
pattern_num = 5
index_num = 38
'''
translate_raw = [
[0, 1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15], [16, 17, 18, 19, 20, 21, 22, 23], [24, 25, 26, 27, 28, 29, 30, 31], [32, 33, 34, 35, 36, 37, 38, 39], [40, 41, 42, 43, 44, 45, 46, 47], [48, 49, 50, 51, 52, 53, 54, 55], [56, 57, 58, 59, 60, 61, 62, 63],
[0, 8, 16, 24, 32, 40, 48, 56], [1, 9, 17, 25, 33, 41, 49, 57], [2, 10, 18, 26, 34, 42, 50, 58], [3, 11, 19, 27, 35, 43, 51, 59], [4, 12, 20, 28, 36, 44, 52, 60], [5, 13, 21, 29, 37, 45, 53, 61], [6, 14, 22, 30, 38, 46, 54, 62], [7, 15, 23, 31, 39, 47, 55, 63],
[5, 14, 23], [4, 13, 22, 31], [3, 12, 21, 30, 39], [2, 11, 20, 29, 38, 47], [1, 10, 19, 28, 37, 46, 55], [0, 9, 18, 27, 36, 45, 54, 63], [8, 17, 26, 35, 44, 53, 62], [16, 25, 34, 43, 52, 61], [24, 33, 42, 51, 60], [32, 41, 50, 59], [40, 49, 58],
[2, 9, 16], [3, 10, 17, 24], [4, 11, 18, 25, 32], [5, 12, 19, 26, 33, 40], [6, 13, 20, 27, 34, 41, 48], [7, 14, 21, 28, 35, 42, 49, 56], [15, 22, 29, 36, 43, 50, 57], [23, 30, 37, 44, 51, 58], [31, 38, 45, 52, 59], [39, 46, 53, 60], [47, 54, 61]
]
same_param = [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, 3, 3, 2, 1, 0, 4, 5, 6, 7, 8, 9, 8, 7, 6, 5, 4, 4, 5, 6, 7, 8, 9, 8, 7, 6, 5, 4]
'''
translate = []
eval_translate = []
each_param_num = []
edge1 = [
[54, 63, 62, 61, 60, 59, 58, 57, 56, 49],
[49, 56, 48, 40, 32, 24, 16, 8, 0, 9],
[9, 0, 1, 2, 3, 4, 5, 6, 7, 14],
[14, 7, 15, 23, 31, 39, 47, 55, 63, 54]
]
edge2 = []
for arr in edge1:
edge2.append(arr)
edge2.append(list(reversed(arr)))
translate.append(edge2)
eval_translate.append(edge1)
each_param_num.append(3 ** 10)
corner1= [
[3, 2, 1, 0, 9, 8, 16, 24],
[4, 5, 6, 7, 14, 15, 23, 31],
[60, 61, 62, 63, 54, 55, 47, 39],
[59, 58, 57, 56, 49, 48, 40, 32]
]
corner2 = [
[3, 2, 1, 0, 9, 8, 16, 24],
[24, 16, 8, 0, 9, 1, 2, 3],
[4, 5, 6, 7, 14, 15, 23, 31],
[31, 23, 15, 7, 14, 6, 5, 4],
[60, 61, 62, 63, 54, 55, 47, 39],
[39, 47, 55, 63, 54, 62, 61, 60],
[59, 58, 57, 56, 49, 48, 40, 32],
[32, 40, 48, 56, 49, 57, 58, 59]
]
translate.append(corner2)
eval_translate.append(corner1)
each_param_num.append(3 ** 8)
corner24 = [
[0, 1, 2, 3, 8, 9, 10, 11],
[0, 8, 16, 24, 1, 9, 17, 25],
[7, 6, 5, 4, 15, 14, 13, 12],
[7, 15, 23, 31, 6, 14, 22, 30],
[63, 62, 61, 60, 55, 54, 53, 52],
[63, 55, 47, 39, 62, 54, 46, 38],
[56, 57, 58, 59, 48, 49, 50, 51],
[56, 48, 40, 32, 57, 49, 41, 33]
]
translate.append(corner24)
eval_translate.append(corner24)
each_param_num.append(3 ** 8)
diagonal1 = [
[0, 9, 18, 27, 36, 45, 54, 63],
[7, 14, 21, 28, 35, 42, 49, 56]
]
diagonal2 = []
for arr in diagonal1:
diagonal2.append(arr)
diagonal2.append(list(reversed(arr)))
translate.append(diagonal2)
eval_translate.append(diagonal1)
each_param_num.append(3 ** 8)
edge1 = [
[0, 1, 2, 3, 4, 5, 6, 7],
[7, 15, 23, 31, 39, 47, 55, 63],
[63, 62, 61, 60, 59, 58, 57, 56],
[56, 48, 40, 32, 24, 16, 8, 0]
]
edge2 = []
for arr in edge1:
edge2.append(arr)
edge2.append(list(reversed(arr)))
translate.append(edge2)
eval_translate.append(edge1)
each_param_num.append(3 ** 8)
pattern_param = [[] for _ in range(pattern_num)]
with open('param_pattern.txt', 'r') as f:
for i in range(pattern_num):
for j in range(each_param_num[i]):
pattern_param[i].append(float(f.readline()))
weight = [[] for _ in range(pattern_num)]
with open('patttern_weight.txt', 'r') as f:
for i in range(pattern_num):
for j in range(3):
weight[i].append(float(f.readline()))
win_num = [[0 for _ in range(each_param_num[i])] for i in range(pattern_num)]
seen_num = [[0 for _ in range(each_param_num[i])] for i in range(pattern_num)]
ans = [[0 for _ in range(each_param_num[i])] for i in range(pattern_num)]
seen_grid = []
prospect = []
hw = 8
hw2 = 64
dy = [0, 1, 0, -1, 1, 1, -1, -1]
dx = [1, 0, -1, 0, 1, -1, 1, -1]
def empty(grid, y, x):
return grid[y][x] == -1 or grid[y][x] == 2
def inside(y, x):
return 0 <= y < hw and 0 <= x < hw
def check(grid, player, y, x):
res_grid = [[False for _ in range(hw)] for _ in range(hw)]
res = 0
for dr in range(8):
ny = y + dy[dr]
nx = x + dx[dr]
if not inside(ny, nx):
continue
if empty(grid, ny, nx):
continue
if grid[ny][nx] == player:
continue
#print(y, x, dr, ny, nx)
plus = 0
flag = False
for d in range(hw):
nny = ny + d * dy[dr]
nnx = nx + d * dx[dr]
if not inside(nny, nnx):
break
if empty(grid, nny, nnx):
break
if grid[nny][nnx] == player:
flag = True
break
#print(y, x, dr, nny, nnx)
plus += 1
if flag:
res += plus
for d in range(plus):
nny = ny + d * dy[dr]
nnx = nx + d * dx[dr]
res_grid[nny][nnx] = True
return res, res_grid
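# Sketch: counting flips for a known legal opening move. Board values follow the
# conventions above (-1 or 2 = empty, 0 = black, 1 = white); the move below is
# black to d3, which should flip exactly one white disc at grid[3][3].
def _example_check_opening():
    grid = [[-1 for _ in range(hw)] for _ in range(hw)]
    grid[3][3] = 1
    grid[3][4] = 0
    grid[4][3] = 0
    grid[4][4] = 1
    flips, flipped = check(grid, 0, 2, 3)
    return flips, flipped   # flips == 1, flipped[3][3] is True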
class reversi:
def __init__(self):
self.grid = [[-1 for _ in range(hw)] for _ in range(hw)]
self.grid[3][3] = 1
self.grid[3][4] = 0
self.grid[4][3] = 0
self.grid[4][4] = 1
self.player = 0 # 0: black, 1: white
self.nums = [2, 2]
def move(self, y, x):
plus, plus_grid = check(self.grid, self.player, y, x)
if (not empty(self.grid, y, x)) or (not inside(y, x)) or not plus:
print('Please input a correct move')
return 1
self.grid[y][x] = self.player
for ny in range(hw):
for nx in range(hw):
if plus_grid[ny][nx]:
self.grid[ny][nx] = self.player
self.nums[self.player] += 1 + plus
self.nums[1 - self.player] -= plus
self.player = 1 - self.player
return 0
def check_pass(self):
for y in range(hw):
for x in range(hw):
if self.grid[y][x] == 2:
self.grid[y][x] = -1
res = True
for y in range(hw):
for x in range(hw):
if not empty(self.grid, y, x):
continue
plus, _ = check(self.grid, self.player, y, x)
if plus:
res = False
self.grid[y][x] = 2
if res:
#print('Pass!')
self.player = 1 - self.player
return res
def output(self):
print(' ', end='')
for i in range(hw):
print(chr(ord('a') + i), end=' ')
print('')
for y in range(hw):
print(str(y + 1) + ' ', end='')
for x in range(hw):
print('○' if self.grid[y][x] == 0 else '●' if self.grid[y][x] == 1 else '* ' if self.grid[y][x] == 2 else '. ', end='')
print('')
def output_file(self):
res = ''
for y in range(hw):
for x in range(hw):
res += '*' if self.grid[y][x] == 0 else 'O' if self.grid[y][x] == 1 else '-'
res += ' *'
return res
def end(self):
if min(self.nums) == 0:
return True
res = True
for y in range(hw):
for x in range(hw):
if self.grid[y][x] == -1 or self.grid[y][x] == 2:
res = False
return res
def judge(self):
if self.nums[0] > self.nums[1]:
#print('Black won!', self.nums[0], '-', self.nums[1])
return 0
elif self.nums[1] > self.nums[0]:
#print('White won!', self.nums[0], '-', self.nums[1])
return 1
else:
#print('Draw!', self.nums[0], '-', self.nums[1])
return -1
def translate_p(grid, arr):
res = []
for i in range(len(arr)):
tmp = 0
for j in reversed(range(len(arr[i]))):
tmp *= 3
tmp2 = grid[arr[i][j] // hw][arr[i][j] % hw]
if tmp2 == 0:
tmp += 1
elif tmp2 == 1:
tmp += 2
res.append(tmp)
return res
def translate_o(grid, arr):
res = []
for i in range(len(arr)):
tmp = 0
for j in reversed(range(len(arr[i]))):
tmp *= 3
tmp2 = grid[arr[i][j] // hw][arr[i][j] % hw]
if tmp2 == 1:
tmp += 1
elif tmp2 == 0:
tmp += 2
res.append(tmp)
return res
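# Sketch: pattern indices for the opening position. translate_p encodes each
# listed square in base 3 (0 = empty, 1 = black, 2 = white), so every index lies
# below the matching each_param_num entry; translate_o swaps the two colours so
# the same parameter tables can be reused from white's point of view.
def _example_pattern_indices():
    rv = reversi()
    return [translate_p(rv.grid, translate[i]) for i in range(pattern_num)]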
def calc_weight(idx, x):
x1 = 4.0 / 64
x2 = 32.0 / 64
x3 = 64.0 / 64
y1, y2, y3 = |